from abc import ABCMeta
from util.reflection import deriving
from util.functions import unique_id
import special
import attributes
# pylint: disable=W0231
class Node(deriving('eq', 'show')):
__metaclass__ = ABCMeta
def __init__(self):
self._attrs = attributes.Attributes()
self._unique_name = None
@property
def unique_name(self):
if self._unique_name is None:
self._unique_name = unique_id(self.__class__.__name__)
return self._unique_name
    def walk_down(self, visitor, short_circuit=False):
        visitor.visit(self)
        # Recurse unless short-circuiting is enabled and the visitor declines.
        if not short_circuit or visitor.recurse_on(self):
            self.recurse(visitor, Node.walk_down)
    def walk_up(self, visitor, short_circuit=False):
        if not short_circuit or visitor.recurse_on(self):
            self.recurse(visitor, Node.walk_up)
        visitor.visit(self)
def recurse(self, visitor, walk):
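        # Default: leaf nodes have no children to visit; container nodes override this.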
pass
def set_soft(self, key, value):
self._attrs.set_soft(key, value)
def set_hard(self, key, value):
self._attrs.set_hard(key, value)
def __contains__(self, key):
return self._attrs.__contains__(key)
def __getitem__(self, key):
return self._attrs.__getitem__(key)
def __setitem__(self, key, value):
self._attrs.__setitem__(key, value)
class Module(Node):
def __init__(self, name=None, exprs=None):
Node.__init__(self)
self.name = name
self.exprs = exprs
def recurse(self, visitor, walk):
for expr in self.exprs:
walk(expr, visitor)
class NoOp(Node):
def __init__(self):
Node.__init__(self)
class _Collection(Node):
def __init__(self, values=None):
Node.__init__(self)
if values is None:
values = []
self.values = values
def recurse(self, visitor, walk):
for value in self.values:
walk(value, visitor)
class Tuple(_Collection):
pass
class List(_Collection):
pass
class _Value(Node):
def __init__(self, value):
Node.__init__(self)
self.value = value
class Int(_Value):
pass
class Real(_Value):
pass
class Sci(_Value):
pass
class Bool(_Value):
def __init__(self, value):
assert value in ('0', '1')
_Value.__init__(self, value)
class ValueId(_Value):
pass
class SymbolId(_Value):
pass
class TypeId(_Value):
pass
class Unit(Node):
def __init__(self):
Node.__init__(self)
class Block(Node):
def __init__(self, exprs):
Node.__init__(self)
self.exprs = exprs
def recurse(self, visitor, walk):
for expr in self.exprs:
walk(expr, visitor)
class BinOp(Node):
def __init__(self, func, args):
Node.__init__(self)
self.func = func
self.args = args
def recurse(self, visitor, walk):
for arg in self.args:
walk(arg, visitor)
class If(Node):
def __init__(self, pred, if_body, else_body=None):
Node.__init__(self)
self.pred = pred
self.if_body = if_body
if else_body is not None:
self.else_body = else_body
else:
self.else_body = Unit()
def recurse(self, visitor, walk):
walk(self.pred, visitor)
walk(self.if_body, visitor)
walk(self.else_body, visitor)
class Else(Node):
def __init__(self, expr, body):
Node.__init__(self)
self.expr = expr
self.body = body
def recurse(self, visitor, walk):
walk(self.expr, visitor)
walk(self.body, visitor)
class Assign(Node):
def __init__(self, name, value):
Node.__init__(self)
self.name = name
self.value = value
def recurse(self, visitor, walk):
walk(self.value, visitor)
class AssignRhs(Node):
def __init__(self, value):
Node.__init__(self)
self.value = value
def recurse(self, visitor, walk):
walk(self.value, visitor)
class While(Node):
def __init__(self, pred, body):
Node.__init__(self)
self.pred = pred
self.body = body
def recurse(self, visitor, walk):
walk(self.pred, visitor)
walk(self.body, visitor)
class _Declaration(Node):
def __init__(self, name, value, type_=None):
Node.__init__(self)
self.name = name
self.value = value
if type_ is None:
self.type_ = InferType()
else:
self.type_ = type_
def recurse(self, visitor, walk):
walk(self.value, visitor)
class Val(_Declaration):
pass
class Var(_Declaration):
pass
class Mut(_Declaration):
pass
class Ref(_Declaration):
pass
class For(Node):
def __init__(self, clauses, body):
Node.__init__(self)
self.clauses = clauses
self.body = body
def recurse(self, visitor, walk):
walk(self.body, visitor)
class ForClause(Node):
def __init__(self, bind, in_):
Node.__init__(self)
self.bind = bind
self.in_ = in_
class KV(Node):
def __init__(self, key, value):
Node.__init__(self)
self.key = key
self.value = value
def recurse(self, visitor, walk):
walk(self.value, visitor)
class _Comment(Node):
def __init__(self, content):
Node.__init__(self)
self.content = content
class TempComment(_Comment):
pass
class DocComment(_Comment):
pass
class BlockComment(_Comment):
pass
class Binding(Node):
def __init__(self, left, right):
Node.__init__(self)
self.left = left
self.right = right
class Call(Node):
def __init__(self, func, arg, block=None):
Node.__init__(self)
self.func = func
self.arg = arg
if block is not None:
self.block = block
else:
self.block = Unit()
def recurse(self, visitor, walk):
walk(self.arg, visitor)
walk(self.block, visitor)
class Param(Node):
def __init__(self, name, type_=None):
Node.__init__(self)
self.name = name
if type_ is None:
self.type_ = InferType()
else:
self.type_ = type_
class Def(Node):
def __init__(self, name, param, body, return_type=None):
Node.__init__(self)
self.name = name
self.param = param
self.body = body
if return_type is not None:
self.return_type = return_type
else:
self.return_type = InferType()
def recurse(self, visitor, walk):
walk(self.param, visitor)
walk(self.body, visitor)
class _Specification(Node):
def __init__(self, name, body, param=None):
Node.__init__(self)
self.name = name
self.body = body
if param is not None:
self.param = param
else:
self.param = Tuple([])
class Proto(_Specification):
pass
class Object(_Specification):
pass
class Trait(_Specification):
pass
##############################################################################
# Types
##############################################################################
class InferType(Node):
pass
class IntType(Node):
pass
class BoolType(Node):
pass
class RealType(Node):
pass
class UnitType(Node):
pass
class AnyType(Node):
pass
class FunctionType(Node):
    def __init__(self, param_type, return_type):
        Node.__init__(self)
        self.param_type = param_type
        self.return_type = return_type
def recurse(self, visitor, walk):
walk(self.param_type, visitor)
walk(self.return_type, visitor)
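# A minimal usage sketch added for illustration; it is not part of the original
# module. CountingVisitor is a hypothetical visitor that relies only on the
# visit()/recurse_on() protocol expected by Node.walk_down() above.
if __name__ == '__main__':
    class CountingVisitor(object):
        def __init__(self):
            self.counts = {}
        def visit(self, node):
            name = node.__class__.__name__
            self.counts[name] = self.counts.get(name, 0) + 1
        def recurse_on(self, node):
            # Only consulted when walking with short_circuit=True.
            return True
    tree = Module(name='demo', exprs=[Val(ValueId('x'), Int('1'))])
    visitor = CountingVisitor()
    tree.walk_down(visitor)
    print(visitor.counts)  # e.g. {'Module': 1, 'Val': 1, 'Int': 1}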
##############################################################################
from test import support
import time
import unittest
import locale
import sysconfig
import sys
import warnings
class TimeTestCase(unittest.TestCase):
def setUp(self):
self.t = time.time()
def test_data_attributes(self):
time.altzone
time.daylight
time.timezone
time.tzname
def test_clock(self):
time.clock()
def test_conversions(self):
self.assertEqual(time.ctime(self.t),
time.asctime(time.localtime(self.t)))
self.assertEqual(int(time.mktime(time.localtime(self.t))),
int(self.t))
def test_sleep(self):
time.sleep(1.2)
def test_strftime(self):
tt = time.gmtime(self.t)
for directive in ('a', 'A', 'b', 'B', 'c', 'd', 'H', 'I',
'j', 'm', 'M', 'p', 'S',
'U', 'w', 'W', 'x', 'X', 'y', 'Y', 'Z', '%'):
format = ' %' + directive
try:
time.strftime(format, tt)
except ValueError:
self.fail('conversion specifier: %r failed.' % format)
        # Issue #10762: Guard against invalid/non-supported format strings
        # so that Python doesn't crash (Windows crashes when the format string
        # input to [w]strftime is not kosher).
if sys.platform.startswith('win'):
with self.assertRaises(ValueError):
time.strftime('%f')
def _bounds_checking(self, func=time.strftime):
        # Make sure that strftime() checks the bounds of the various parts
        # of the time tuple (0 is valid for *all* values).
# The year field is tested by other test cases above
# Check month [1, 12] + zero support
self.assertRaises(ValueError, func,
(1900, -1, 1, 0, 0, 0, 0, 1, -1))
self.assertRaises(ValueError, func,
(1900, 13, 1, 0, 0, 0, 0, 1, -1))
# Check day of month [1, 31] + zero support
self.assertRaises(ValueError, func,
(1900, 1, -1, 0, 0, 0, 0, 1, -1))
self.assertRaises(ValueError, func,
(1900, 1, 32, 0, 0, 0, 0, 1, -1))
# Check hour [0, 23]
self.assertRaises(ValueError, func,
(1900, 1, 1, -1, 0, 0, 0, 1, -1))
self.assertRaises(ValueError, func,
(1900, 1, 1, 24, 0, 0, 0, 1, -1))
# Check minute [0, 59]
self.assertRaises(ValueError, func,
(1900, 1, 1, 0, -1, 0, 0, 1, -1))
self.assertRaises(ValueError, func,
(1900, 1, 1, 0, 60, 0, 0, 1, -1))
# Check second [0, 61]
self.assertRaises(ValueError, func,
(1900, 1, 1, 0, 0, -1, 0, 1, -1))
# C99 only requires allowing for one leap second, but Python's docs say
# allow two leap seconds (0..61)
self.assertRaises(ValueError, func,
(1900, 1, 1, 0, 0, 62, 0, 1, -1))
# No check for upper-bound day of week;
# value forced into range by a ``% 7`` calculation.
# Start check at -2 since gettmarg() increments value before taking
# modulo.
self.assertRaises(ValueError, func,
(1900, 1, 1, 0, 0, 0, -2, 1, -1))
# Check day of the year [1, 366] + zero support
self.assertRaises(ValueError, func,
(1900, 1, 1, 0, 0, 0, 0, -1, -1))
self.assertRaises(ValueError, func,
(1900, 1, 1, 0, 0, 0, 0, 367, -1))
def test_strftime_bounding_check(self):
self._bounds_checking(lambda tup: time.strftime('', tup))
def test_default_values_for_zero(self):
# Make sure that using all zeros uses the proper default values.
# No test for daylight savings since strftime() does not change output
# based on its value.
expected = "2000 01 01 00 00 00 1 001"
with support.check_warnings():
result = time.strftime("%Y %m %d %H %M %S %w %j", (0,)*9)
self.assertEqual(expected, result)
def test_strptime(self):
# Should be able to go round-trip from strftime to strptime without
# throwing an exception.
tt = time.gmtime(self.t)
for directive in ('a', 'A', 'b', 'B', 'c', 'd', 'H', 'I',
'j', 'm', 'M', 'p', 'S',
'U', 'w', 'W', 'x', 'X', 'y', 'Y', 'Z', '%'):
format = '%' + directive
strf_output = time.strftime(format, tt)
try:
time.strptime(strf_output, format)
except ValueError:
self.fail("conversion specifier %r failed with '%s' input." %
(format, strf_output))
def test_strptime_bytes(self):
# Make sure only strings are accepted as arguments to strptime.
self.assertRaises(TypeError, time.strptime, b'2009', "%Y")
self.assertRaises(TypeError, time.strptime, '2009', b'%Y')
def test_asctime(self):
time.asctime(time.gmtime(self.t))
# Max year is only limited by the size of C int.
sizeof_int = sysconfig.get_config_var('SIZEOF_INT') or 4
bigyear = (1 << 8 * sizeof_int - 1) - 1
asc = time.asctime((bigyear, 6, 1) + (0,)*6)
self.assertEqual(asc[-len(str(bigyear)):], str(bigyear))
self.assertRaises(OverflowError, time.asctime, (bigyear + 1,) + (0,)*8)
self.assertRaises(TypeError, time.asctime, 0)
self.assertRaises(TypeError, time.asctime, ())
self.assertRaises(TypeError, time.asctime, (0,) * 10)
def test_asctime_bounding_check(self):
self._bounds_checking(time.asctime)
def test_ctime(self):
t = time.mktime((1973, 9, 16, 1, 3, 52, 0, 0, -1))
self.assertEqual(time.ctime(t), 'Sun Sep 16 01:03:52 1973')
t = time.mktime((2000, 1, 1, 0, 0, 0, 0, 0, -1))
self.assertEqual(time.ctime(t), 'Sat Jan 1 00:00:00 2000')
for year in [-100, 100, 1000, 2000, 10000]:
try:
testval = time.mktime((year, 1, 10) + (0,)*6)
except (ValueError, OverflowError):
# If mktime fails, ctime will fail too. This may happen
# on some platforms.
pass
else:
self.assertEqual(time.ctime(testval)[20:], str(year))
@unittest.skipIf(not hasattr(time, "tzset"),
"time module has no attribute tzset")
def test_tzset(self):
from os import environ
# Epoch time of midnight Dec 25th 2002. Never DST in northern
# hemisphere.
xmas2002 = 1040774400.0
# These formats are correct for 2002, and possibly future years
# This format is the 'standard' as documented at:
# http://www.opengroup.org/onlinepubs/007904975/basedefs/xbd_chap08.html
# They are also documented in the tzset(3) man page on most Unix
# systems.
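        # For example, 'EST+05EDT,M4.1.0,M10.5.0' reads as: standard zone EST,
        # five hours behind UTC, DST zone EDT, with DST starting on the first
        # Sunday of April and ending on the last Sunday of October (the
        # M<month>.<week>.<day> rule, where week 5 means "last" and day 0 is
        # Sunday).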
eastern = 'EST+05EDT,M4.1.0,M10.5.0'
victoria = 'AEST-10AEDT-11,M10.5.0,M3.5.0'
        utc = 'UTC+0'
        org_TZ = environ.get('TZ', None)
try:
# Make sure we can switch to UTC time and results are correct
# Note that unknown timezones default to UTC.
# Note that altzone is undefined in UTC, as there is no DST
environ['TZ'] = eastern
time.tzset()
environ['TZ'] = utc
time.tzset()
self.assertEqual(
time.gmtime(xmas2002), time.localtime(xmas2002)
)
self.assertEqual(time.daylight, 0)
self.assertEqual(time.timezone, 0)
self.assertEqual(time.localtime(xmas2002).tm_isdst, 0)
# Make sure we can switch to US/Eastern
environ['TZ'] = eastern
time.tzset()
self.assertNotEqual(time.gmtime(xmas2002), time.localtime(xmas2002))
self.assertEqual(time.tzname, ('EST', 'EDT'))
self.assertEqual(len(time.tzname), 2)
self.assertEqual(time.daylight, 1)
self.assertEqual(time.timezone, 18000)
self.assertEqual(time.altzone, 14400)
self.assertEqual(time.localtime(xmas2002).tm_isdst, 0)
self.assertEqual(len(time.tzname), 2)
# Now go to the southern hemisphere.
environ['TZ'] = victoria
time.tzset()
self.assertNotEqual(time.gmtime(xmas2002), time.localtime(xmas2002))
self.assertTrue(time.tzname[0] == 'AEST', str(time.tzname[0]))
self.assertTrue(time.tzname[1] == 'AEDT', str(time.tzname[1]))
self.assertEqual(len(time.tzname), 2)
self.assertEqual(time.daylight, 1)
self.assertEqual(time.timezone, -36000)
self.assertEqual(time.altzone, -39600)
self.assertEqual(time.localtime(xmas2002).tm_isdst, 1)
finally:
# Repair TZ environment variable in case any other tests
# rely on it.
if org_TZ is not None:
environ['TZ'] = org_TZ
elif 'TZ' in environ:
del environ['TZ']
time.tzset()
def test_insane_timestamps(self):
# It's possible that some platform maps time_t to double,
# and that this test will fail there. This test should
# exempt such platforms (provided they return reasonable
# results!).
for func in time.ctime, time.gmtime, time.localtime:
for unreasonable in -1e200, 1e200:
self.assertRaises(ValueError, func, unreasonable)
def test_ctime_without_arg(self):
# Not sure how to check the values, since the clock could tick
# at any time. Make sure these are at least accepted and
# don't raise errors.
time.ctime()
time.ctime(None)
def test_gmtime_without_arg(self):
gt0 = time.gmtime()
gt1 = time.gmtime(None)
t0 = time.mktime(gt0)
t1 = time.mktime(gt1)
self.assertAlmostEqual(t1, t0, delta=0.2)
def test_localtime_without_arg(self):
lt0 = time.localtime()
lt1 = time.localtime(None)
t0 = time.mktime(lt0)
t1 = time.mktime(lt1)
self.assertAlmostEqual(t1, t0, delta=0.2)
class TestLocale(unittest.TestCase):
def setUp(self):
self.oldloc = locale.setlocale(locale.LC_ALL)
def tearDown(self):
locale.setlocale(locale.LC_ALL, self.oldloc)
def test_bug_3061(self):
try:
tmp = locale.setlocale(locale.LC_ALL, "fr_FR")
except locale.Error:
# skip this test
return
# This should not cause an exception
time.strftime("%B", (2009,2,1,0,0,0,0,0,0))
class _BaseYearTest(unittest.TestCase):
accept2dyear = None
def setUp(self):
self.saved_accept2dyear = time.accept2dyear
time.accept2dyear = self.accept2dyear
def tearDown(self):
time.accept2dyear = self.saved_accept2dyear
def yearstr(self, y):
raise NotImplementedError()
class _TestAsctimeYear:
def yearstr(self, y):
return time.asctime((y,) + (0,) * 8).split()[-1]
def test_large_year(self):
# Check that it doesn't crash for year > 9999
self.assertEqual(self.yearstr(12345), '12345')
self.assertEqual(self.yearstr(123456789), '123456789')
class _TestStrftimeYear:
def yearstr(self, y):
return time.strftime('%Y', (y,) + (0,) * 8).split()[-1]
def test_large_year(self):
# Check that it doesn't crash for year > 9999
try:
text = self.yearstr(12345)
except ValueError:
# strftime() is limited to [1; 9999] with Visual Studio
return
self.assertEqual(text, '12345')
self.assertEqual(self.yearstr(123456789), '123456789')
class _Test2dYear(_BaseYearTest):
accept2dyear = 1
def test_year(self):
with support.check_warnings():
self.assertEqual(self.yearstr(0), '2000')
self.assertEqual(self.yearstr(69), '1969')
self.assertEqual(self.yearstr(68), '2068')
self.assertEqual(self.yearstr(99), '1999')
def test_invalid(self):
self.assertRaises(ValueError, self.yearstr, -1)
self.assertRaises(ValueError, self.yearstr, 100)
self.assertRaises(ValueError, self.yearstr, 999)
class _Test4dYear(_BaseYearTest):
accept2dyear = 0
def test_year(self):
self.assertIn(self.yearstr(1), ('1', '0001'))
self.assertIn(self.yearstr(68), ('68', '0068'))
self.assertIn(self.yearstr(69), ('69', '0069'))
self.assertIn(self.yearstr(99), ('99', '0099'))
self.assertIn(self.yearstr(999), ('999', '0999'))
self.assertEqual(self.yearstr(9999), '9999')
def test_negative(self):
try:
text = self.yearstr(-1)
except ValueError:
# strftime() is limited to [1; 9999] with Visual Studio
return
self.assertIn(text, ('-1', '-001'))
self.assertEqual(self.yearstr(-1234), '-1234')
self.assertEqual(self.yearstr(-123456), '-123456')
def test_mktime(self):
# Issue #1726687
for t in (-2, -1, 0, 1):
try:
tt = time.localtime(t)
except (OverflowError, ValueError):
pass
else:
self.assertEqual(time.mktime(tt), t)
        # It may not be possible to reliably make mktime return an error
        # on all platforms. This will make sure that no exception other
        # than OverflowError is raised for an extreme value.
try:
time.mktime((-1, 1, 1, 0, 0, 0, -1, -1, -1))
except OverflowError:
pass
class TestAsctimeAccept2dYear(_TestAsctimeYear, _Test2dYear):
pass
class TestStrftimeAccept2dYear(_TestStrftimeYear, _Test2dYear):
pass
class TestAsctime4dyear(_TestAsctimeYear, _Test4dYear):
pass
class TestStrftime4dyear(_TestStrftimeYear, _Test4dYear):
pass
class Test2dyearBool(_TestAsctimeYear, _Test2dYear):
accept2dyear = True
class Test4dyearBool(_TestAsctimeYear, _Test4dYear):
accept2dyear = False
class TestAccept2YearBad(_TestAsctimeYear, _BaseYearTest):
class X:
def __bool__(self):
raise RuntimeError('boo')
accept2dyear = X()
def test_2dyear(self):
pass
def test_invalid(self):
self.assertRaises(RuntimeError, self.yearstr, 200)
def test_main():
support.run_unittest(
TimeTestCase,
TestLocale,
TestAsctimeAccept2dYear,
TestStrftimeAccept2dYear,
TestAsctime4dyear,
TestStrftime4dyear,
Test2dyearBool,
Test4dyearBool,
TestAccept2YearBad)
if __name__ == "__main__":
test_main()
##############################################################################
from __future__ import absolute_import
import re
import json
import unittest
from io import BytesIO
from six.moves import cPickle as pickle
import lxml.etree
from scrapy.item import Item, Field
from scrapy.utils.python import to_unicode
from scrapy.exporters import (
BaseItemExporter, PprintItemExporter, PickleItemExporter, CsvItemExporter,
XmlItemExporter, JsonLinesItemExporter, JsonItemExporter, PythonItemExporter
)
class TestItem(Item):
name = Field()
age = Field()
class BaseItemExporterTest(unittest.TestCase):
def setUp(self):
self.i = TestItem(name=u'John\xa3', age='22')
self.output = BytesIO()
self.ie = self._get_exporter()
def _get_exporter(self, **kwargs):
return BaseItemExporter(**kwargs)
def _check_output(self):
pass
def _assert_expected_item(self, exported_dict):
for k, v in exported_dict.items():
exported_dict[k] = to_unicode(v)
self.assertEqual(self.i, exported_dict)
def assertItemExportWorks(self, item):
self.ie.start_exporting()
try:
self.ie.export_item(item)
except NotImplementedError:
if self.ie.__class__ is not BaseItemExporter:
raise
self.ie.finish_exporting()
self._check_output()
def test_export_item(self):
self.assertItemExportWorks(self.i)
def test_export_dict_item(self):
self.assertItemExportWorks(dict(self.i))
def test_serialize_field(self):
res = self.ie.serialize_field(self.i.fields['name'], 'name', self.i['name'])
self.assertEqual(res, 'John\xc2\xa3')
res = self.ie.serialize_field(self.i.fields['age'], 'age', self.i['age'])
self.assertEqual(res, '22')
def test_fields_to_export(self):
ie = self._get_exporter(fields_to_export=['name'])
self.assertEqual(list(ie._get_serialized_fields(self.i)), [('name', 'John\xc2\xa3')])
ie = self._get_exporter(fields_to_export=['name'], encoding='latin-1')
name = list(ie._get_serialized_fields(self.i))[0][1]
assert isinstance(name, str)
self.assertEqual(name, 'John\xa3')
def test_field_custom_serializer(self):
def custom_serializer(value):
return str(int(value) + 2)
class CustomFieldItem(Item):
name = Field()
age = Field(serializer=custom_serializer)
i = CustomFieldItem(name=u'John\xa3', age='22')
ie = self._get_exporter()
self.assertEqual(ie.serialize_field(i.fields['name'], 'name', i['name']), 'John\xc2\xa3')
self.assertEqual(ie.serialize_field(i.fields['age'], 'age', i['age']), '24')
class PythonItemExporterTest(BaseItemExporterTest):
def _get_exporter(self, **kwargs):
return PythonItemExporter(**kwargs)
def test_nested_item(self):
i1 = TestItem(name=u'Joseph', age='22')
i2 = dict(name=u'Maria', age=i1)
i3 = TestItem(name=u'Jesus', age=i2)
ie = self._get_exporter()
exported = ie.export_item(i3)
self.assertEqual(type(exported), dict)
self.assertEqual(exported, {'age': {'age': {'age': '22', 'name': u'Joseph'}, 'name': u'Maria'}, 'name': 'Jesus'})
self.assertEqual(type(exported['age']), dict)
self.assertEqual(type(exported['age']['age']), dict)
def test_export_list(self):
i1 = TestItem(name=u'Joseph', age='22')
i2 = TestItem(name=u'Maria', age=[i1])
i3 = TestItem(name=u'Jesus', age=[i2])
ie = self._get_exporter()
exported = ie.export_item(i3)
self.assertEqual(exported, {'age': [{'age': [{'age': '22', 'name': u'Joseph'}], 'name': u'Maria'}], 'name': 'Jesus'})
self.assertEqual(type(exported['age'][0]), dict)
self.assertEqual(type(exported['age'][0]['age'][0]), dict)
def test_export_item_dict_list(self):
i1 = TestItem(name=u'Joseph', age='22')
i2 = dict(name=u'Maria', age=[i1])
i3 = TestItem(name=u'Jesus', age=[i2])
ie = self._get_exporter()
exported = ie.export_item(i3)
self.assertEqual(exported, {'age': [{'age': [{'age': '22', 'name': u'Joseph'}], 'name': u'Maria'}], 'name': 'Jesus'})
self.assertEqual(type(exported['age'][0]), dict)
self.assertEqual(type(exported['age'][0]['age'][0]), dict)
class PprintItemExporterTest(BaseItemExporterTest):
def _get_exporter(self, **kwargs):
return PprintItemExporter(self.output, **kwargs)
def _check_output(self):
self._assert_expected_item(eval(self.output.getvalue()))
class PickleItemExporterTest(BaseItemExporterTest):
def _get_exporter(self, **kwargs):
return PickleItemExporter(self.output, **kwargs)
def _check_output(self):
self._assert_expected_item(pickle.loads(self.output.getvalue()))
def test_export_multiple_items(self):
i1 = TestItem(name='hello', age='world')
i2 = TestItem(name='bye', age='world')
f = BytesIO()
ie = PickleItemExporter(f)
ie.start_exporting()
ie.export_item(i1)
ie.export_item(i2)
ie.finish_exporting()
f.seek(0)
self.assertEqual(pickle.load(f), i1)
self.assertEqual(pickle.load(f), i2)
class CsvItemExporterTest(BaseItemExporterTest):
def _get_exporter(self, **kwargs):
return CsvItemExporter(self.output, **kwargs)
def assertCsvEqual(self, first, second, msg=None):
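        # Compare CSV output ignoring column order: split each line on commas
        # and whitespace and sort the tokens before comparing line by line.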
csvsplit = lambda csv: [sorted(re.split(r'(,|\s+)', line))
for line in csv.splitlines(True)]
return self.assertEqual(csvsplit(first), csvsplit(second), msg)
def _check_output(self):
self.assertCsvEqual(self.output.getvalue(), 'age,name\r\n22,John\xc2\xa3\r\n')
def assertExportResult(self, item, expected, **kwargs):
fp = BytesIO()
ie = CsvItemExporter(fp, **kwargs)
ie.start_exporting()
ie.export_item(item)
ie.finish_exporting()
self.assertCsvEqual(fp.getvalue(), expected)
def test_header_export_all(self):
self.assertExportResult(
item=self.i,
fields_to_export=self.i.fields.keys(),
expected='age,name\r\n22,John\xc2\xa3\r\n',
)
def test_header_export_all_dict(self):
self.assertExportResult(
item=dict(self.i),
expected='age,name\r\n22,John\xc2\xa3\r\n',
)
def test_header_export_single_field(self):
for item in [self.i, dict(self.i)]:
self.assertExportResult(
item=item,
fields_to_export=['age'],
expected='age\r\n22\r\n',
)
def test_header_export_two_items(self):
for item in [self.i, dict(self.i)]:
output = BytesIO()
ie = CsvItemExporter(output)
ie.start_exporting()
ie.export_item(item)
ie.export_item(item)
ie.finish_exporting()
self.assertCsvEqual(output.getvalue(), 'age,name\r\n22,John\xc2\xa3\r\n22,John\xc2\xa3\r\n')
def test_header_no_header_line(self):
for item in [self.i, dict(self.i)]:
self.assertExportResult(
item=item,
include_headers_line=False,
expected='22,John\xc2\xa3\r\n',
)
def test_join_multivalue(self):
class TestItem2(Item):
name = Field()
friends = Field()
for cls in TestItem2, dict:
self.assertExportResult(
item=cls(name='John', friends=['Mary', 'Paul']),
include_headers_line=False,
expected='"Mary,Paul",John\r\n',
)
class XmlItemExporterTest(BaseItemExporterTest):
def _get_exporter(self, **kwargs):
return XmlItemExporter(self.output, **kwargs)
def assertXmlEquivalent(self, first, second, msg=None):
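        # Compare two XML documents structurally, ignoring the order of sibling
        # elements, by reducing each document to sorted (tag, children) tuples.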
def xmltuple(elem):
children = list(elem.iterchildren())
if children:
return [(child.tag, sorted(xmltuple(child)))
for child in children]
else:
return [(elem.tag, [(elem.text, ())])]
def xmlsplit(xmlcontent):
doc = lxml.etree.fromstring(xmlcontent)
return xmltuple(doc)
return self.assertEqual(xmlsplit(first), xmlsplit(second), msg)
def assertExportResult(self, item, expected_value):
fp = BytesIO()
ie = XmlItemExporter(fp)
ie.start_exporting()
ie.export_item(item)
ie.finish_exporting()
self.assertXmlEquivalent(fp.getvalue(), expected_value)
def _check_output(self):
expected_value = '<?xml version="1.0" encoding="utf-8"?>\n<items><item><age>22</age><name>John\xc2\xa3</name></item></items>'
self.assertXmlEquivalent(self.output.getvalue(), expected_value)
def test_multivalued_fields(self):
self.assertExportResult(
TestItem(name=[u'John\xa3', u'Doe']),
'<?xml version="1.0" encoding="utf-8"?>\n<items><item><name><value>John\xc2\xa3</value><value>Doe</value></name></item></items>'
)
def test_nested_item(self):
i1 = TestItem(name=u'foo\xa3hoo', age='22')
i2 = dict(name=u'bar', age=i1)
i3 = TestItem(name=u'buz', age=i2)
self.assertExportResult(i3,
'<?xml version="1.0" encoding="utf-8"?>\n'
'<items>'
'<item>'
'<age>'
'<age>'
'<age>22</age>'
'<name>foo\xc2\xa3hoo</name>'
'</age>'
'<name>bar</name>'
'</age>'
'<name>buz</name>'
'</item>'
'</items>'
)
def test_nested_list_item(self):
i1 = TestItem(name=u'foo')
i2 = dict(name=u'bar', v2={"egg": ["spam"]})
i3 = TestItem(name=u'buz', age=[i1, i2])
self.assertExportResult(i3,
'<?xml version="1.0" encoding="utf-8"?>\n'
'<items>'
'<item>'
'<age>'
'<value><name>foo</name></value>'
'<value><name>bar</name><v2><egg><value>spam</value></egg></v2></value>'
'</age>'
'<name>buz</name>'
'</item>'
'</items>'
)
class JsonLinesItemExporterTest(BaseItemExporterTest):
_expected_nested = {'name': u'Jesus', 'age': {'name': 'Maria', 'age': {'name': 'Joseph', 'age': '22'}}}
def _get_exporter(self, **kwargs):
return JsonLinesItemExporter(self.output, **kwargs)
def _check_output(self):
exported = json.loads(self.output.getvalue().strip())
self.assertEqual(exported, dict(self.i))
def test_nested_item(self):
i1 = TestItem(name=u'Joseph', age='22')
i2 = dict(name=u'Maria', age=i1)
i3 = TestItem(name=u'Jesus', age=i2)
self.ie.start_exporting()
self.ie.export_item(i3)
self.ie.finish_exporting()
exported = json.loads(self.output.getvalue())
self.assertEqual(exported, self._expected_nested)
def test_extra_keywords(self):
self.ie = self._get_exporter(sort_keys=True)
self.test_export_item()
self._check_output()
self.assertRaises(TypeError, self._get_exporter, foo_unknown_keyword_bar=True)
class JsonItemExporterTest(JsonLinesItemExporterTest):
_expected_nested = [JsonLinesItemExporterTest._expected_nested]
def _get_exporter(self, **kwargs):
return JsonItemExporter(self.output, **kwargs)
def _check_output(self):
exported = json.loads(self.output.getvalue().strip())
self.assertEqual(exported, [dict(self.i)])
def assertTwoItemsExported(self, item):
self.ie.start_exporting()
self.ie.export_item(item)
self.ie.export_item(item)
self.ie.finish_exporting()
exported = json.loads(self.output.getvalue())
self.assertEqual(exported, [dict(item), dict(item)])
def test_two_items(self):
self.assertTwoItemsExported(self.i)
def test_two_dict_items(self):
self.assertTwoItemsExported(dict(self.i))
def test_nested_item(self):
i1 = TestItem(name=u'Joseph\xa3', age='22')
i2 = TestItem(name=u'Maria', age=i1)
i3 = TestItem(name=u'Jesus', age=i2)
self.ie.start_exporting()
self.ie.export_item(i3)
self.ie.finish_exporting()
exported = json.loads(self.output.getvalue())
expected = {'name': u'Jesus', 'age': {'name': 'Maria', 'age': dict(i1)}}
self.assertEqual(exported, [expected])
def test_nested_dict_item(self):
i1 = dict(name=u'Joseph\xa3', age='22')
i2 = TestItem(name=u'Maria', age=i1)
i3 = dict(name=u'Jesus', age=i2)
self.ie.start_exporting()
self.ie.export_item(i3)
self.ie.finish_exporting()
exported = json.loads(self.output.getvalue())
expected = {'name': u'Jesus', 'age': {'name': 'Maria', 'age': i1}}
self.assertEqual(exported, [expected])
class CustomItemExporterTest(unittest.TestCase):
def test_exporter_custom_serializer(self):
class CustomItemExporter(BaseItemExporter):
def serialize_field(self, field, name, value):
if name == 'age':
return str(int(value) + 1)
else:
return super(CustomItemExporter, self).serialize_field(field, name, value)
i = TestItem(name=u'John', age='22')
ie = CustomItemExporter()
self.assertEqual(ie.serialize_field(i.fields['name'], 'name', i['name']), 'John')
self.assertEqual(ie.serialize_field(i.fields['age'], 'age', i['age']), '23')
i2 = {'name': u'John', 'age': '22'}
self.assertEqual(ie.serialize_field({}, 'name', i2['name']), 'John')
self.assertEqual(ie.serialize_field({}, 'age', i2['age']), '23')
if __name__ == '__main__':
unittest.main()
##############################################################################
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import os
import shutil
import signal
import sys
import threading
import warnings
from threading import RLock
from tempfile import NamedTemporaryFile
from py4j.protocol import Py4JError
from pyspark import accumulators
from pyspark.accumulators import Accumulator
from pyspark.broadcast import Broadcast, BroadcastPickleRegistry
from pyspark.conf import SparkConf
from pyspark.files import SparkFiles
from pyspark.java_gateway import launch_gateway
from pyspark.serializers import PickleSerializer, BatchedSerializer, UTF8Deserializer, \
PairDeserializer, AutoBatchedSerializer, NoOpSerializer
from pyspark.storagelevel import StorageLevel
from pyspark.rdd import RDD, _load_from_socket, ignore_unicode_prefix
from pyspark.traceback_utils import CallSite, first_spark_call
from pyspark.status import StatusTracker
from pyspark.profiler import ProfilerCollector, BasicProfiler
if sys.version > '3':
xrange = range
__all__ = ['SparkContext']
# These are special default configs for PySpark; they overwrite the
# corresponding Spark defaults unless the user has configured them.
DEFAULT_CONFIGS = {
"spark.serializer.objectStreamReset": 100,
"spark.rdd.compress": True,
}
class SparkContext(object):
"""
Main entry point for Spark functionality. A SparkContext represents the
connection to a Spark cluster, and can be used to create L{RDD} and
broadcast variables on that cluster.
"""
_gateway = None
_jvm = None
_next_accum_id = 0
_active_spark_context = None
_lock = RLock()
_python_includes = None # zip and egg files that need to be added to PYTHONPATH
PACKAGE_EXTENSIONS = ('.zip', '.egg', '.jar')
def __init__(self, master=None, appName=None, sparkHome=None, pyFiles=None,
environment=None, batchSize=0, serializer=PickleSerializer(), conf=None,
gateway=None, jsc=None, profiler_cls=BasicProfiler):
"""
Create a new SparkContext. At least the master and app name should be set,
either through the named parameters here or through C{conf}.
:param master: Cluster URL to connect to
(e.g. mesos://host:port, spark://host:port, local[4]).
:param appName: A name for your job, to display on the cluster web UI.
:param sparkHome: Location where Spark is installed on cluster nodes.
:param pyFiles: Collection of .zip or .py files to send to the cluster
and add to PYTHONPATH. These can be paths on the local file
system or HDFS, HTTP, HTTPS, or FTP URLs.
:param environment: A dictionary of environment variables to set on
worker nodes.
:param batchSize: The number of Python objects represented as a single
Java object. Set 1 to disable batching, 0 to automatically choose
the batch size based on object sizes, or -1 to use an unlimited
batch size
:param serializer: The serializer for RDDs.
:param conf: A L{SparkConf} object setting Spark properties.
:param gateway: Use an existing gateway and JVM, otherwise a new JVM
will be instantiated.
:param jsc: The JavaSparkContext instance (optional).
:param profiler_cls: A class of custom Profiler used to do profiling
(default is pyspark.profiler.BasicProfiler).
>>> from pyspark.context import SparkContext
>>> sc = SparkContext('local', 'test')
>>> sc2 = SparkContext('local', 'test2') # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError:...
"""
self._callsite = first_spark_call() or CallSite(None, None, None)
SparkContext._ensure_initialized(self, gateway=gateway, conf=conf)
try:
self._do_init(master, appName, sparkHome, pyFiles, environment, batchSize, serializer,
conf, jsc, profiler_cls)
except:
# If an error occurs, clean up in order to allow future SparkContext creation:
self.stop()
raise
def _do_init(self, master, appName, sparkHome, pyFiles, environment, batchSize, serializer,
conf, jsc, profiler_cls):
self.environment = environment or {}
# java gateway must have been launched at this point.
if conf is not None and conf._jconf is not None:
            # conf has been initialized in JVM properly, so use conf directly. This represents the
# scenario that JVM has been launched before SparkConf is created (e.g. SparkContext is
# created and then stopped, and we create a new SparkConf and new SparkContext again)
self._conf = conf
else:
self._conf = SparkConf(_jvm=SparkContext._jvm)
if conf is not None:
for k, v in conf.getAll():
self._conf.set(k, v)
self._batchSize = batchSize # -1 represents an unlimited batch size
self._unbatched_serializer = serializer
if batchSize == 0:
self.serializer = AutoBatchedSerializer(self._unbatched_serializer)
else:
self.serializer = BatchedSerializer(self._unbatched_serializer,
batchSize)
# Set any parameters passed directly to us on the conf
if master:
self._conf.setMaster(master)
if appName:
self._conf.setAppName(appName)
if sparkHome:
self._conf.setSparkHome(sparkHome)
if environment:
for key, value in environment.items():
self._conf.setExecutorEnv(key, value)
for key, value in DEFAULT_CONFIGS.items():
self._conf.setIfMissing(key, value)
# Check that we have at least the required parameters
if not self._conf.contains("spark.master"):
raise Exception("A master URL must be set in your configuration")
if not self._conf.contains("spark.app.name"):
raise Exception("An application name must be set in your configuration")
# Read back our properties from the conf in case we loaded some of them from
# the classpath or an external config file
self.master = self._conf.get("spark.master")
self.appName = self._conf.get("spark.app.name")
self.sparkHome = self._conf.get("spark.home", None)
for (k, v) in self._conf.getAll():
if k.startswith("spark.executorEnv."):
varName = k[len("spark.executorEnv."):]
self.environment[varName] = v
self.environment["PYTHONHASHSEED"] = os.environ.get("PYTHONHASHSEED", "0")
# Create the Java SparkContext through Py4J
self._jsc = jsc or self._initialize_context(self._conf._jconf)
# Reset the SparkConf to the one actually used by the SparkContext in JVM.
self._conf = SparkConf(_jconf=self._jsc.sc().conf())
# Create a single Accumulator in Java that we'll send all our updates through;
# they will be passed back to us through a TCP server
self._accumulatorServer = accumulators._start_update_server()
(host, port) = self._accumulatorServer.server_address
self._javaAccumulator = self._jvm.PythonAccumulatorV2(host, port)
self._jsc.sc().register(self._javaAccumulator)
self.pythonExec = os.environ.get("PYSPARK_PYTHON", 'python')
self.pythonVer = "%d.%d" % sys.version_info[:2]
# Broadcast's __reduce__ method stores Broadcast instances here.
# This allows other code to determine which Broadcast instances have
# been pickled, so it can determine which Java broadcast objects to
# send.
self._pickled_broadcast_vars = BroadcastPickleRegistry()
SparkFiles._sc = self
root_dir = SparkFiles.getRootDirectory()
sys.path.insert(1, root_dir)
# Deploy any code dependencies specified in the constructor
self._python_includes = list()
for path in (pyFiles or []):
self.addPyFile(path)
# Deploy code dependencies set by spark-submit; these will already have been added
# with SparkContext.addFile, so we just need to add them to the PYTHONPATH
for path in self._conf.get("spark.submit.pyFiles", "").split(","):
if path != "":
(dirname, filename) = os.path.split(path)
try:
filepath = os.path.join(SparkFiles.getRootDirectory(), filename)
if not os.path.exists(filepath):
# In case of YARN with shell mode, 'spark.submit.pyFiles' files are
# not added via SparkContext.addFile. Here we check if the file exists,
# try to copy and then add it to the path. See SPARK-21945.
shutil.copyfile(path, filepath)
if filename[-4:].lower() in self.PACKAGE_EXTENSIONS:
self._python_includes.append(filename)
sys.path.insert(1, filepath)
except Exception:
warnings.warn(
"Failed to add file [%s] speficied in 'spark.submit.pyFiles' to "
"Python path:\n %s" % (path, "\n ".join(sys.path)),
RuntimeWarning)
# Create a temporary directory inside spark.local.dir:
local_dir = self._jvm.org.apache.spark.util.Utils.getLocalDir(self._jsc.sc().conf())
self._temp_dir = \
self._jvm.org.apache.spark.util.Utils.createTempDir(local_dir, "pyspark") \
.getAbsolutePath()
# profiling stats collected for each PythonRDD
if self._conf.get("spark.python.profile", "false") == "true":
dump_path = self._conf.get("spark.python.profile.dump", None)
self.profiler_collector = ProfilerCollector(profiler_cls, dump_path)
else:
self.profiler_collector = None
# create a signal handler which would be invoked on receiving SIGINT
def signal_handler(signal, frame):
self.cancelAllJobs()
raise KeyboardInterrupt()
# see http://stackoverflow.com/questions/23206787/
if isinstance(threading.current_thread(), threading._MainThread):
signal.signal(signal.SIGINT, signal_handler)
def __repr__(self):
return "<SparkContext master={master} appName={appName}>".format(
master=self.master,
appName=self.appName,
)
def _repr_html_(self):
return """
<div>
<p><b>SparkContext</b></p>
<p><a href="{sc.uiWebUrl}">Spark UI</a></p>
<dl>
<dt>Version</dt>
<dd><code>v{sc.version}</code></dd>
<dt>Master</dt>
<dd><code>{sc.master}</code></dd>
<dt>AppName</dt>
<dd><code>{sc.appName}</code></dd>
</dl>
</div>
""".format(
sc=self
)
def _initialize_context(self, jconf):
"""
Initialize SparkContext in function to allow subclass specific initialization
"""
return self._jvm.JavaSparkContext(jconf)
@classmethod
def _ensure_initialized(cls, instance=None, gateway=None, conf=None):
"""
Checks whether a SparkContext is initialized or not.
Throws error if a SparkContext is already running.
"""
with SparkContext._lock:
if not SparkContext._gateway:
SparkContext._gateway = gateway or launch_gateway(conf)
SparkContext._jvm = SparkContext._gateway.jvm
if instance:
if (SparkContext._active_spark_context and
SparkContext._active_spark_context != instance):
currentMaster = SparkContext._active_spark_context.master
currentAppName = SparkContext._active_spark_context.appName
callsite = SparkContext._active_spark_context._callsite
# Raise error if there is already a running Spark context
raise ValueError(
"Cannot run multiple SparkContexts at once; "
"existing SparkContext(app=%s, master=%s)"
" created by %s at %s:%s "
% (currentAppName, currentMaster,
callsite.function, callsite.file, callsite.linenum))
else:
SparkContext._active_spark_context = instance
def __getnewargs__(self):
# This method is called when attempting to pickle SparkContext, which is always an error:
raise Exception(
"It appears that you are attempting to reference SparkContext from a broadcast "
"variable, action, or transformation. SparkContext can only be used on the driver, "
"not in code that it run on workers. For more information, see SPARK-5063."
)
def __enter__(self):
"""
Enable 'with SparkContext(...) as sc: app(sc)' syntax.
"""
return self
def __exit__(self, type, value, trace):
"""
Enable 'with SparkContext(...) as sc: app' syntax.
Specifically stop the context on exit of the with block.
"""
self.stop()
@classmethod
def getOrCreate(cls, conf=None):
"""
Get or instantiate a SparkContext and register it as a singleton object.
:param conf: SparkConf (optional)
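        A small illustrative example (the C{sc} below is the SparkContext
        provided to this module's doctests):
        >>> SparkContext.getOrCreate() is sc
        True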
"""
with SparkContext._lock:
if SparkContext._active_spark_context is None:
SparkContext(conf=conf or SparkConf())
return SparkContext._active_spark_context
def setLogLevel(self, logLevel):
"""
Control our logLevel. This overrides any user-defined log settings.
Valid log levels include: ALL, DEBUG, ERROR, FATAL, INFO, OFF, TRACE, WARN
"""
self._jsc.setLogLevel(logLevel)
@classmethod
def setSystemProperty(cls, key, value):
"""
        Set a Java system property, such as spark.executor.memory. This must
        be invoked before instantiating SparkContext.
"""
SparkContext._ensure_initialized()
SparkContext._jvm.java.lang.System.setProperty(key, value)
@property
def version(self):
"""
The version of Spark on which this application is running.
"""
return self._jsc.version()
@property
@ignore_unicode_prefix
def applicationId(self):
"""
A unique identifier for the Spark application.
Its format depends on the scheduler implementation.
* in case of local spark app something like 'local-1433865536131'
* in case of YARN something like 'application_1433865536131_34483'
>>> sc.applicationId # doctest: +ELLIPSIS
u'local-...'
"""
return self._jsc.sc().applicationId()
@property
def uiWebUrl(self):
"""Return the URL of the SparkUI instance started by this SparkContext"""
return self._jsc.sc().uiWebUrl().get()
@property
def startTime(self):
"""Return the epoch time when the Spark Context was started."""
return self._jsc.startTime()
@property
def defaultParallelism(self):
"""
Default level of parallelism to use when not given by user (e.g. for
reduce tasks)
"""
return self._jsc.sc().defaultParallelism()
@property
def defaultMinPartitions(self):
"""
Default min number of partitions for Hadoop RDDs when not given by user
"""
return self._jsc.sc().defaultMinPartitions()
def stop(self):
"""
Shut down the SparkContext.
"""
if getattr(self, "_jsc", None):
try:
self._jsc.stop()
except Py4JError:
# Case: SPARK-18523
warnings.warn(
'Unable to cleanly shutdown Spark JVM process.'
' It is possible that the process has crashed,'
' been killed or may also be in a zombie state.',
RuntimeWarning
)
pass
finally:
self._jsc = None
if getattr(self, "_accumulatorServer", None):
self._accumulatorServer.shutdown()
self._accumulatorServer = None
with SparkContext._lock:
SparkContext._active_spark_context = None
def emptyRDD(self):
"""
Create an RDD that has no partitions or elements.
"""
return RDD(self._jsc.emptyRDD(), self, NoOpSerializer())
def range(self, start, end=None, step=1, numSlices=None):
"""
Create a new RDD of int containing elements from `start` to `end`
(exclusive), increased by `step` every element. Can be called the same
way as python's built-in range() function. If called with a single argument,
the argument is interpreted as `end`, and `start` is set to 0.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numSlices: the number of partitions of the new RDD
:return: An RDD of int
>>> sc.range(5).collect()
[0, 1, 2, 3, 4]
>>> sc.range(2, 4).collect()
[2, 3]
>>> sc.range(1, 7, 2).collect()
[1, 3, 5]
"""
if end is None:
end = start
start = 0
return self.parallelize(xrange(start, end, step), numSlices)
def parallelize(self, c, numSlices=None):
"""
Distribute a local Python collection to form an RDD. Using xrange
is recommended if the input represents a range for performance.
>>> sc.parallelize([0, 2, 3, 4, 6], 5).glom().collect()
[[0], [2], [3], [4], [6]]
>>> sc.parallelize(xrange(0, 6, 2), 5).glom().collect()
[[], [0], [], [2], [4]]
"""
numSlices = int(numSlices) if numSlices is not None else self.defaultParallelism
if isinstance(c, xrange):
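            # Fast path for ranges: rather than materializing and shipping the
            # whole range, create empty partitions and let each partition
            # regenerate its own slice locally via mapPartitionsWithIndex below.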
size = len(c)
if size == 0:
return self.parallelize([], numSlices)
step = c[1] - c[0] if size > 1 else 1
start0 = c[0]
def getStart(split):
return start0 + int((split * size / numSlices)) * step
def f(split, iterator):
return xrange(getStart(split), getStart(split + 1), step)
return self.parallelize([], numSlices).mapPartitionsWithIndex(f)
# Make sure we distribute data evenly if it's smaller than self.batchSize
if "__len__" not in dir(c):
c = list(c) # Make it a list so we can compute its length
batchSize = max(1, min(len(c) // numSlices, self._batchSize or 1024))
serializer = BatchedSerializer(self._unbatched_serializer, batchSize)
jrdd = self._serialize_to_jvm(c, numSlices, serializer)
return RDD(jrdd, self, serializer)
def _serialize_to_jvm(self, data, parallelism, serializer):
"""
Calling the Java parallelize() method with an ArrayList is too slow,
because it sends O(n) Py4J commands. As an alternative, serialized
objects are written to a file and loaded through textFile().
"""
tempFile = NamedTemporaryFile(delete=False, dir=self._temp_dir)
try:
serializer.dump_stream(data, tempFile)
tempFile.close()
readRDDFromFile = self._jvm.PythonRDD.readRDDFromFile
return readRDDFromFile(self._jsc, tempFile.name, parallelism)
finally:
            # readRDDFromFile eagerly reads the file, so we can delete it right after.
os.unlink(tempFile.name)
def pickleFile(self, name, minPartitions=None):
"""
Load an RDD previously saved using L{RDD.saveAsPickleFile} method.
>>> tmpFile = NamedTemporaryFile(delete=True)
>>> tmpFile.close()
>>> sc.parallelize(range(10)).saveAsPickleFile(tmpFile.name, 5)
>>> sorted(sc.pickleFile(tmpFile.name, 3).collect())
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
minPartitions = minPartitions or self.defaultMinPartitions
return RDD(self._jsc.objectFile(name, minPartitions), self)
@ignore_unicode_prefix
def textFile(self, name, minPartitions=None, use_unicode=True):
"""
Read a text file from HDFS, a local file system (available on all
nodes), or any Hadoop-supported file system URI, and return it as an
RDD of Strings.
If use_unicode is False, the strings will be kept as `str` (encoding
as `utf-8`), which is faster and smaller than unicode. (Added in
Spark 1.2)
>>> path = os.path.join(tempdir, "sample-text.txt")
>>> with open(path, "w") as testFile:
... _ = testFile.write("Hello world!")
>>> textFile = sc.textFile(path)
>>> textFile.collect()
[u'Hello world!']
"""
minPartitions = minPartitions or min(self.defaultParallelism, 2)
return RDD(self._jsc.textFile(name, minPartitions), self,
UTF8Deserializer(use_unicode))
@ignore_unicode_prefix
def wholeTextFiles(self, path, minPartitions=None, use_unicode=True):
"""
Read a directory of text files from HDFS, a local file system
(available on all nodes), or any Hadoop-supported file system
        URI. Each file is read as a single record and returned in a
        key-value pair, where the key is the path of each file and the
        value is the content of each file.
If use_unicode is False, the strings will be kept as `str` (encoding
as `utf-8`), which is faster and smaller than unicode. (Added in
Spark 1.2)
For example, if you have the following files::
hdfs://a-hdfs-path/part-00000
hdfs://a-hdfs-path/part-00001
...
hdfs://a-hdfs-path/part-nnnnn
Do C{rdd = sparkContext.wholeTextFiles("hdfs://a-hdfs-path")},
then C{rdd} contains::
(a-hdfs-path/part-00000, its content)
(a-hdfs-path/part-00001, its content)
...
(a-hdfs-path/part-nnnnn, its content)
.. note:: Small files are preferred, as each file will be loaded
fully in memory.
>>> dirPath = os.path.join(tempdir, "files")
>>> os.mkdir(dirPath)
>>> with open(os.path.join(dirPath, "1.txt"), "w") as file1:
... _ = file1.write("1")
>>> with open(os.path.join(dirPath, "2.txt"), "w") as file2:
... _ = file2.write("2")
>>> textFiles = sc.wholeTextFiles(dirPath)
>>> sorted(textFiles.collect())
[(u'.../1.txt', u'1'), (u'.../2.txt', u'2')]
"""
minPartitions = minPartitions or self.defaultMinPartitions
return RDD(self._jsc.wholeTextFiles(path, minPartitions), self,
PairDeserializer(UTF8Deserializer(use_unicode), UTF8Deserializer(use_unicode)))
def binaryFiles(self, path, minPartitions=None):
"""
.. note:: Experimental
Read a directory of binary files from HDFS, a local file system
(available on all nodes), or any Hadoop-supported file system URI
        as a byte array. Each file is read as a single record and returned
        in a key-value pair, where the key is the path of each file and the
        value is the content of each file.
        .. note:: Small files are preferred; large files are also allowed, but
            may cause poor performance.
"""
minPartitions = minPartitions or self.defaultMinPartitions
return RDD(self._jsc.binaryFiles(path, minPartitions), self,
PairDeserializer(UTF8Deserializer(), NoOpSerializer()))
def binaryRecords(self, path, recordLength):
"""
.. note:: Experimental
Load data from a flat binary file, assuming each record is a set of numbers
with the specified numerical format (see ByteBuffer), and the number of
bytes per record is constant.
:param path: Directory to the input data files
:param recordLength: The length at which to split the records
"""
return RDD(self._jsc.binaryRecords(path, recordLength), self, NoOpSerializer())
def _dictToJavaMap(self, d):
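        # Convert a Python dict into a java.util.HashMap via Py4J; a None or
        # empty dict becomes an empty map.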
jm = self._jvm.java.util.HashMap()
if not d:
d = {}
for k, v in d.items():
jm[k] = v
return jm
def sequenceFile(self, path, keyClass=None, valueClass=None, keyConverter=None,
valueConverter=None, minSplits=None, batchSize=0):
"""
Read a Hadoop SequenceFile with arbitrary key and value Writable class from HDFS,
a local file system (available on all nodes), or any Hadoop-supported file system URI.
The mechanism is as follows:
1. A Java RDD is created from the SequenceFile or other InputFormat, and the key
and value Writable classes
2. Serialization is attempted via Pyrolite pickling
3. If this fails, the fallback is to call 'toString' on each key and value
4. C{PickleSerializer} is used to deserialize pickled objects on the Python side
        :param path: path to sequence file
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter:
:param valueConverter:
:param minSplits: minimum splits in dataset
(default min(2, sc.defaultParallelism))
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
minSplits = minSplits or min(self.defaultParallelism, 2)
jrdd = self._jvm.PythonRDD.sequenceFile(self._jsc, path, keyClass, valueClass,
keyConverter, valueConverter, minSplits, batchSize)
return RDD(jrdd, self)
def newAPIHadoopFile(self, path, inputFormatClass, keyClass, valueClass, keyConverter=None,
valueConverter=None, conf=None, batchSize=0):
"""
Read a 'new API' Hadoop InputFormat with arbitrary key and value class from HDFS,
a local file system (available on all nodes), or any Hadoop-supported file system URI.
The mechanism is the same as for sc.sequenceFile.
A Hadoop configuration can be passed in as a Python dict. This will be converted into a
Configuration in Java
:param path: path to Hadoop file
:param inputFormatClass: fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.input.TextInputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop configuration, passed in as a dict
(None by default)
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.newAPIHadoopFile(self._jsc, path, inputFormatClass, keyClass,
valueClass, keyConverter, valueConverter,
jconf, batchSize)
return RDD(jrdd, self)
def newAPIHadoopRDD(self, inputFormatClass, keyClass, valueClass, keyConverter=None,
valueConverter=None, conf=None, batchSize=0):
"""
Read a 'new API' Hadoop InputFormat with arbitrary key and value class, from an arbitrary
Hadoop configuration, which is passed in as a Python dict.
This will be converted into a Configuration in Java.
The mechanism is the same as for sc.sequenceFile.
:param inputFormatClass: fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.input.TextInputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop configuration, passed in as a dict
(None by default)
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.newAPIHadoopRDD(self._jsc, inputFormatClass, keyClass,
valueClass, keyConverter, valueConverter,
jconf, batchSize)
return RDD(jrdd, self)
def hadoopFile(self, path, inputFormatClass, keyClass, valueClass, keyConverter=None,
valueConverter=None, conf=None, batchSize=0):
"""
Read an 'old' Hadoop InputFormat with arbitrary key and value class from HDFS,
a local file system (available on all nodes), or any Hadoop-supported file system URI.
The mechanism is the same as for sc.sequenceFile.
A Hadoop configuration can be passed in as a Python dict. This will be converted into a
Configuration in Java.
:param path: path to Hadoop file
:param inputFormatClass: fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapred.TextInputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop configuration, passed in as a dict
(None by default)
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.hadoopFile(self._jsc, path, inputFormatClass, keyClass,
valueClass, keyConverter, valueConverter,
jconf, batchSize)
return RDD(jrdd, self)
def hadoopRDD(self, inputFormatClass, keyClass, valueClass, keyConverter=None,
valueConverter=None, conf=None, batchSize=0):
"""
Read an 'old' Hadoop InputFormat with arbitrary key and value class, from an arbitrary
Hadoop configuration, which is passed in as a Python dict.
This will be converted into a Configuration in Java.
The mechanism is the same as for sc.sequenceFile.
:param inputFormatClass: fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapred.TextInputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop configuration, passed in as a dict
(None by default)
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.hadoopRDD(self._jsc, inputFormatClass, keyClass,
valueClass, keyConverter, valueConverter,
jconf, batchSize)
return RDD(jrdd, self)
def _checkpointFile(self, name, input_deserializer):
jrdd = self._jsc.checkpointFile(name)
return RDD(jrdd, self, input_deserializer)
@ignore_unicode_prefix
def union(self, rdds):
"""
Build the union of a list of RDDs.
This supports unions() of RDDs with different serialized formats,
although this forces them to be reserialized using the default
serializer:
>>> path = os.path.join(tempdir, "union-text.txt")
>>> with open(path, "w") as testFile:
... _ = testFile.write("Hello")
>>> textFile = sc.textFile(path)
>>> textFile.collect()
[u'Hello']
>>> parallelized = sc.parallelize(["World!"])
>>> sorted(sc.union([textFile, parallelized]).collect())
[u'Hello', 'World!']
"""
first_jrdd_deserializer = rdds[0]._jrdd_deserializer
if any(x._jrdd_deserializer != first_jrdd_deserializer for x in rdds):
rdds = [x._reserialize() for x in rdds]
first = rdds[0]._jrdd
rest = [x._jrdd for x in rdds[1:]]
return RDD(self._jsc.union(first, rest), self, rdds[0]._jrdd_deserializer)
def broadcast(self, value):
"""
Broadcast a read-only variable to the cluster, returning a
L{Broadcast<pyspark.broadcast.Broadcast>}
object for reading it in distributed functions. The variable will
be sent to each cluster only once.
"""
return Broadcast(self, value, self._pickled_broadcast_vars)
def accumulator(self, value, accum_param=None):
"""
Create an L{Accumulator} with the given initial value, using a given
L{AccumulatorParam} helper object to define how to add values of the
data type if provided. Default AccumulatorParams are used for integers
and floating-point numbers if you do not provide one. For other types,
a custom AccumulatorParam can be used.
"""
if accum_param is None:
if isinstance(value, int):
accum_param = accumulators.INT_ACCUMULATOR_PARAM
elif isinstance(value, float):
accum_param = accumulators.FLOAT_ACCUMULATOR_PARAM
elif isinstance(value, complex):
accum_param = accumulators.COMPLEX_ACCUMULATOR_PARAM
else:
raise TypeError("No default accumulator param for type %s" % type(value))
SparkContext._next_accum_id += 1
return Accumulator(SparkContext._next_accum_id - 1, value, accum_param)
def addFile(self, path, recursive=False):
"""
Add a file to be downloaded with this Spark job on every node.
The C{path} passed can be either a local file, a file in HDFS
(or other Hadoop-supported filesystems), or an HTTP, HTTPS or
FTP URI.
To access the file in Spark jobs, use
L{SparkFiles.get(fileName)<pyspark.files.SparkFiles.get>} with the
filename to find its download location.
A directory can be given if the recursive option is set to True.
Currently directories are only supported for Hadoop-supported filesystems.
.. note:: A path can be added only once. Subsequent additions of the same path are ignored.
>>> from pyspark import SparkFiles
>>> path = os.path.join(tempdir, "test.txt")
>>> with open(path, "w") as testFile:
... _ = testFile.write("100")
>>> sc.addFile(path)
>>> def func(iterator):
... with open(SparkFiles.get("test.txt")) as testFile:
... fileVal = int(testFile.readline())
... return [x * fileVal for x in iterator]
>>> sc.parallelize([1, 2, 3, 4]).mapPartitions(func).collect()
[100, 200, 300, 400]
"""
self._jsc.sc().addFile(path, recursive)
def addPyFile(self, path):
"""
Add a .py or .zip dependency for all tasks to be executed on this
SparkContext in the future. The C{path} passed can be either a local
file, a file in HDFS (or other Hadoop-supported filesystems), or an
HTTP, HTTPS or FTP URI.
.. note:: A path can be added only once. Subsequent additions of the same path are ignored.
"""
self.addFile(path)
(dirname, filename) = os.path.split(path) # dirname may be directory or HDFS/S3 prefix
if filename[-4:].lower() in self.PACKAGE_EXTENSIONS:
self._python_includes.append(filename)
# for tests in local mode
sys.path.insert(1, os.path.join(SparkFiles.getRootDirectory(), filename))
if sys.version > '3':
import importlib
importlib.invalidate_caches()
def setCheckpointDir(self, dirName):
"""
Set the directory under which RDDs are going to be checkpointed. The
directory must be a HDFS path if running on a cluster.
"""
self._jsc.sc().setCheckpointDir(dirName)
def _getJavaStorageLevel(self, storageLevel):
"""
Returns a Java StorageLevel based on a pyspark.StorageLevel.
"""
if not isinstance(storageLevel, StorageLevel):
raise Exception("storageLevel must be of type pyspark.StorageLevel")
newStorageLevel = self._jvm.org.apache.spark.storage.StorageLevel
return newStorageLevel(storageLevel.useDisk,
storageLevel.useMemory,
storageLevel.useOffHeap,
storageLevel.deserialized,
storageLevel.replication)
def setJobGroup(self, groupId, description, interruptOnCancel=False):
"""
Assigns a group ID to all the jobs started by this thread until the group ID is set to a
different value or cleared.
Often, a unit of execution in an application consists of multiple Spark actions or jobs.
Application programmers can use this method to group all those jobs together and give a
group description. Once set, the Spark web UI will associate such jobs with this group.
The application can use L{SparkContext.cancelJobGroup} to cancel all
running jobs in this group.
>>> import threading
>>> from time import sleep
>>> result = "Not Set"
>>> lock = threading.Lock()
>>> def map_func(x):
... sleep(100)
... raise Exception("Task should have been cancelled")
>>> def start_job(x):
... global result
... try:
... sc.setJobGroup("job_to_cancel", "some description")
... result = sc.parallelize(range(x)).map(map_func).collect()
... except Exception as e:
... result = "Cancelled"
... lock.release()
>>> def stop_job():
... sleep(5)
... sc.cancelJobGroup("job_to_cancel")
>>> suppress = lock.acquire()
>>> suppress = threading.Thread(target=start_job, args=(10,)).start()
>>> suppress = threading.Thread(target=stop_job).start()
>>> suppress = lock.acquire()
>>> print(result)
Cancelled
If interruptOnCancel is set to true for the job group, then job cancellation will result
in Thread.interrupt() being called on the job's executor threads. This is useful to help
ensure that the tasks are actually stopped in a timely manner, but is off by default due
to HDFS-1208, where HDFS may respond to Thread.interrupt() by marking nodes as dead.
"""
self._jsc.setJobGroup(groupId, description, interruptOnCancel)
def setLocalProperty(self, key, value):
"""
Set a local property that affects jobs submitted from this thread, such as the
Spark fair scheduler pool.
"""
self._jsc.setLocalProperty(key, value)
def getLocalProperty(self, key):
"""
Get a local property set in this thread, or null if it is missing. See
L{setLocalProperty}
"""
return self._jsc.getLocalProperty(key)
def setJobDescription(self, value):
"""
Set a human readable description of the current job.
"""
self._jsc.setJobDescription(value)
def sparkUser(self):
"""
Get SPARK_USER for user who is running SparkContext.
"""
return self._jsc.sc().sparkUser()
def cancelJobGroup(self, groupId):
"""
Cancel active jobs for the specified group. See L{SparkContext.setJobGroup}
for more information.
"""
self._jsc.sc().cancelJobGroup(groupId)
def cancelAllJobs(self):
"""
Cancel all jobs that have been scheduled or are running.
"""
self._jsc.sc().cancelAllJobs()
def statusTracker(self):
"""
Return :class:`StatusTracker` object
"""
return StatusTracker(self._jsc.statusTracker())
def runJob(self, rdd, partitionFunc, partitions=None, allowLocal=False):
"""
Executes the given partitionFunc on the specified set of partitions,
returning the result as an array of elements.
If 'partitions' is not specified, this will run over all partitions.
>>> myRDD = sc.parallelize(range(6), 3)
>>> sc.runJob(myRDD, lambda part: [x * x for x in part])
[0, 1, 4, 9, 16, 25]
>>> myRDD = sc.parallelize(range(6), 3)
>>> sc.runJob(myRDD, lambda part: [x * x for x in part], [0, 2], True)
[0, 1, 16, 25]
"""
if partitions is None:
partitions = range(rdd._jrdd.partitions().size())
# Implementation note: This is implemented as a mapPartitions followed
# by runJob() in order to avoid having to pass a Python lambda into
# SparkContext#runJob.
mappedRDD = rdd.mapPartitions(partitionFunc)
sock_info = self._jvm.PythonRDD.runJob(self._jsc.sc(), mappedRDD._jrdd, partitions)
return list(_load_from_socket(sock_info, mappedRDD._jrdd_deserializer))
def show_profiles(self):
""" Print the profile stats to stdout """
if self.profiler_collector is not None:
self.profiler_collector.show_profiles()
else:
raise RuntimeError("'spark.python.profile' configuration must be set "
"to 'true' to enable Python profile.")
def dump_profiles(self, path):
""" Dump the profile stats into directory `path`
"""
if self.profiler_collector is not None:
self.profiler_collector.dump_profiles(path)
else:
raise RuntimeError("'spark.python.profile' configuration must be set "
"to 'true' to enable Python profile.")
def getConf(self):
conf = SparkConf()
conf.setAll(self._conf.getAll())
return conf
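# Illustrative sketch (not part of pyspark itself): how the broadcast() and
# accumulator() helpers documented above are commonly combined in a job. The
# SparkContext argument and the sample data are assumptions for the example.
def _demo_broadcast_and_accumulator(sc):
    """Return (sum of looked-up scores, number of records processed)."""
    lookup = sc.broadcast({"a": 1, "b": 2})   # read-only, shipped to executors once
    seen = sc.accumulator(0)                  # default INT_ACCUMULATOR_PARAM
    def score(word):
        seen.add(1)                           # updated on executors, read on the driver
        return lookup.value.get(word, 0)
    total = sc.parallelize(["a", "b", "c", "a"]).map(score).sum()
    return total, seen.value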
def _test():
import atexit
import doctest
import tempfile
globs = globals().copy()
globs['sc'] = SparkContext('local[4]', 'PythonTest')
globs['tempdir'] = tempfile.mkdtemp()
atexit.register(lambda: shutil.rmtree(globs['tempdir']))
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
globs['sc'].stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
|
|
# -*- coding: utf-8 -*-
import pytest
from sqlalchemy.orm import session
from skosprovider.uri import UriPatternGenerator
from skosprovider_sqlalchemy.models import Initialiser
from skosprovider_sqlalchemy.providers import (
SQLAlchemyProvider
)
from tests import DBTestCase
from tests.conftest import create_data, create_visitation
from skosprovider_sqlalchemy.models import (
Base
)
class TestSQLAlchemyProvider(DBTestCase):
def setUp(self):
Base.metadata.create_all(self.engine)
self.session = self.session_maker()
Initialiser(self.session).init_all()
create_data(self.session)
self.provider = SQLAlchemyProvider(
{'id': 'SOORTEN', 'conceptscheme_id': 1},
self.session,
uri_generator=UriPatternGenerator('urn:x-skosprovider-sa:test:%s')
)
def tearDown(self):
self.session.rollback()
session.close_all_sessions()
Base.metadata.drop_all(self.engine)
def test_session_maker(self):
self.provider = SQLAlchemyProvider(
{'id': 'SOORTEN', 'conceptscheme_id': 1},
self.session_maker
)
cs = self.provider.concept_scheme
assert 'urn:x-skosprovider:test' == cs.uri
assert 'en' in cs.languages
assert 'nl' in cs.languages
def test_default_recurse_strategy(self):
assert 'recurse' == self.provider.expand_strategy
def test_instance_scopes(self):
assert 'single' in self.provider.allowed_instance_scopes
assert 'threaded_thread' in self.provider.allowed_instance_scopes
def test_override_expand_strategy(self):
# Set up provider
provider = SQLAlchemyProvider(
{'id': 'SOORTEN', 'conceptscheme_id': 1},
self.session_maker,
expand_strategy='visit'
)
assert 'visit' == provider.expand_strategy
def test_set_invalid_expand_strategy(self):
with pytest.raises(ValueError):
SQLAlchemyProvider(
{'id': 'SOORTEN', 'conceptscheme_id': 1},
self.session,
expand_strategy='invalid'
)
def test_provider_without_cs_id(self):
with pytest.raises(ValueError):
SQLAlchemyProvider(
{'id': 'SOORTEN'},
self.session
)
def test_get_vocabulary_id(self):
assert 'SOORTEN' == self.provider.get_vocabulary_id()
def test_set_uri_generator(self):
from skosprovider.uri import UriPatternGenerator
# Set up provider
provider = SQLAlchemyProvider(
{'id': 'SOORTEN', 'conceptscheme_id': 1},
self.session,
uri_generator=UriPatternGenerator('http://id.example.com/trees/%s')
)
assert 'http://id.example.com/trees/1' == provider.uri_generator.generate(id=1)
def test_gen_uri(self):
from skosprovider_sqlalchemy.models import Concept, ConceptScheme
from skosprovider.uri import UriPatternGenerator
# Set up provider
provider = SQLAlchemyProvider(
{'id': 'SOORTEN', 'conceptscheme_id': 99},
self.session,
uri_generator=UriPatternGenerator('http://id.example.com/trees/%s')
)
c1 = Concept(concept_id=1, conceptscheme=ConceptScheme(id=99, uri='http://id.example.com/trees'))
session = self.session_maker()
session.add(c1)
session.commit()
assert c1.uri is None
c2 = provider.get_by_id(1)
assert c2.uri == 'http://id.example.com/trees/1'
def test_concept_scheme(self):
from skosprovider.skos import (
ConceptScheme
)
cs = self.provider.concept_scheme
assert isinstance(cs, ConceptScheme)
assert 'urn:x-skosprovider:test' == cs.uri
assert 2 == len(cs.languages)
assert 'en' in cs.languages
def test_concept_scheme_is_cached(self):
from skosprovider.skos import (
ConceptScheme
)
assert self.provider._conceptscheme is None
cs = self.provider.concept_scheme
assert self.provider._conceptscheme == cs
def test_get_concept_by_id(self):
from skosprovider.skos import Concept
con = self.provider.get_by_id(1)
assert isinstance(con, Concept)
assert 1 == con.id
assert [3] == con.related
assert [2, 8] == sorted(con.subordinate_arrays)
def test_concept_has_concept_scheme(self):
from skosprovider.skos import (
ConceptScheme
)
con = self.provider.get_by_id(1)
assert isinstance(con.concept_scheme, ConceptScheme)
assert 'urn:x-skosprovider:test' == con.concept_scheme.uri
def test_get_concept_by_id_string(self):
from skosprovider.skos import Concept
con = self.provider.get_by_id('1')
assert isinstance(con, Concept)
assert 1 == con.id
assert [3] == con.related
assert [2, 8] == sorted(con.subordinate_arrays)
def test_get_unexisting_by_id(self):
con = self.provider.get_by_id(404)
assert not con
def test_get_concept_by_uri(self):
cona = self.provider.get_by_id(1)
conb = self.provider.get_by_uri('urn:x-skosprovider:test:1')
assert cona.id == conb.id
assert cona.uri == conb.uri
def test_get_unexisting_by_uri(self):
con = self.provider.get_by_uri('urn:x-skosprovider:test:404')
assert not con
def test_concept_has_correct_note(self):
from skosprovider.skos import Note
cath = self.provider.get_by_id(4)
assert len(cath.notes) == 1
assert isinstance(cath.notes[0], Note)
def test_concept_has_matches(self):
cath = self.provider.get_by_id(4)
assert len(cath.matches.keys()) == 5
assert len(cath.matches['close']) == 1
assert cath.matches['close'][0] == 'http://vocab.getty.edu/aat/300007501'
def test_get_collection_by_id(self):
from skosprovider.skos import Collection
col = self.provider.get_by_id(2)
assert isinstance(col, Collection)
assert 2 == col.id
assert [4, 6] == sorted(col.members)
assert [1] == col.superordinates
def test_collection_has_no_matches(self):
col = self.provider.get_by_id(2)
assert not hasattr(col, 'matches')
def test_get_collection_by_uri(self):
from skosprovider.skos import Collection
cola = self.provider.get_by_id(2)
colb = self.provider.get_by_uri('urn:x-skosprovider:test:2')
assert isinstance(colb, Collection)
assert cola.id == colb.id
assert cola.uri == colb.uri
def test_get_all(self):
all = self.provider.get_all()
assert len(all) == 9
assert {
'id': 1,
'uri': 'urn:x-skosprovider:test:1',
'type': 'concept',
'label': 'Churches'
} in all
assert {
'id': 2,
'uri': 'urn:x-skosprovider:test:2',
'type': 'collection',
'label': 'Churches by function'
} in all
assert {
'id': 3,
'uri': 'urn:x-skosprovider:test:3',
'type': 'concept',
'label': 'Chapels'
} in all
assert {
'id': 4,
'uri': 'urn:x-skosprovider:test:4',
'type': 'concept',
'label': 'Cathedrals'
} in all
assert {
'id': 5,
'uri': 'urn:x-skosprovider:test:5',
'type': 'concept',
'label': 'Boomkapellen'
} in all
assert {
'id': 6,
'uri': 'urn:x-skosprovider:test:6',
'type': 'concept',
'label': 'Parochiekerken'
} in all
assert {
'id': 7,
'uri': 'urn:x-skosprovider:test:7',
'type': 'concept',
'label': 'Hulpkerken'
} in all
def test_get_all_sorted_id_desc(self):
all = self.provider.get_all(sort='id', sort_order='desc')
assert len(all) == 9
assert [9, 8, 7, 6, 5, 4, 3, 2, 1] == [c['id'] for c in all]
def test_get_all_sorted_label(self):
all = self.provider.get_all(sort='label')
assert len(all) == 9
assert [
'Boomkapellen', 'Cathedrals',
'Chapels', 'Churches',
'Churches by function',
'Churchtowers',
'Hulpkerken',
'Parochiekerken',
'Parts of churches'
] == [c['label'] for c in all]
def test_get_all_sorted_sortlabel_desc(self):
all = self.provider.get_all(sort='sortlabel', sort_order='desc')
assert len(all) == 9
assert [
'Parts of churches',
'Parochiekerken',
'Hulpkerken',
'Churchtowers',
'Churches',
'Chapels',
'Cathedrals',
'Boomkapellen',
'Churches by function'
] == [c['label'] for c in all]
def test_get_top_concepts(self):
all = self.provider.get_top_concepts()
assert len(all) == 3
assert {
'id': 1,
'uri': 'urn:x-skosprovider:test:1',
'type': 'concept',
'label': 'Churches'
} in all
assert {
'id': 3,
'uri': 'urn:x-skosprovider:test:3',
'type': 'concept',
'label': 'Chapels'
} in all
assert {
'id': 9,
'uri': 'urn:x-skosprovider:test:9',
'type': 'concept',
'label': 'Churchtowers'
} in all
def test_get_top_concepts_sort_uri_desc(self):
all = self.provider.get_top_concepts(sort='uri', sort_order='desc')
assert len(all) == 3
assert [
'urn:x-skosprovider:test:9',
'urn:x-skosprovider:test:3',
'urn:x-skosprovider:test:1',
] == [c['uri'] for c in all]
def test_get_top_display(self):
all = self.provider.get_top_display()
assert len(all) == 2
assert {
'id': 3,
'uri': 'urn:x-skosprovider:test:3',
'type': 'concept',
'label': 'Chapels'
} in all
assert {
'id': 1,
'uri': 'urn:x-skosprovider:test:1',
'type': 'concept',
'label': 'Churches'
} in all
def test_get_top_display_british_sort_label_desc(self):
all = self.provider.get_top_display(language='en-GB', sort='label', sort_order='desc')
assert len(all) == 2
assert [
'Churches',
'Chapels'
] == [c['label'] for c in all]
def test_get_children_display_unexisting(self):
children = self.provider.get_children_display(700)
assert not children
def test_get_children_display_collection(self):
children = self.provider.get_children_display(2)
assert len(children) == 2
assert {
'id': 4,
'uri': 'urn:x-skosprovider:test:4',
'type': 'concept',
'label': 'Cathedrals'
} in children
def test_get_children_display_collection_sort_id(self):
children = self.provider.get_children_display(2, sort='id')
assert len(children) == 2
assert {
'id': 4,
'uri': 'urn:x-skosprovider:test:4',
'type': 'concept',
'label': 'Cathedrals'
} in children
def test_get_children_display_concept_with_narrower_collection(self):
children = self.provider.get_children_display(1)
assert len(children) == 2
assert {
'id': 2,
'uri': 'urn:x-skosprovider:test:2',
'type': 'collection',
'label': 'Churches by function'
} in children
def test_get_children_display_concept_with_narrower_concept(self):
children = self.provider.get_children_display(3)
assert len(children) == 1
assert {
'id': 5,
'uri': 'urn:x-skosprovider:test:5',
'type': 'concept',
'label': 'Boomkapellen'
} in children
def test_get_children_display_concept_with_no_narrower(self):
children = self.provider.get_children_display(4)
assert len(children) == 0
def test_find_all(self):
all = self.provider.find({})
assert len(all) == 9
def test_find_type_all(self):
all = self.provider.find({'type': 'all'})
assert len(all) == 9
def test_find_type_concept(self):
all = self.provider.find({'type': 'concept'})
assert len(all) == 7
assert {
'id': 2,
'uri': 'urn:x-skosprovider:test:2',
'type': 'collection',
'label': 'Churches by function'
} not in all
def test_find_type_concept_sorted_uri_desc(self):
all = self.provider.find({'type': 'concept'}, sort='uri', sort_order='desc')
assert len(all) == 7
assert [
'urn:x-skosprovider:test:9',
'urn:x-skosprovider:test:7',
'urn:x-skosprovider:test:6',
'urn:x-skosprovider:test:5',
'urn:x-skosprovider:test:4',
'urn:x-skosprovider:test:3',
'urn:x-skosprovider:test:1',
] == [c['uri'] for c in all]
def test_find_type_collection(self):
all = self.provider.find({'type': 'collection'})
assert len(all) == 2
assert {
'id': 2,
'uri': 'urn:x-skosprovider:test:2',
'type': 'collection',
'label': 'Churches by function'
} in all
assert {
'id': 8,
'uri': 'urn:x-skosprovider:test:8',
'type': 'collection',
'label': 'Parts of churches'
} in all
def test_find_label_kerken(self):
all = self.provider.find({'label': 'kerken'})
assert len(all) == 3
assert {
'id': 1,
'uri': 'urn:x-skosprovider:test:1',
'type': 'concept',
'label': 'Churches'
} in all
assert {
'id': 6,
'uri': 'urn:x-skosprovider:test:6',
'type': 'concept',
'label': 'Parochiekerken'
} in all
assert {
'id': 7,
'uri': 'urn:x-skosprovider:test:7',
'type': 'concept',
'label': 'Hulpkerken'
} in all
def test_find_label_churches_type_concept(self):
all = self.provider.find({'label': 'churches', 'type': 'concept'})
assert len(all) == 1
assert {
'id': 1,
'uri': 'urn:x-skosprovider:test:1',
'type': 'concept',
'label': 'Churches'
} in all
def test_find_collection_unexisting(self):
with pytest.raises(ValueError):
self.provider.find({'collection': {'id': 404}})
def test_find_collection_2_depth_default_members(self):
nodepth = self.provider.find({'collection': {'id': 2}})
depth = self.provider.find({
'collection': {
'id': 2,
'depth': 'members'
}
})
assert len(depth) == len(nodepth)
def test_find_collection_2_depth_all(self):
all = self.provider.find({
'collection': {
'id': 2,
'depth': 'all'
}
})
assert len(all) == 3
assert {
'id': 4,
'uri': 'urn:x-skosprovider:test:4',
'type': 'concept',
'label': 'Cathedrals'
} in all
assert {
'id': 6,
'uri': 'urn:x-skosprovider:test:6',
'type': 'concept',
'label': 'Parochiekerken'
} in all
assert {
'id': 7,
'uri': 'urn:x-skosprovider:test:7',
'type': 'concept',
'label': 'Hulpkerken'
} in all
def test_find_collection_2_depth_members(self):
all = self.provider.find({
'collection': {
'id': 2,
'depth': 'members'
}
})
assert len(all) == 2
assert {
'id': 4,
'uri': 'urn:x-skosprovider:test:4',
'type': 'concept',
'label': 'Cathedrals'
} in all
assert {
'id': 6,
'uri': 'urn:x-skosprovider:test:6',
'type': 'concept',
'label': 'Parochiekerken'
} in all
def test_find_matches_no_uri(self):
with pytest.raises(ValueError):
all = self.provider.find({'matches': {}})
def test_find_matches_none(self):
all = self.provider.find({'matches': {
'uri': 'http://vocab.getty.edu/aat/notpresent'
}})
assert len(all) == 0
def test_find_matches_one(self):
all = self.provider.find({'matches': {
'uri': 'http://vocab.getty.edu/aat/300007501'
}})
assert len(all) == 1
assert {
'id': 4,
'uri': 'urn:x-skosprovider:test:4',
'type': 'concept',
'label': 'Cathedrals'
} in all
def test_find_matches_one_close(self):
all = self.provider.find({'matches': {
'type': 'close',
'uri': 'http://vocab.getty.edu/aat/300007501'
}})
assert len(all) == 1
assert {
'id': 4,
'uri': 'urn:x-skosprovider:test:4',
'type': 'concept',
'label': 'Cathedrals'
} in all
def test_find_matches_one_close_inherits_exact(self):
all = self.provider.find({'matches': {
'type': 'close',
'uri': 'http://vocab.getty.edu/aat/300003625'
}})
assert len(all) == 1
assert {
'id': 9,
'uri': 'urn:x-skosprovider:test:9',
'type': 'concept',
'label': 'Churchtowers'
} in all
def test_expand_concept(self):
ids = self.provider.expand(1)
assert [1, 4, 6, 7] == ids
def test_expand_collection(self):
ids = self.provider.expand(2)
assert [4, 6, 7] == ids
def test_expand_collection_without_inference(self):
ids = self.provider.expand(8)
assert [9] == ids
def test_expand_concept_without_narrower(self):
ids = self.provider.expand(5)
assert [5] == ids
def test_expand_unexisting(self):
ids = self.provider.expand(404)
assert not ids
class TestSQLAlchemyProviderExpandVisit(DBTestCase):
def setUp(self):
Base.metadata.create_all(self.engine)
self.session = self.session_maker()
Initialiser(self.session).init_all()
create_data(self.session)
create_visitation(self.session)
self.visitationprovider=SQLAlchemyProvider(
{'id': 'SOORTEN', 'conceptscheme_id': 1},
self.session,
expand_strategy='visit'
)
def tearDown(self):
self.session.rollback()
session.close_all_sessions()
Base.metadata.drop_all(self.engine)
def test_expand_concept_visit(self):
ids = self.visitationprovider.expand(1)
assert ids == [1, 4, 6, 7]
def test_expand_collection_visit(self):
ids = self.visitationprovider.expand(2)
assert ids == [4, 6, 7]
def test_expand_collection_without_inference_visit(self):
ids = self.visitationprovider.expand(8)
assert [9] == ids
def test_expand_concept_without_narrower_visit(self):
ids = self.visitationprovider.expand(4)
assert ids == [4]
def test_expand_unexisting_visit(self):
ids = self.visitationprovider.expand(404)
assert not ids
class TestSQLAlchemyProviderExpandVisitNoVisitation(DBTestCase):
def setUp(self):
Base.metadata.create_all(self.engine)
self.session = self.session_maker()
Initialiser(self.session).init_all()
self.visitationprovider=SQLAlchemyProvider(
{'id': 'SOORTEN', 'conceptscheme_id': 1},
self.session,
expand_strategy='visit'
)
def tearDown(self):
self.session.rollback()
session.close_all_sessions()
Base.metadata.drop_all(self.engine)
def test_expand_concept(self):
ids = self.visitationprovider.expand(1)
assert not ids
def test_expand_collection_visit(self):
ids = self.visitationprovider.expand(2)
assert not ids
def test_expand_concept_without_narrower_visit(self):
ids = self.visitationprovider.expand(3)
assert not ids
def test_expand_unexisting_visit(self):
ids = self.visitationprovider.expand(404)
assert not ids
|
|
#!/usr/bin/env python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License v2.0 with LLVM Exceptions (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
from enum import Enum
from git import Repo
import os
import platform
import shutil
import subprocess
import stat
import sys
from typing import List, Dict
import yaml
from choose_projects import ChooseProjects
class OperatingSystem(Enum):
Linux = 'linux'
Windows = 'windows'
class Configuration:
"""Configuration for running cmake.
The data is mostly read from the file `run_cmake_config.yaml`
residing in the same folder as this script.
"""
def __init__(self, config_file_path: str):
with open(config_file_path) as config_file:
config = yaml.load(config_file, Loader=yaml.SafeLoader)
self._environment = config['environment'] # type: Dict[OperatingSystem, Dict[str, str]]
self.general_cmake_arguments = config['arguments']['general'] # type: List[str]
self._specific_cmake_arguments = config[
'arguments'] # type: Dict[OperatingSystem, List[str]]
self.operating_system = self._detect_os() # type: OperatingSystem
@property
def environment(self) -> Dict[str, str]:
return self._environment[self.operating_system.value]
@property
def specific_cmake_arguments(self) -> List[str]:
return self._specific_cmake_arguments[self.operating_system.value]
@property
def default_projects(self) -> str:
"""Get string of projects enabled by default.
This returns all projects in the monorepo minus the projects that were
excluded for the current platform.
"""
cp = ChooseProjects(None)
return ';'.join(cp.get_all_enabled_projects())
@staticmethod
def _detect_os() -> OperatingSystem:
"""Detect the current operating system."""
if platform.system() == 'Windows':
return OperatingSystem.Windows
return OperatingSystem.Linux
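# Rough sketch of the layout `run_cmake_config.yaml` is expected to have, as
# inferred from the lookups in Configuration above; the concrete keys and
# values in the real file may differ:
#
#   environment:
#     linux:   {CC: clang, CXX: clang++}
#     windows: {CC: cl, CXX: cl}
#   arguments:
#     general: ['-G Ninja', '-D CMAKE_BUILD_TYPE=Release']
#     linux:   ['-D LLVM_ENABLE_LLD=ON']
#     windows: []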
def _select_projects(config: Configuration, projects: str, repo_path: str) -> str:
"""select which projects to build.
if projects == "default", a default configuraiton will be used.
if project == "detect", ChooseProjects is used to magically detect the projects
based on the files modified in HEAD
"""
if projects == "default" or projects is None or len(projects) == 0:
return config.default_projects
if projects == "detect":
cp = ChooseProjects(repo_path)
repo = Repo('.')
patch = repo.git.diff("HEAD~1")
logging.debug(f'diff {patch}')
enabled_projects = ';'.join(cp.choose_projects(patch))
if enabled_projects is None or len(enabled_projects) == 0:
logging.warning('Cannot detect affected projects. Enable all projects')
enabled_projects = cp.get_all_enabled_projects()
return enabled_projects
return projects
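# Minimal sketch (not part of the original script) showing how the three
# accepted values of `projects` resolve; the repository path "." and the
# helper name are assumptions for illustration.
def _demo_select_projects(config: Configuration):
    for spec in ("default", "detect", "clang;lld"):
        print(spec, "->", _select_projects(config, spec, "."))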
def _create_env(config: Configuration) -> Dict[str, str]:
"""Generate the environment variables for cmake."""
env = os.environ.copy()
env.update(config.environment)
return env
def _create_args(config: Configuration, llvm_enable_projects: str, use_cache: bool) -> List[str]:
"""Generate the command line arguments for cmake."""
arguments = [
os.path.join('..', 'llvm'),
'-D LLVM_ENABLE_PROJECTS="{}"'.format(llvm_enable_projects),
]
arguments.extend(config.general_cmake_arguments)
arguments.extend(config.specific_cmake_arguments)
if use_cache:
if 'SCCACHE_DIR' in os.environ:
logging.info("using sccache")
arguments.extend([
'-DCMAKE_C_COMPILER_LAUNCHER=sccache',
'-DCMAKE_CXX_COMPILER_LAUNCHER=sccache',
])
# enable ccache if the path is set in the environment
elif 'CCACHE_DIR' in os.environ:
logging.info("using ccache")
arguments.extend([
'-D LLVM_CCACHE_BUILD=ON',
])
return arguments
def run(projects: str, repo_path: str, config_file_path: str = None, *, dry_run: bool = False):
"""Use cmake to configure the project and create build directory.
Returns build directory and path to created artifacts.
This version works on Linux and Windows.
Returns: exit code of cmake command, build directory, path to CMakeCache.txt, commands.
"""
commands = []
if config_file_path is None:
script_dir = os.path.dirname(__file__)
config_file_path = os.path.join(script_dir, 'run_cmake_config.yaml')
config = Configuration(config_file_path)
build_dir = os.path.abspath(os.path.join(repo_path, 'build'))
if not dry_run:
secure_delete(build_dir)
os.makedirs(build_dir)
commands.append("rm -rf build")
commands.append("mkdir build")
commands.append("cd build")
for k, v in config.environment.items():
if config.operating_system == OperatingSystem.Linux:
commands.append(f'export {k}="{v}"')
else:
commands.append(f'set {k}={v}')
env = _create_env(config)
llvm_enable_projects = _select_projects(config, projects, repo_path)
print('Enabled projects: {}'.format(llvm_enable_projects), flush=True)
arguments = _create_args(config, llvm_enable_projects, True)
cmd = 'cmake ' + ' '.join(arguments)
print('Running cmake with these arguments:\n{}'.format(cmd), flush=True)
if dry_run:
print('Dry run, not invoking CMake!')
return 0, build_dir, [], []
result = subprocess.call(cmd, env=env, shell=True, cwd=build_dir)
commands.append('cmake ' + ' '.join(_create_args(config, llvm_enable_projects, False)))
commands.append('# ^note that compiler cache arguments are omitted')
_link_compile_commands(config, repo_path, build_dir, commands)
return result, build_dir, [os.path.join(build_dir, 'CMakeCache.txt')], commands
def secure_delete(path: str):
"""Try do delete a local folder.
Handle read-only files.
"""
if not os.path.exists(path):
return
def del_rw(action, name, exc):
os.chmod(name, stat.S_IWRITE)
os.unlink(name)
shutil.rmtree(path, onerror=del_rw)
def _link_compile_commands(config: Configuration, repo_path: str, build_dir: str, commands: List[str]):
"""Link compile_commands.json from build to root dir"""
if config.operating_system != OperatingSystem.Linux:
return
source_path = os.path.join(build_dir, 'compile_commands.json')
target_path = os.path.join(repo_path, 'compile_commands.json')
if os.path.exists(target_path):
os.remove(target_path)
os.symlink(source_path, target_path)
commands.append('ln -s $PWD/compile_commands.json ../compile_commands.json')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Run CMake for LLVM.')
parser.add_argument('projects', type=str, nargs='?', default='default')
parser.add_argument('repo_path', type=str, nargs='?', default=os.getcwd())
parser.add_argument('--dryrun', action='store_true')
parser.add_argument('--log-level', type=str, default='WARNING')
args = parser.parse_args()
logging.basicConfig(level=args.log_level, format='%(levelname)-7s %(message)s')
result, _, _, _ = run(args.projects, args.repo_path, dry_run=args.dryrun)
sys.exit(result)
|
|
from .testutils import FullStackTests
import time
import os
from pywb.utils.loaders import load as load_test
from webrecorder.models import User, Collection, Recording
from webrecorder.models.base import BaseAccess
from mock import patch
from itertools import count
load_counter = 0
# ============================================================================
def slow_load(filename):
time.sleep(0.4)
global load_counter
load_counter += 1
return load_test(filename)
# ============================================================================
REC_CDXJ = 'r:500:cdxj'
REC_OPEN = 'r:500:open'
REC_WARC = 'r:500:warc'
REC_INFO = 'r:500:info'
REC_CDXJ_T = REC_CDXJ + ':_'
COLL_CDXJ = 'c:100:cdxj'
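# Redis keys exercised by these tests: the 'r:<rec id>:<suffix>' entries belong
# to recording 500 and 'c:<coll id>:cdxj' to collection 100, the ids fixed via
# set_uuids() in setup_class below.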
# ============================================================================
class BaseCDXJCache(FullStackTests):
@classmethod
def setup_class(cls, *args, **kwargs):
super(BaseCDXJCache, cls).setup_class(*args, **kwargs)
cls.set_uuids('Recording', count(500))
cls.set_uuids('Collection', count(100))
global load_counter
load_counter = 0
def assert_exists(self, key, exists):
def func():
assert exists == self.redis.exists(key.format(user=self.anon_user))
return func
def test_record_1(self):
res = self.testapp.get('/_new/temp/rec/record/mp_/http://httpbin.org/get?food=bar')
assert res.status_code == 302
res = res.follow()
res.charset = 'utf-8'
assert '"food": "bar"' in res.text, res.text
self.sleep_try(0.1, 1.0, self.assert_exists(REC_CDXJ, True))
def test_record_2(self):
# ensure duration of at least 1 sec
time.sleep(1.0)
res = self.testapp.get('/' + self.anon_user + '/temp/500/record/mp_/http://httpbin.org/get?bood=far')
res.charset = 'utf-8'
assert '"bood": "far"' in res.text, res.text
def assert_cdx():
assert len(self.redis.zrange(REC_CDXJ, 0, -1)) == 2
self.sleep_try(0.1, 2.0, assert_cdx)
def test_expire_or_commit_cdxj(self):
assert self.redis.exists(REC_OPEN)
assert len(self.runner.rec_serv.server.application.wr.writer.fh_cache) == 1
self.do_expire_or_commit()
def assert_files_closed():
assert len(self.runner.rec_serv.server.application.wr.writer.fh_cache) == 0
self.sleep_try(0.1, 3.0, assert_files_closed)
def test_download(self):
assert self.redis.hget(REC_INFO, Recording.INDEX_FILE_KEY) is not None
res = self.testapp.get('/{user}/temp/$download'.format(user=self.anon_user))
assert len(res.body) == int(res.headers['Content-Length'])
assert res.headers['Content-Disposition'].startswith("attachment; filename*=UTF-8''temp-")
def test_record_2_closed_not_found(self):
res = self.testapp.get('/' + self.anon_user + '/temp/rec/record/mp_/http://httpbin.org/get?food=bar', status=404)
def test_replay_load_cdxj(self):
assert not self.redis.exists(COLL_CDXJ)
res = self.testapp.get('/{user}/temp/mp_/http://httpbin.org/get?food=bar'.format(user=self.anon_user))
res.charset = 'utf-8'
assert '"food": "bar"' in res.text, res.text
self.sleep_try(0.1, 0.5, self.assert_exists(COLL_CDXJ, True))
assert len(self.redis.zrange(COLL_CDXJ, 0, -1)) == 2
self.do_expire_coll_cdxj()
@patch('webrecorder.models.collection.load', slow_load)
def test_sync_avoid_double_load(self):
self.assert_exists(COLL_CDXJ, False)()
self.assert_exists(REC_CDXJ, False)()
collection = User(redis=self.redis,
my_id=self.anon_user,
access=BaseAccess()).get_collection_by_name('temp')
collection.sync_coll_index(exists=False, do_async=True)
time.sleep(0.1)
self.assert_exists(REC_CDXJ_T, True)()
collection.sync_coll_index(exists=True, do_async=True)
time.sleep(0.1)
self.assert_exists(REC_CDXJ_T, True)()
self.sleep_try(0.1, 0.5, self.assert_exists(REC_CDXJ_T, False))
assert load_counter == 1
def test_check_duration(self):
res = self.testapp.get('/api/v1/collection/temp?user={user}'.format(user=self.anon_user))
assert res.json['collection']['duration'] > 0
assert res.json['collection']['timespan'] > 0
def test_ensure_all_files_delete(self):
user_dir = os.path.join(self.warcs_dir, self.anon_user)
files = os.listdir(user_dir)
assert len(files) == 2
# verify .cdxj is written
assert ((files[0].endswith('.cdxj') and files[1].endswith('.warc.gz')) or
(files[1].endswith('.cdxj') and files[0].endswith('.warc.gz')))
res = self.testapp.delete('/api/v1/recording/500?user={user}&coll=temp'.format(user=self.anon_user))
assert res.json == {'deleted_id': '500'}
def assert_deleted():
assert len(os.listdir(user_dir)) == 0
assert not os.path.isdir(self.storage_today)
self.sleep_try(0.1, 10.0, assert_deleted)
def test_user_timespan(self):
res = self.testapp.get('/api/v1/user/' + self.anon_user)
# modified after delete, should have taken more than 2 seconds to get here
assert res.json['user']['timespan'] > self.min_timespan
# ============================================================================
class TestCDXJCache(BaseCDXJCache):
@classmethod
def setup_class(cls):
super(TestCDXJCache, cls).setup_class(extra_config_file='test_cdxj_cache_config.yaml',
storage_worker=True)
cls.min_timespan = 2
def do_expire_or_commit(self):
self.sleep_try(0.5, 5.0, self.assert_exists(REC_OPEN, False))
self.sleep_try(0.1, 5.0, self.assert_exists(REC_CDXJ, False))
def do_expire_coll_cdxj(self):
self.sleep_try(1.0, 1.0, self.assert_exists(COLL_CDXJ, False))
# ============================================================================
class TestCDXJCacheCommit(BaseCDXJCache):
@classmethod
def setup_class(cls):
super(TestCDXJCacheCommit, cls).setup_class(storage_worker=True)
cls.min_timespan = 1
def do_expire_or_commit(self):
self.params = {}
def assert_committed():
res = self.testapp.post_json('/api/v1/collection/temp/commit?user={user}'.format(user=self.anon_user), params=self.params)
self.params = res.json
assert self.params['success'] == True
self.sleep_try(0.2, 10.0, assert_committed)
assert self.redis.exists(REC_OPEN) == False
assert self.redis.exists(REC_CDXJ) == False
def do_expire_coll_cdxj(self):
self.redis.delete(COLL_CDXJ)
|
|
#!/usr/bin/env python
#
# Root command that delegates to all GreatFET subcommands.
# This file is part of GreatFET.
from __future__ import print_function
import difflib
import errno
import sys
import os
# The prefix which all greatfet-subcommands start with.
GREATFET_PREFIX = 'greatfet_'
def looks_like_valid_greatfet_subcommand(directory, executable):
"""
Returns true iff the given directory/binary pair seem to represent
a valid GreatFET subcommand.
@param directory The directory in the PATH we're currently looking in.
@param executable The binary name, which should be present in the given directory.
@return True iff the given binary appears to be a valid GreatFET command.
"""
full_path = os.path.join(directory, executable)
# Valid GreatFET subcommands start with our prefix.
if not executable.startswith(GREATFET_PREFIX):
return False
# Windows likes to put the binaries right next to their .py scripts
if executable.endswith('.py'):
return False
# Valid GreatFET subcommands are files.
if not os.path.isfile(full_path):
return False
# Valid GreatFET subcommands are executable.
if not os.access(full_path, os.X_OK):
return False
# If all of the checks above pass, we have what looks like a subcommand.
return True
def find_all_subcommands():
"""
Locates all GreatFET subcommands in the user's path -- that is, all
binaries that start with GREATFET_PREFIX.
"""
# If we don't have a PATH variable, we can't have subcommands.
# Bail out.
if 'PATH' not in os.environ:
return []
# Parse the system's PATH variable and get each of the relevant directories.
sys_path = os.environ['PATH']
path_entries = sys_path.split(os.pathsep)
# Search each entry in the path for GreatFET subcommands.
subcommands = {}
for directory in path_entries:
# Skip any path entries that don't seem to be represented on the real system...
if not os.path.isdir(directory):
continue
# ... and search each entry that is a real directory.
for executable in os.listdir(directory):
# If the executable seems to be a GreatFET command, use it.
if looks_like_valid_greatfet_subcommand(directory, executable):
# Cache the relationships between subcommands and their executables.
full_path = os.path.join(directory, executable)
# Strip .exe suffix if applicable
executable = executable[:-4] if executable.endswith('.exe') else executable
subcommand_name = executable[len(GREATFET_PREFIX):]
subcommands[subcommand_name] = full_path
return subcommands
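# Illustrative sketch (not part of the original tool): list every discovered
# subcommand and the executable it resolves to. The helper name is hypothetical.
def _demo_list_subcommands():
    for name, path in sorted(find_all_subcommands().items()):
        print("{:<24} {}".format(name, path))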
def find_subcommand(name, allow_partial_matches=True, print_errors=True):
"""
Returns the full path to the current subcommand, if one exists.
@param name The name of the subcommand to look for.
@param allow_partial_matches If set, this function will accept abbreviated subcommands.
@param print_errors If set, errors will be printed.
@return The full path to the subcommand, or False if none exists.
"""
subcommands = find_all_subcommands()
# If we have a direct match, return it.
if name in subcommands:
return subcommands[name]
# If we're accepting partial matches, look for one.
if allow_partial_matches:
matches = [subcommand for subcommand in subcommands.keys() if subcommand.startswith(name)]
# If we've found exactly one, use it.
if len(matches) == 1:
return subcommands[matches[0]]
# Otherwise, print the error.
elif matches and print_errors:
matches = "\n\t".join(matches)
print("Subcommand short-name '{}' is ambiguous; it could refer to:\n\t{}\n".format(name, matches))
return False
if print_errors:
print("ERROR: Unsupported subcommand '{}'.\nCheck to ensure the package providing the " \
"subcommand is installed.\n".format(name))
return False
def find_corrections_message(name):
"""
Generates a message that provides potential corrections to the user if
their command doesn't match.
"""
# Find a list of "did you mean" style corrections.
corrections = difflib.get_close_matches(name, find_all_subcommands())
# If we didn't find any, don't provide a message.
if not corrections:
return ''
# Otherwise, generate a string that informs the user of their potential error.
plural_suffix = "s are" if len(corrections) > 1 else " is"
corrections.insert(0, "The most similar sub-command{}:".format(plural_suffix))
return "\n\t".join(corrections)
def print_usage(argv):
# If we don't have argument name information, assume this was called "greatfet"
name = os.path.basename(argv[0]) if len(argv) else "greatfet"
print("usage: {} <subcommand>\n".format(name))
print("Top-level utility for working with GreatFET devices.\n")
print("supported subcommands:")
for subcommand in find_all_subcommands():
print("\t{}".format(subcommand))
print("\nTo get help for a subcommand, use '{} <subcommand> --help'.".format(name))
print("For example, for help with the firmware subcommand, use '{} firmware --help'.\n\n".format(name))
print("You can create and install your own subcommands. Simply create an executable with")
print("the name greatfet_<subcommand>, and add it to your path. See the (forthcoming)")
print("documentation for more information.\n")
def main():
"""
Main routine that delegates all commands to GreatFET subcommands.
"""
argv = sys.argv[:]
# If we don't have a subcommand, abort.
if len(argv) < 2:
print_usage(argv)
sys.exit(errno.EINVAL)
# Get the subcommand name.
subcommand_name = argv[1]
# If help is passed as a special-case subcommand, and we have
# more arguments, help the user out by generating a likely-valid help request.
if subcommand_name == "help" and len(argv) > 2:
subcommand_name = argv[2]
argv[1], argv[2] = argv[2], "--help"
# Find the binary to execute...
binary = find_subcommand(subcommand_name)
# If we couldn't find the relevant binary, print a message.
if not binary:
# If there are words that are similar to the relevant word, suggest them as corrections.
corrections = find_corrections_message(subcommand_name)
if corrections:
print(corrections)
print('')
sys.exit(errno.EINVAL)
# Compute the arguments that should be passed to the subcommand,
# which include the subcommand name and all arguments beyond the subcommand.
binary_name = os.path.basename(binary)
arguments = [binary_name] + argv[2:]
# Pass control entirely to the subcommand.
sys.exit(os.spawnv(os.P_WAIT, binary, arguments))
if __name__ == '__main__':
main()
|
|
# -*- coding: utf-8 -*-
import os
from copy import deepcopy
from functools import partial
from functools import update_wrapper
from io import StringIO
from itertools import chain
from tqdm import tqdm
import click
from sacremoses.tokenize import MosesTokenizer, MosesDetokenizer
from sacremoses.truecase import MosesTruecaser, MosesDetruecaser
from sacremoses.normalize import MosesPunctNormalizer
from sacremoses.chinese import simplify, tradify
from sacremoses.util import parallelize_preprocess
# Hack to enable Python2.7 to use encoding.
import sys
import warnings
if sys.version_info[0] < 3:
import io
import warnings
open = io.open
warnings.warn(
str(
"You should really be using Python3!!! "
"Tick tock, tick tock, https://pythonclock.org/"
)
)
CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"])
@click.group(chain=True, context_settings=CONTEXT_SETTINGS)
@click.option(
"--language", "-l", default="en", help="Use language specific rules when tokenizing"
)
@click.option("--processes", "-j", default=1, help="No. of processes.")
@click.option("--encoding", "-e", default="utf8", help="Specify encoding of file.")
@click.option(
"--quiet", "-q", is_flag=True, default=False, help="Disable progress bar."
)
@click.version_option()
def cli(language, encoding, processes, quiet):
pass
@cli.resultcallback()
def process_pipeline(processors, encoding, **kwargs):
with click.get_text_stream("stdin", encoding=encoding) as fin:
iterator = fin # Initialize fin as the first iterator.
for proc in processors:
iterator = proc(list(iterator), **kwargs)
if iterator:
for item in iterator:
click.echo(item)
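# Because the group above is declared with chain=True, several subcommands can
# be strung together in a single stdin-to-stdout pass, e.g. (shell usage,
# illustrative only):
#
#   sacremoses -l en -j 4 normalize tokenize < raw.txt > tokenized.txt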
def processor(f, **kwargs):
"""Helper decorator to rewrite a function so that
it returns another function from it.
"""
def new_func(**kwargs):
def processor(stream, **kwargs):
return f(stream, **kwargs)
return partial(processor, **kwargs)
return update_wrapper(new_func, f, **kwargs)
def parallel_or_not(iterator, func, processes, quiet):
if processes == 1:
for line in iterator:
yield func(line)
else:
for outline in parallelize_preprocess(
func, iterator, processes, progress_bar=(not quiet)
):
yield outline
########################################################################
# Tokenize
########################################################################
@cli.command("tokenize")
@click.option(
"--aggressive-dash-splits",
"-a",
default=False,
is_flag=True,
help="Triggers dash split rules.",
)
@click.option(
"--xml-escape",
"-x",
default=True,
is_flag=True,
help="Escape special characters for XML.",
)
@click.option(
"--protected-patterns",
"-p",
help="Specify file with patters to be protected in tokenisation. Special values: :basic: :web:",
)
@click.option(
"--custom-nb-prefixes",
"-c",
help="Specify a custom non-breaking prefixes file, add prefixes to the default ones from the specified language.",
)
@processor
def tokenize_file(
iterator,
language,
processes,
quiet,
xml_escape,
aggressive_dash_splits,
protected_patterns,
custom_nb_prefixes,
):
moses = MosesTokenizer(
lang=language, custom_nonbreaking_prefixes_file=custom_nb_prefixes
)
if protected_patterns:
if protected_patterns == ":basic:":
protected_patterns = moses.BASIC_PROTECTED_PATTERNS
elif protected_patterns == ":web:":
protected_patterns = moses.WEB_PROTECTED_PATTERNS
else:
with open(protected_patterns, encoding="utf8") as fin:
protected_patterns = [pattern.strip() for pattern in fin.readlines()]
moses_tokenize = partial(
moses.tokenize,
return_str=True,
aggressive_dash_splits=aggressive_dash_splits,
escape=xml_escape,
protected_patterns=protected_patterns,
)
return parallel_or_not(iterator, moses_tokenize, processes, quiet)
########################################################################
# Detokenize
########################################################################
@cli.command("detokenize")
@click.option(
"--xml-unescape",
"-x",
default=True,
is_flag=True,
help="Unescape special characters for XML.",
)
@processor
def detokenize_file(
iterator,
language,
processes,
quiet,
xml_unescape,
):
moses = MosesDetokenizer(lang=language)
moses_detokenize = partial(moses.detokenize, return_str=True, unescape=xml_unescape)
return parallel_or_not(
list(map(str.split, iterator)), moses_detokenize, processes, quiet
)
########################################################################
# Normalize
########################################################################
@cli.command("normalize")
@click.option(
"--normalize-quote-commas",
"-q",
default=True,
is_flag=True,
help="Normalize quotations and commas.",
)
@click.option(
"--normalize-numbers", "-d", default=True, is_flag=True, help="Normalize number."
)
@click.option(
"--replace-unicode-puncts",
"-p",
default=False,
is_flag=True,
help="Replace unicode punctuations BEFORE normalization.",
)
@click.option(
"--remove-control-chars",
"-c",
default=False,
is_flag=True,
help="Remove control characters AFTER normalization.",
)
@processor
def normalize_file(
iterator,
language,
processes,
quiet,
normalize_quote_commas,
normalize_numbers,
replace_unicode_puncts,
remove_control_chars,
):
moses = MosesPunctNormalizer(
language,
norm_quote_commas=normalize_quote_commas,
norm_numbers=normalize_numbers,
pre_replace_unicode_punct=replace_unicode_puncts,
post_remove_control_chars=remove_control_chars,
)
moses_normalize = partial(moses.normalize)
return parallel_or_not(iterator, moses_normalize, processes, quiet)
########################################################################
# Train Truecase
########################################################################
@cli.command("train-truecase")
@click.option(
"--modelfile", "-m", required=True, help="Filename to save the modelfile."
)
@click.option(
"--is-asr",
"-a",
default=False,
is_flag=True,
help="A flag to indicate that model is for ASR.",
)
@click.option(
"--possibly-use-first-token",
"-p",
default=False,
is_flag=True,
help="Use the first token as part of truecasing.",
)
@processor
def train_truecaser(
iterator, language, processes, quiet, modelfile, is_asr, possibly_use_first_token
):
moses = MosesTruecaser(is_asr=is_asr)
# iterator_copy = deepcopy(iterator)
model = moses.train(
iterator,
possibly_use_first_token=possibly_use_first_token,
processes=processes,
progress_bar=(not quiet),
)
moses.save_model(modelfile)
########################################################################
# Truecase
########################################################################
@cli.command("truecase")
@click.option(
"--modelfile", "-m", required=True, help="Filename to save/load the modelfile."
)
@click.option(
"--is-asr",
"-a",
default=False,
is_flag=True,
help="A flag to indicate that model is for ASR.",
)
@click.option(
"--possibly-use-first-token",
"-p",
default=False,
is_flag=True,
help="Use the first token as part of truecase training.",
)
@processor
def truecase_file(
iterator, language, processes, quiet, modelfile, is_asr, possibly_use_first_token
):
# If model file doesn't exists, train a model.
if not os.path.isfile(modelfile):
iterator_copy = deepcopy(iterator)
truecaser = MosesTruecaser(is_asr=is_asr)
model = truecaser.train(
iterator_copy,
possibly_use_first_token=possibly_use_first_token,
processes=processes,
progress_bar=(not quiet),
)
truecaser.save_model(modelfile)
# Truecase the file.
moses = MosesTruecaser(load_from=modelfile, is_asr=is_asr)
moses_truecase = partial(moses.truecase, return_str=True)
return parallel_or_not(iterator, moses_truecase, processes, quiet)
########################################################################
# Detruecase
########################################################################
@cli.command("detruecase")
@click.option(
"--is-headline",
"-a",
default=False,
is_flag=True,
help="Whether the file are headlines.",
)
@processor
def detruecase_file(iterator, language, processes, quiet, is_headline):
moses = MosesDetruecaser()
moses_detruecase = partial(
moses.detruecase, return_str=True, is_headline=is_headline
)
return parallel_or_not(iterator, moses_detruecase, processes, quiet)
|
|
import random
import itertools
import sys
lfalf=[ chr(i) for i in range(ord('a'),ord('z')+1) ]+[ chr(i) for i in range(ord('A'),ord('Z')+1) ]
gencomment="""
Maddisson formula:
DC(G,S) is the sum of k(v) over all nodes of G except the root,
where
k(v)=||M(v),M(parent(v))||-1,
M:G->S is the lca-mapping
and
||v,w|| is the number of edges on the path connecting v and w in S.
The costs from the MAX DC paper should be adjusted by adding 2*|G|-2.
"""
def cluster(s):
if type(s)==str: return set([s])
# union over all children, so multifurcating nodes are handled too
res=set()
for c in s: res|=cluster(c)
return res
# tuple -> string
def pt(s): return str(s).replace("'",'').replace(" ",'')
# string -> tuple
def str2tree(s):
def _st(s):
s=s.strip()
if not s:
raise Exception("String too short: <%s>"%s)
if s[0]=='(':
t1,s=_st(s[1:])
lst=(t1,)
while s[0]==',':
t1,s=_st(s[1:])
lst=lst+(t1,)
if s[0]!=')': raise Exception(") expected")
return (lst,s[1:])
lab=''
while s and s[0].isalnum():
lab=lab+s[0]
s=s[1:]
if not lab:
print("Label expected in tree string")
sys.exit(1)
return (lab,s)
if not s.strip():
print("Warning: empty string")
return []
return _st(s)[0]
def randtree(n,chnum=0):
l=lfalf[:]
c=0
while n>len(l):
l.extend(lab+"%d"%c for lab in lfalf)
c+=1
return _randtree(l[:n],chnum)
def randmultitree(t,mprob):
if type(t)==str: t=str2tree(t)
return _randmultitree(t,mprob)
def _randmultitree(t,mprob):
if type(t)==str: return t
t=tuple([_randmultitree(c,mprob) for c in t])
res=[]
for c in t:
if type(c)!=str and random.random()<mprob:
# flatten this internal child: splice its children into the parent
res.extend(list(c))
continue
res.append(c)
return tuple(res)
def _randtree(lst,chnum=0):
if len(lst)==1: return lst[0]
if chnum<=0:
dp=random.randint(1,len(lst)-1)
return (_randtree(lst[:dp]),_randtree(lst[dp:]))
lf=lst[:]
ch=[]
if chnum:
cher=[]
for i in range(chnum):
cher.append((lf.pop(random.randint(0,len(lf)-1)),lf.pop(random.randint(0,len(lf)-1))))
lf.extend(cher)
while len(lf)>1:
curl1=lf.pop(random.randint(0,len(lf)-1))
curl2=lf.pop(random.randint(0,len(lf)-1))
if chnum and type(curl1)==str==type(curl2):
lf.append(curl1)
lf.append(curl2)
continue
lf.append((curl1,curl2))
return lf[0]
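# Quick illustration (not part of the original module) of the random-tree
# helpers above: a random binary tree over six leaves and a multifurcating
# variant obtained by contracting internal edges with probability 0.5.
def _demo_random_trees(seed=0):
    random.seed(seed)
    t = randtree(6)
    print("binary        :", pt(t))
    print("multifurcating:", pt(randmultitree(t, 0.5)))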
class Tree:
def __init__(self,tup):
if type(tup)==str: tup=str2tree(tup)
self.root=Node(tup,None)
self.nodes=self.root.nodes()
for i,n in enumerate(self.nodes):
n.num=len(self.nodes)-i-1
n.nodeid=i
self.src=tup
self.n=len(self.root.cluster)
# print tup,self.n
# for n in self.nodes:
# print n,n.cluster
# print "LEAV",self.leaves()
def weight(self):
return sum( n.depth for n in self.leaves() )
def leaves(self):
return self.root.leaves()
def __str__(self):
return self.root.__str__()
def lcacluster(self,cluster):
c=set(cluster)
for n in self.nodes:
if c.issubset(set(n.cluster)): return n
def inferinternalmaps(self):
for g in self.nodes:
if g.c:
newmap = g.c[0].map
for c in g.c:
newmap = newmap.lca(c.map)
g.map = newmap
#g.map=reduce(lambda s,g: s.lca(g.map),g.c,g.c[0].map)
def setlcamapping(self,st):
#def setlcamapping(gt,st,**kwargs):
stleaves=st.leaves()
#print "STL",stleaves
for n in self.leaves():
#print "LCA",n,n.cluster
stl=[s for s in stleaves if s.cluster==n.cluster ]
if len(stl)!=1:
raise Exception("Ambiguous mapping: %d candidates. Label: %s" % (len(stl),n.cluster ))
n.map=stl[0]
self.inferinternalmaps()
def cut(self,lf):
return self.root.cut(lf)
def cutedges(self):
return self.root.cutedges()
# Naive alg.
def genrank(self,s):
for n in s.nodes: n.rank=n.height+1
self.setlcamapping(s)
for g in self.nodes:
if not g.c: continue
if len(g.c)!=2:
raise Exception("Binary gene tree expected. Found %s"%g)
r=100000
for i in g.c[0].leaves():
for j in g.c[1].leaves():
r=min(r,i.map.lca(j.map).rank)
g.esr=r
#print g,r
#_setlabel(g,'esr',"%s"%r)
#_setlabel(g,'lcarank',"%s"%g.lcamapping.rank)
# O(d|G|logd +|S|)
def genrankbyalg(self,s,verboselevel=0):
sl=s.leaves()
gl=self.leaves()
for c in sl: c.glist=[]
for i,c in enumerate(gl):
c.id=i+1
c.map.glist.append(c)
for n in self.nodes: n.esrbyalg=0
snodes=sorted(s.nodes[:],key=lambda x: x.rank) # can be directly traversed
def np(Lambda):
return " ".join( "["+",".join("%s%d"%(k,k.id) for k in l)+"]" for l in Lambda)
def merge(n,lst):
res=[]
if verboselevel & 8:
print("==== MERGE %s rank=%d Lambda=%s"%(n,n.rank,np(lst)))
while lst:
minleafid=min(lst[i][0].id for i in range(len(lst)))
for i in range(len(lst)):
if lst[i][0].id==minleafid:
lst[i].pop(0)
if not lst[i]: lst.pop(i)
break
minleaf=gl[minleafid-1]
if res:
lcagnode=res[-1].lca(minleaf)
if verboselevel & 8:
print( " Checking %s%d %s%d g=%s g.rank=%d:"%(res[-1],res[-1].id,
minleaf,minleaf.id,lcagnode,lcagnode.esrbyalg),)
if not lcagnode.esrbyalg:
if verboselevel & 8:
print(" Rank SET!",n.rank)
lcagnode.esrbyalg=n.rank
lcagnode.minr=[res[-1],minleaf]
else:
if verboselevel & 8: print(" Rank Ignored :(")
res.append(minleaf)
if verboselevel & 8:
print("MergeResult",np([res]))
return res
for n in snodes:
if n.leaf(): n.glist=merge(n,[n.glist])
else: n.glist=merge(n,[ c.glist for c in n.c])
# Check correctness with naive alg
for n in self.nodes:
if not n.leaf():
#print n.esr,n.esrbyalg,n
if n.esr!=n.esrbyalg:
print("ERR")
sys.exit(-1)
# O(d|G|+|S|) algorithm
def genrankbyalg2(self,s,verboselevel=0):
sl=s.leaves()
glprefix=self.leaves()
for c in sl: c.glist=[]
for i,c in enumerate(glprefix):
c.id=i+1
c.smap=c.map # set to its label mapping
for n in self.nodes: n.esrbyalg2=0 # init
for n in s.nodes: n.lastgleaf=0 # init
d=max(n.rank for n in s.nodes)
for r in range(1,d+1):
if verboselevel & 8: print("="*30,"rank=%d"%r)
for v in glprefix:
if verboselevel & 8: print(v,v.smap,v.smap.rank)
if v.smap.rank!=r: continue
if v.smap.lastgleaf:
l=v.smap.lastgleaf
lcagnode=v.smap.lastgleaf.lca(v)
if verboselevel & 8:
print("Checking %s%d %s%d g=%s g.rank=%d:"%(l,l.id,
v,v.id,lcagnode,lcagnode.esrbyalg2),)
if not lcagnode.esrbyalg2:
if verboselevel & 8:
print(" Rank SET!",r)
lcagnode.esrbyalg2=r # set rank
lcagnode.minr=[v.smap.lastgleaf,v]
v.smap.lastgleaf=v
v.smap=v.smap.parent # climb
# Check correctness with naive alg
for n in self.nodes:
if not n.leaf():
if n.esr!=n.esrbyalg2:
print(n.esr,n.esrbyalg2,n)
print("ERR2")
sys.exit(-1)
def ppgse(self):
return self.root.ppgse()
def dupcost(self,st):
self.setlcamapping(st)
c=0
for n in self.nodes:
if n.leaf(): continue
if n.c[0].map==n.map or n.c[1].map==n.map: c+=1
return c
def losscost(self,st):
        # only for binary trees: losses = deep coalescence + 2 * duplications
return self.dccost(st)+2*self.dupcost(st)
def losscost2(self,st):
# root has no losses
# single loss in multifurcations
# duplications are not allowed (not checked here)
self.setlcamapping(st)
c=0
for n in self.nodes:
if n==self.root: continue
c+=n.map.depth-n.parent.map.depth-1 # single losses inside the path
# check bottom node
if not n.leaf() and len(n.map.c)>2: c+=1
return c
def dccost(self,st):
self.setlcamapping(st)
c=0
for n in self.nodes:
if n!=self.root:
c+=n.map.depth-n.parent.map.depth-1
return c
class Node:
def __init__(self, tup, par):
self.src=tup
self.cluster=cluster(tup)
self.parent=par
if self.parent:
self.depth=self.parent.depth+1
else:
self.depth=0
if type(tup)==str:
self.height=0
self.c=None
# leaf
else:
self.c = [ Node(t,self) for t in tup ]
self.height=max(c.height for c in self.c)+1
def siblinggen(self):
if self.parent:
for i in self.parent.c:
if i!=self: yield i
    def siblingclustern(self):
        c=set([])
        for i in self.siblinggen(): c.update(i.cluster)  # cluster is an attribute, not a method; update() mutates c
        return c
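    # _cut is expected to be set on every internal node before cutedges() is
    # called: 0 keeps both children, 1 detaches the subtree of the first child,
    # any other value detaches the subtree of the second child.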
def _cutedges(self):
if self.leaf():
return self.label(),[]
t1,f1=self.c[0]._cutedges()
t2,f2=self.c[1]._cutedges()
if self._cut==0:
return (t1,t2),f1+f2
if self._cut==1:
return t2,f1+f2+[t1]
return t1,[t2]+f1+f2
def cutedges(self):
t,f=self._cutedges()
return [t]+f
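    # cut(lf) restricts this subtree to the leaf labels in lf and returns it as a
    # parenthesised string with unary nodes suppressed (None if no leaf survives).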
def cut(self,lf):
if self.leaf():
if self.label() in lf:
return self.label()
return None
lst=[ c.cut(lf) for c in self.c ]
lst=[ c for c in lst if c ]
if len(lst)>1:
return "("+",".join(lst)+")"
if len(lst)==1: return lst[0]
return None
def leaf(self):
return not self.c
def label(self):
return list(self.cluster)[0]
def smp(self):
return "".join(c.label() for c in self.leaves())
def smpd(self):
if self.leaf(): return self.label()
return "|".join(c.smp() for c in self.c)
def __str__(self):
if self.leaf(): return list(self.cluster)[0]
return "("+",".join( c.__str__() for c in self.c)+")"
def __repr__(self):
return self.__str__()
    def nodes(self): # postorder (children before parent)
if self.leaf(): return [ self ]
# children first
return sum((c.nodes() for c in self.c),[])+[self]
def leaves(self):
if self.leaf(): return [ self ]
return sum((c.leaves() for c in self.c),[])
def comparable(self,x):
return self.geq(x) or self.leq(x)
def leq(self,x):
return x.geq(self)
    # True iff x == self or x lies below self
def geq(self,x):
while x:
if x==self: return True
x=x.parent
return False
def lca(self,y):
a,b=self,y
if a.depth>b.depth: a,b=b,a
        # now a.depth <= b.depth
        while b.depth!=a.depth: b=b.parent
        # now a.depth == b.depth
while True:
if a==b: return a
a=a.parent
b=b.parent
def ppgse(self):
s=''
if hasattr(self,'esr'): s+=" esr=%d"%self.esr
if hasattr(self,'rank') and not self.leaf(): s+=" rank=%s"%self.rank
#if hasattr(self,'minr'): s+=" minr='%s_%d-%s_%d' "%(self.minr[0],self.minr[0].id,self.minr[1],self.minr[1].id)
if hasattr(self,'minr'): s+=" minr='%s_{%d}%s_{%d}'"%(self.minr[0],self.minr[0].id,self.minr[1],self.minr[1].id)
if self.leaf():
if hasattr(self,'id'): return self.label()+s+" leaflabel='%s_{%d}'"%(self.label(),self.id)
return self.label()+s
return "("+",".join(c.ppgse() for c in self.c)+")"+s
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from website.models import Question
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding index on 'Question', fields ['reviewed']
db.create_index('website_question', ['reviewed'])
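        # Roughly the SQL South emits here (the index name is auto-generated):
        #   CREATE INDEX ... ON "website_question" ("reviewed");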
'''
print "migrate_temmplatequestion_to_question"
question = Question()
question.migrate_temmplatequestion_to_question()
print "migrate_temmplatequestion_to_question - done"
'''
def backwards(self, orm):
# Removing index on 'Question', fields ['reviewed']
db.delete_index('website_question', ['reviewed'])
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'website.action': {
'Meta': {'object_name': 'Action'},
'action_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.ActionCategory']", 'null': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'question_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.actioncategory': {
'Meta': {'object_name': 'ActionCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'points': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'rating_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'})
},
'website.actiontutorial': {
'Meta': {'object_name': 'ActionTutorial'},
'action_identifier': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Tutorial']", 'null': 'True', 'blank': 'True'})
},
'website.address': {
'Meta': {'object_name': 'Address'},
'address1': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'address2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'})
},
'website.answerchoice': {
'Meta': {'object_name': 'AnswerChoice'},
'answer_choice_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.AnswerChoiceGroup']"}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'website.answerchoicegroup': {
'Meta': {'object_name': 'AnswerChoiceGroup'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'})
},
'website.answerreference': {
'Meta': {'object_name': 'AnswerReference'},
'approval_status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'file_upload': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_callout': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_current': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'migrated_answer_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Question']"}),
'rating': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'rating_status': ('django.db.models.fields.CharField', [], {'default': "'U'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'status_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'website.applicability': {
'Meta': {'object_name': 'Applicability'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'website.application': {
'Meta': {'object_name': 'Application'},
'address': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Address']", 'null': 'True', 'blank': 'True'}),
'applicant': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'current_status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'status_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Template']", 'null': 'True', 'blank': 'True'})
},
'website.applicationanswer': {
'Meta': {'object_name': 'ApplicationAnswer'},
'application': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Application']"}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'file_upload': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Question']"}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Template']"}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'website.applicationhistory': {
'Meta': {'object_name': 'ApplicationHistory'},
'application': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Application']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'}),
'status_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'status_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'})
},
'website.comment': {
'Meta': {'object_name': 'Comment'},
'approval_status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'comment_type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'parent_comment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'parent_reference'", 'null': 'True', 'to': "orm['website.Comment']"}),
'rating': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'rating_status': ('django.db.models.fields.CharField', [], {'default': "'U'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.document': {
'Meta': {'object_name': 'Document'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'file_path': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Region']", 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'})
},
'website.documentcategory': {
'Meta': {'object_name': 'DocumentCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.entityview': {
'Meta': {'object_name': 'EntityView'},
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latest_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'session_key': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.entityviewcount': {
'Meta': {'object_name': 'EntityViewCount'},
'count_30_days': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'total_count': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'website.jurisdiction': {
'Meta': {'object_name': 'Jurisdiction'},
'city': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'county': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction_type': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'last_contributed': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'last_contributed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'last_contributed_by_org': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'_org_contributor'", 'null': 'True', 'to': "orm['website.Organization']"}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'_org_jurisdiction'", 'null': 'True', 'to': "orm['website.Organization']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'parent_jurisdiction'", 'null': 'True', 'to': "orm['website.Jurisdiction']"}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Region']", 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'})
},
'website.jurisdictioncontributor': {
'Meta': {'object_name': 'JurisdictionContributor'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'points': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'question_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.organization': {
'Meta': {'object_name': 'Organization'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.OrganizationCategory']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'fax': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'logo_scaled': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'parent_org': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'phone': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'A'", 'max_length': '8', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'status_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'website.organizationaddress': {
'Meta': {'object_name': 'OrganizationAddress'},
'address': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Address']", 'null': 'True', 'blank': 'True'}),
'address_type': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'})
},
'website.organizationcategory': {
'Meta': {'object_name': 'OrganizationCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.organizationmember': {
'Meta': {'object_name': 'OrganizationMember'},
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invitation_key': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'invitor': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'_member_invitor'", 'null': 'True', 'to': "orm['auth.User']"}),
'join_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Person']", 'null': 'True', 'blank': 'True'}),
'requested_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RoleType']", 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '8', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'_member_user'", 'null': 'True', 'to': "orm['auth.User']"})
},
'website.organizationrating': {
'Meta': {'object_name': 'OrganizationRating'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'updated_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'website.person': {
'Meta': {'object_name': 'Person'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'phone_mobile': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'phone_primary': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'phone_secondary': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.personaddress': {
'Meta': {'object_name': 'PersonAddress'},
'address': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Address']", 'null': 'True', 'blank': 'True'}),
'address_type': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Person']", 'null': 'True', 'blank': 'True'})
},
'website.question': {
'Meta': {'object_name': 'Question'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'answer_choice_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.AnswerChoiceGroup']", 'null': 'True', 'blank': 'True'}),
'applicability': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Applicability']", 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'default_value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'field_attributes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'form_type': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'has_multivalues': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instruction': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'js': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'label': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'qtemplate': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Template']", 'null': 'True'}),
'question': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'terminology': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'validation_class': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'website.questioncategory': {
'Meta': {'object_name': 'QuestionCategory'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'website.questiondependency': {
'Meta': {'object_name': 'QuestionDependency'},
'answer_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'question1': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'_questionDependency_question1'", 'to': "orm['website.Question']"}),
'question2': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'_questionDependency_question2'", 'to': "orm['website.Question']"}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'strength': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'})
},
'website.ratingcategory': {
'Meta': {'object_name': 'RatingCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'rating_type': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'})
},
'website.ratinglevel': {
'Meta': {'object_name': 'RatingLevel'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'rank': ('django.db.models.fields.PositiveSmallIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'website.reaction': {
'Meta': {'object_name': 'Reaction'},
'action': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Action']", 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.ReactionCategory']", 'null': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'question_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
'reaction_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.reactioncategory': {
'Meta': {'object_name': 'ReactionCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'points': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'rating_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'})
},
'website.region': {
'Meta': {'object_name': 'Region'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'})
},
'website.rewardcategory': {
'Meta': {'object_name': 'RewardCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.roletype': {
'Meta': {'object_name': 'RoleType'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.template': {
'Meta': {'object_name': 'Template'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template_type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '8', 'db_index': 'True', 'blank': 'True'})
},
'website.templatequestion': {
'Meta': {'object_name': 'TemplateQuestion'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Question']"}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Template']"})
},
'website.tutorial': {
'Meta': {'object_name': 'Tutorial'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'end_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'start_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'website.tutorialpage': {
'Meta': {'object_name': 'TutorialPage'},
'display_order': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'selector': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'tip': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Tutorial']", 'null': 'True', 'blank': 'True'})
},
'website.usercommentview': {
'Meta': {'object_name': 'UserCommentView'},
'comments_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'last_comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Comment']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'view_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'})
},
'website.userdetail': {
'Meta': {'object_name': 'UserDetail'},
'display_preference': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'notification_preference': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'old_password': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'reset_password_key': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '124', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.userfavorite': {
'Meta': {'object_name': 'UserFavorite'},
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.userrating': {
'Meta': {'object_name': 'UserRating'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'updated_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.userreward': {
'Meta': {'object_name': 'UserReward'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reward': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RewardCategory']", 'null': 'True', 'blank': 'True'}),
'reward_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.usersearch': {
'Meta': {'object_name': 'UserSearch'},
'entity_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'search_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.usertutorialhistory': {
'Meta': {'object_name': 'UserTutorialHistory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Tutorial']", 'null': 'True', 'blank': 'True'}),
'user_email': ('django.db.models.fields.EmailField', [], {'db_index': 'True', 'max_length': '75', 'null': 'True', 'blank': 'True'}),
'view_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'website.usertutorialpagehistory': {
'Meta': {'object_name': 'UserTutorialPageHistory'},
'checked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.TutorialPage']", 'null': 'True', 'blank': 'True'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Tutorial']", 'null': 'True', 'blank': 'True'}),
'user_email': ('django.db.models.fields.EmailField', [], {'db_index': 'True', 'max_length': '75', 'null': 'True', 'blank': 'True'})
},
'website.zipcode': {
'Meta': {'object_name': 'Zipcode'},
'city': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'county': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'db_index': 'True', 'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'db_index': 'True', 'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '2', 'null': 'True', 'blank': 'True'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '10', 'db_index': 'True'})
}
}
complete_apps = ['website']
|
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.com/license.html.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/.
from __future__ import with_statement
from .core import Component
from .util import arity
from .util.concurrency import ThreadLocal, threading
__all__ = ['CacheManager', 'cached']
_id_to_key = {}
def key_to_id(s):
"""Return a hash of the given property key."""
# This is almost the same algorithm as Python's string hash,
# except we only keep a 31-bit result.
result = ord(s[0]) << 7 if s else 0
for c in s:
result = ((1000003 * result) & 0x7fffffff) ^ ord(c)
result ^= len(s)
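    # Remember the reverse mapping so that invalidate() can store a readable
    # key alongside newly created rows in the `cache` table.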
_id_to_key[result] = s
return result
class CachedPropertyBase(object):
"""Base class for cached property descriptors"""
def __init__(self, retriever):
self.retriever = retriever
self.__doc__ = retriever.__doc__
def make_key(self, cls):
attr = self.retriever.__name__
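        # Walk the MRO so a retriever inherited by subclasses is keyed on the
        # class that actually defines it; all subclasses then share one cache entry.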
for base in cls.mro():
if base.__dict__.get(attr) is self:
cls = base
break
return '%s.%s.%s' % (cls.__module__, cls.__name__, attr)
class CachedSingletonProperty(CachedPropertyBase):
"""Cached property descriptor for classes behaving as singletons
in the scope of one `~trac.env.Environment` instance.
This means there will be no more than one cache to monitor in the
database for this kind of cache. Therefore, using only "static"
information for the key is enough. For the same reason it is also
safe to store the corresponding id as a property of the descriptor
instance.
"""
def __get__(self, instance, owner):
if instance is None:
return self
try:
id = self.id
except AttributeError:
id = self.id = key_to_id(self.make_key(owner))
return CacheManager(instance.env).get(id, self.retriever, instance)
def __delete__(self, instance):
try:
id = self.id
except AttributeError:
id = self.id = key_to_id(self.make_key(instance.__class__))
CacheManager(instance.env).invalidate(id)
class CachedProperty(CachedPropertyBase):
"""Cached property descriptor for classes having potentially
multiple instances associated to a single `~trac.env.Environment`
instance.
    As we'll have potentially many different caches to monitor for this
kind of cache, the key needs to be augmented by a string unique to
each instance of the owner class. As the resulting id will be
different for each instance of the owner class, we can't store it
as a property of the descriptor class, so we store it back in the
attribute used for augmenting the key (``key_attr``).
"""
def __init__(self, retriever, key_attr):
super(CachedProperty, self).__init__(retriever)
self.key_attr = key_attr
def __get__(self, instance, owner):
if instance is None:
return self
id = getattr(instance, self.key_attr)
if isinstance(id, str):
id = key_to_id(self.make_key(owner) + ':' + id)
setattr(instance, self.key_attr, id)
return CacheManager(instance.env).get(id, self.retriever, instance)
def __delete__(self, instance):
id = getattr(instance, self.key_attr)
if isinstance(id, str):
id = key_to_id(self.make_key(instance.__class__) + ':' + id)
setattr(instance, self.key_attr, id)
CacheManager(instance.env).invalidate(id)
def cached(fn_or_attr=None):
"""Method decorator creating a cached attribute from a data
retrieval method.
Accessing the cached attribute gives back the cached value. The
data retrieval method is transparently called by the
`CacheManager` on first use after the program start or after the
cache has been invalidated. Invalidating the cache for this value
is done by ``del``\ eting the attribute.
Note that the cache validity is maintained using the `cache` table
in the database. Cache invalidation is performed within a
transaction block, and can be nested within another transaction
block.
When the decorator is used in a class for which instances behave
as singletons within the scope of a given `~trac.env.Environment`
(typically `~trac.core.Component` classes), the key used to
identify the attribute in the database is constructed from the
names of the containing module, class and retriever method::
class WikiSystem(Component):
@cached
def pages(self):
return set(name for name, in self.env.db_query(
"SELECT DISTINCT name FROM wiki"))
Otherwise, when the decorator is used in non-"singleton" objects,
a string specifying the name of an attribute containing a string
unique to the instance must be passed to the decorator. This value
will be appended to the key constructed from module, class and
method name::
class SomeClass(object):
def __init__(self, env, name):
self.env = env
self.name = name
self._metadata_id = name
@cached('_metadata_id')
def metadata(self):
...
Note that in this case the key attribute is overwritten with a
hash of the key on first access, so it should not be used for any
other purpose.
In either case, this decorator requires that the object on which
it is used has an ``env`` attribute containing the application
`~trac.env.Environment`.
.. versionchanged:: 1.0
The data retrieval method used to be called with a single
argument ``db`` containing a reference to a database
connection. This is the same connection that can be retrieved
via the normal `~trac.env.Environment.db_query` or
`~trac.env.Environment.db_transaction`, so this is no longer
    needed, though methods accepting that argument are still
supported (but will be removed in version 1.1.1).
"""
if hasattr(fn_or_attr, '__call__'):
return CachedSingletonProperty(fn_or_attr)
def decorator(fn):
return CachedProperty(fn, fn_or_attr)
return decorator
class CacheManager(Component):
"""Cache manager."""
required = True
def __init__(self):
self._cache = {}
self._local = ThreadLocal(meta=None, cache=None)
self._lock = threading.RLock()
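        # Two-level scheme: `_cache` is shared by all threads of the process and
        # guarded by `_lock`, while `_local` holds per-thread snapshots of the
        # cache and of the `cache` table metadata so that reads can avoid the lock.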
# Public interface
def reset_metadata(self):
"""Reset per-request cache metadata."""
self._local.meta = self._local.cache = None
def get(self, id, retriever, instance):
"""Get cached or fresh data for the given id."""
# Get cache metadata
local_meta = self._local.meta
local_cache = self._local.cache
if local_meta is None:
# First cache usage in this request, retrieve cache metadata
# from the database and make a thread-local copy of the cache
meta = self.env.db_query("SELECT id, generation FROM cache")
self._local.meta = local_meta = dict(meta)
self._local.cache = local_cache = self._cache.copy()
db_generation = local_meta.get(id, -1)
# Try the thread-local copy first
try:
(data, generation) = local_cache[id]
if generation == db_generation:
return data
except KeyError:
pass
with self.env.db_query as db:
with self._lock:
# Get data from the process cache
try:
(data, generation) = local_cache[id] = self._cache[id]
if generation == db_generation:
return data
except KeyError:
generation = None # Force retrieval from the database
# Check if the process cache has the newest version, as it may
# have been updated after the metadata retrieval
for db_generation, in db(
"SELECT generation FROM cache WHERE id=%s", (id,)):
break
else:
db_generation = -1
if db_generation == generation:
return data
# Retrieve data from the database
if arity(retriever) == 2:
data = retriever(instance, db)
else:
data = retriever(instance)
local_cache[id] = self._cache[id] = (data, db_generation)
local_meta[id] = db_generation
return data
def invalidate(self, id):
"""Invalidate cached data for the given id."""
with self.env.db_transaction as db:
with self._lock:
# Invalidate in other processes
# The row corresponding to the cache may not exist in the table
# yet.
# - If the row exists, the UPDATE increments the generation,
# the SELECT returns a row and we're done.
# - If the row doesn't exist, the UPDATE does nothing, but
# starts a transaction. The SELECT then returns nothing,
# and we can safely INSERT a new row.
db("UPDATE cache SET generation=generation+1 WHERE id=%s",
(id,))
if not db("SELECT generation FROM cache WHERE id=%s", (id,)):
db("INSERT INTO cache VALUES (%s, %s, %s)",
(id, 0, _id_to_key.get(id, '<unknown>')))
# Invalidate in this process
self._cache.pop(id, None)
# Invalidate in this thread
try:
del self._local.cache[id]
except (KeyError, TypeError):
pass
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Creates chroots to be used for building.
Uses the chroothelper program to do final processing and chrooting.
"""
import errno
import fcntl
import grp
import itertools
import os
import pwd
import shutil
import sys
import stat
#conary
from conary import conarycfg
from conary import conaryclient
from conary import callbacks
from conary.deps import deps
from conary.lib import util, openpgpkey, sha1helper
#rmake
from rmake import errors
from rmake import compat
from rmake import constants
from rmake.lib import flavorutil
from rmake.lib import rootfactory
def _addModeBits(path, bits):
s = os.lstat(path)
if not stat.S_ISLNK(s.st_mode) and not (s.st_mode & bits == bits):
os.chmod(path, stat.S_IMODE(s.st_mode) | bits)
class ConaryBasedChroot(rootfactory.BasicChroot):
"""
The root manages a root environment, creating and installing
    the necessary files for the root to be usable, and cleaning up
after itself as much as possible.
"""
def __init__(self, jobList, crossJobList, bootstrapJobList, logger, cfg,
csCache=None, chrootCache=None, targetFlavor=None, oldRoot=None):
rootfactory.BasicChroot.__init__(self)
self.cfg = cfg
self.jobList = jobList
self.crossJobList = crossJobList
self.bootstrapJobList = bootstrapJobList
self.callback = None
self.logger = logger
self.csCache = csCache
self.chrootCache = chrootCache
self.chrootFingerprint = None
self.oldRoot = oldRoot
if targetFlavor is not None:
cfg.initializeFlavors()
self.sysroot = flavorutil.getSysRootPath(targetFlavor)
self.rpmRoot = None
self.addDir('/tmp', mode=01777)
self.addDir('/var/tmp', mode=01777)
self.addDir('/etc')
self.addDir('/etc/rmake')
self.addDir('/etc/conary')
self.addDir(self.cfg.tmpDir, mode=01777)
if self.crossJobList:
self.addDir('%s/lib' % self.sysroot)
self.addDir('%s/usr/lib' % self.sysroot)
def moveOldRoot(self, oldRoot, newRoot):
self.logger.info('Moving root from %s to %s for reuse' % (oldRoot,
newRoot))
if os.path.exists(newRoot):
            self.logger.warning(
                'Root already exists at %s - cannot move old root to '
                'that spot' % newRoot)
return False
try:
os.rename(oldRoot, newRoot)
except OSError, err:
self.logger.warning('Could not rename old root %s to %s: %s' % (oldRoot, newRoot, err))
return False
self.cfg.root = newRoot
client = conaryclient.ConaryClient(self.cfg)
try:
assert(client.db.db.schemaVersion)
except Exception, err:
self.logger.warning('Could not access database in old root %s: %s. Removing old root' % (oldRoot, err))
os.rename(newRoot, oldRoot)
return False
return True
def create(self, root):
self.cfg.root = root
rootfactory.BasicChroot.create(self, root)
def install(self):
self.cfg.root = self.root
self._lock(self.root, fcntl.LOCK_SH)
if self.oldRoot:
if self.serverCfg.reuseChroots:
                self.moveOldRoot(self.oldRoot, self.root)
if not self.jobList and not self.crossJobList:
# should only be true in debugging situations
return
client = conaryclient.ConaryClient(self.cfg)
repos = client.getRepos()
if self.chrootCache and hasattr(repos, 'getChangeSetFingerprints'):
self.chrootFingerprint = self._getChrootFingerprint(client)
if self.chrootCache.hasChroot(self.chrootFingerprint):
strFingerprint = sha1helper.sha1ToString(
self.chrootFingerprint)
self.logger.info('restoring cached chroot with '
'fingerprint %s', strFingerprint)
self.chrootCache.restore(self.chrootFingerprint, self.cfg.root)
self.logger.info('chroot fingerprint %s '
'restore done', strFingerprint)
return
def _install(jobList):
self.cfg.flavor = []
openpgpkey.getKeyCache().setPublicPath(
self.cfg.root + '/root/.gnupg/pubring.gpg')
openpgpkey.getKeyCache().setPrivatePath(
self.cfg.root + '/root/.gnupg/secring.gpg')
self.cfg.pubRing = [self.cfg.root + '/root/.gnupg/pubring.gpg']
client = conaryclient.ConaryClient(self.cfg)
client.setUpdateCallback(self.callback)
if self.csCache:
changeSetList = self.csCache.getChangeSets(client.getRepos(),
jobList,
callback=self.callback)
else:
changeSetList = []
try:
updJob, suggMap = client.updateChangeSet(
jobList, keepExisting=False, resolveDeps=False,
recurse=False, checkPathConflicts=False,
fromChangesets=changeSetList,
migrate=True)
except conaryclient.update.NoNewTrovesError:
# since we're migrating, this simply means there were no
# operations to be performed
pass
else:
util.mkdirChain(self.cfg.root + '/root')
client.applyUpdate(updJob, replaceFiles=True,
tagScript=self.cfg.root + '/root/tagscripts')
self._installRPM()
self._touchShadow()
if self.bootstrapJobList:
self.logger.info("Installing initial chroot bootstrap requirements")
oldRoot = self.cfg.dbPath
try:
# Bootstrap troves are installed outside the system DB,
# although it doesn't matter as much in trove builds as it does
# in image builds.
self.cfg.dbPath += '.bootstrap'
_install(self.bootstrapJobList)
finally:
self.cfg.dbPath = oldRoot
if self.jobList:
self.logger.info("Installing chroot requirements")
_install(self.jobList)
if self.crossJobList:
self.logger.info("Installing chroot cross-compile requirements")
oldRoot = self.cfg.root
try:
self.cfg.root += self.sysroot
_install(self.crossJobList)
finally:
self.cfg.root = oldRoot
self._uninstallRPM()
# directories must be traversable and files readable (RMK-1006)
for root, dirs, files in os.walk(self.cfg.root, topdown=True):
for directory in dirs:
_addModeBits(os.sep.join((root, directory)), 05)
for filename in files:
_addModeBits(os.sep.join((root, filename)), 04)
if self.chrootFingerprint:
strFingerprint = sha1helper.sha1ToString(self.chrootFingerprint)
self.logger.info('caching chroot with fingerprint %s',
strFingerprint)
self.chrootCache.store(self.chrootFingerprint, self.cfg.root)
self.logger.info('caching chroot %s done',
strFingerprint)
def _copyInConary(self):
conaryDir = os.path.dirname(sys.modules['conary'].__file__)
self.copyDir(conaryDir)
#self.copyDir(conaryDir,
# '/usr/lib/python2.4/site-packages/conary')
#self.copyDir(conaryDir,
# '/usr/lib64/python2.4/site-packages/conary')
self.copyDir(conaryDir,
'/usr/share/rmake/conary')
if conaryDir.endswith('site-packages/conary'):
self.copyFile('/usr/bin/conary')
self.copyFile('/usr/bin/cvc')
elif os.path.exists(os.path.join(conaryDir, '../commands')):
commandDir = os.path.realpath(os.path.join(conaryDir,'../commands'))
for fname in ['cvc', 'conary']:
self.copyFile(os.path.join(commandDir, fname),
os.path.join('/usr/bin', fname))
# Need to copy perlreqs.pl too
scriptsDir = os.path.realpath(os.path.join(conaryDir,'../scripts'))
if os.path.exists(scriptsDir):
self.copyDir(scriptsDir)
self.copyFile(os.path.join(scriptsDir, 'perlreqs.pl'),
'/usr/libexec/conary/perlreqs.pl')
def _installRPM(self):
"""If needed, choose a version of RPM to use to install the chroot."""
self._uninstallRPM()
if not self.cfg.rpmRequirements:
return
ccfg = conarycfg.ConaryConfiguration(False)
cli = conaryclient.ConaryClient(ccfg)
# Find troves that provide the necessary RPM dep.
found = cli.db.getTrovesWithProvides(self.cfg.rpmRequirements)
tups = list(itertools.chain(*found.values()))
if not tups:
raise errors.ServerError("Could not locate a RPM trove meeting "
"one of these requirements:\n %s"
% ("\n ".join(str(x) for x in self.cfg.rpmRequirements)))
# Search those troves for the python import root.
targetRoot = '/python%s.%s/site-packages' % sys.version_info[:2]
targetPaths = [ targetRoot + '/rpm/__init__.py',
targetRoot + '/rpmmodule.so' ]
roots = set()
for trove in cli.db.getTroves(tups, pristine=False):
for pathId, path, fileId, fileVer in trove.iterFileList():
for targetPath in targetPaths:
if path.endswith(targetPath):
root = path[:-len(targetPath)] + targetRoot
roots.add(root)
# Insert into the search path and do a test import.
if not roots:
raise errors.ServerError("A required RPM trove was found but "
"did not contain a suitable python module "
"(expected python%s.%s)" % sys.version_info[:2])
self.rpmRoot = sorted(roots)[0]
self.logger.info("Using RPM in root %s", self.rpmRoot)
sys.path.insert(0, self.rpmRoot)
__import__('rpm')
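    # For example, under Python 2.6 targetRoot is '/python2.6/site-packages',
    # so a trove file such as
    # '/usr/lib64/python2.6/site-packages/rpm/__init__.py' yields the root
    # '/usr/lib64/python2.6/site-packages', which is prepended to sys.path
    # before the test import of the rpm module.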
def _uninstallRPM(self):
"""Remove a previously-installed RPM from the python path and clear the
module cache."""
if self.rpmRoot:
assert sys.path[0] == self.rpmRoot
del sys.path[0]
self.rpmRoot = None
for name in sys.modules.keys():
if name.split('.')[0] == 'rpm':
del sys.modules[name]
def _touchShadow(self):
# Create shadow files with owner-writable permissions before RPM can
# create them with no permissions. (RMK-1079)
etc = os.path.join(self.root, 'etc')
util.mkdirChain(etc)
for name in (etc + '/shadow', etc + '/gshadow'):
open(name, 'a').close()
os.chmod(name, 0600)
def _getChrootFingerprint(self, client):
job = (sorted(self.jobList) + sorted(self.crossJobList) +
sorted(self.bootstrapJobList))
fingerprints = client.repos.getChangeSetFingerprints(job,
recurse=False, withFiles=True, withFileContents=True,
excludeAutoSource=True, mirrorMode=False)
a = len(self.jobList)
b = a + len(self.crossJobList)
# Make backwards-compatible chroot fingerprints by only appending more
# info if it is set.
# version 1 or later fingerprint
blob = ''.join(fingerprints[:a]) # jobList
if (self.crossJobList or self.bootstrapJobList or
self.cfg.rpmRequirements):
# version 2 or later fingerprint
blob += '\n'
blob += ''.join(fingerprints[a:b]) + '\n' # crossJobList
blob += ''.join(fingerprints[b:]) + '\n' # bootstrapJobList
blob += '\t'.join(str(x) for x in self.cfg.rpmRequirements) + '\n'
return sha1helper.sha1String(blob)
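    # Fingerprint blob layout built above: a version 1 fingerprint is just the
    # concatenated jobList fingerprints; a version 2 fingerprint (used when
    # crossJobList, bootstrapJobList or rpmRequirements are set) additionally
    # appends a newline, the crossJobList and bootstrapJobList fingerprints,
    # and the tab-joined rpmRequirements, each terminated by '\n'. The whole
    # blob is then hashed with SHA-1.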
def invalidateCachedChroot(self):
"""Destroy a cached chroot archive associated with this chroot."""
if self.chrootFingerprint:
self.logger.warning("Removing cached chroot with fingerprint %s",
sha1helper.sha1ToString(self.chrootFingerprint))
self.chrootCache.remove(self.chrootFingerprint)
class rMakeChroot(ConaryBasedChroot):
busyboxDir = '/rbin'
def __init__(self,
buildTrove,
chrootHelperPath,
cfg,
serverCfg,
jobList,
crossJobList,
bootstrapJobList,
logger,
uid=None,
gid=None,
csCache=None,
chrootCache=None,
copyInConary=True,
oldRoot=None,
):
"""
        uid/gid: the uid/gid that should own special files in the chroot
"""
ConaryBasedChroot.__init__(self,
jobList,
crossJobList,
bootstrapJobList,
logger,
cfg,
csCache,
chrootCache,
buildTrove.getFlavor(),
            oldRoot=oldRoot,
)
self.jobId = buildTrove.jobId
self.buildTrove = buildTrove
self.chrootHelperPath = chrootHelperPath
self.serverCfg = serverCfg
self.callback = ChrootCallback(self.buildTrove, logger,
caching=bool(csCache))
self.copyInConary = copyInConary
self.lockFile = None
if copyInConary:
self._copyInConary()
for dir in self.cfg.policyDirs:
if os.path.exists(dir):
self.copyDir(dir)
self._copyInRmake()
def getRoot(self):
return self.cfg.root
def checkSanity(self):
if self.copyInConary:
            # we're just overriding the version of conary used;
            # as long as that's the only sanity check, we can return
            # immediately
return
for job in self.jobList:
if job[0] == 'conary:python':
version = job[2][0].trailingRevision().getVersion()
try:
compat.ConaryVersion(version).checkRequiredVersion()
except errors.RmakeError, error:
errorMsg = str(error) + (' - tried to install version %s in chroot' % version)
raise error.__class__(errorMsg)
def useStandardRoot(self):
return True
def install(self):
self.logger.info('Creating chroot')
ConaryBasedChroot.install(self)
# copy in the tarball files needed for building this package from
# the cache.
self._cacheBuildFiles()
def _cacheBuildFiles(self):
if not self.csCache:
return
client = conaryclient.ConaryClient(self.cfg)
sourceTup = self.buildTrove.getNameVersionFlavor()
sourceTup = (sourceTup[0], sourceTup[1], deps.parseFlavor(''))
trv = self.csCache.getTroves(client.getRepos(), [sourceTup],
withFiles=True)[0]
allFiles = list(trv.iterFileList())
fileContents = [(x[2], x[3]) for x in allFiles]
oldRootLen = len(self.csCache.root)
if fileContents:
self.logger.info('Caching %s files' % len(fileContents))
for path in self.csCache.getFileContentsPaths(client.getRepos(),
fileContents):
newPath = path[oldRootLen:]
self.copyFile(path, '/tmp/cscache/' + newPath,
mode=0755)
def _copyInRmake(self):
# should this be controlled by strict mode too?
rmakeDir = os.path.dirname(sys.modules['rmake'].__file__)
        # don't copy rmake into /usr/lib/python2.4/site-packages,
        # as it's important that we don't muck with the standard file
        # system location for some test runs of rmake inside of rmake
#self.copyDir(rmakeDir)
# just copy to a standard path
self.copyDir(rmakeDir, '/usr/share/rmake/rmake')
def _postInstall(self):
self.createConaryRc()
self.createRmakeUsers()
def createConaryRc(self):
conaryrc = None
try:
if self.canChroot(): # then we will be chrooting into this dir
conaryrc = open('%s/etc/conaryrc.prechroot' % self.cfg.root, 'w')
oldroot = self.cfg.root
self.cfg.root = '/'
try:
self.cfg.storeConaryCfg(conaryrc)
finally:
self.cfg.root = oldroot
else:
conaryrc = open('%s/etc/conaryrc.rmake' % self.cfg.root, 'w')
self.cfg.storeConaryCfg(conaryrc)
except Exception, msg:
self.logger.error("Error writing conaryrc: %s", msg)
conaryrc.close()
def createRmakeUsers(self):
"""Copy passwd/group entries for rmake and rmake-chroot into the chroot.
"""
passwd = open(os.path.join(self.cfg.root, 'etc/passwd'), 'a')
group = open(os.path.join(self.cfg.root, 'etc/group'), 'a')
for name in (constants.rmakeUser, constants.chrootUser):
pwdata = pwd.getpwnam(name)
print >> passwd, ":".join(str(x) for x in pwdata)
grpdata = grp.getgrgid(pwdata.pw_gid)
print >> group, ":".join(str(x) for x in grpdata)
def canChroot(self):
return (pwd.getpwnam(constants.rmakeUser).pw_uid == os.getuid())
def _lock(self, root, mode):
if not self.lockFile:
util.mkdirChain(root)
self.lockFile = open(root + '/lock', 'w+')
os.fchmod(self.lockFile.fileno(), 0666)
try:
fcntl.lockf(self.lockFile.fileno(), mode | fcntl.LOCK_NB)
except IOError, err:
if err.errno != errno.EAGAIN:
raise
return False
else:
return True
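    # Locking convention: install() takes a shared lock (LOCK_SH) on the root,
    # while clean() and unmount() require an exclusive lock (LOCK_EX). Since
    # LOCK_NB is always or'ed in, _lock() returns False instead of blocking
    # when another process holds a conflicting lock.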
def _unlock(self, root):
if not self.lockFile:
return
self.lockFile.close()
self.lockFile = None
def unmount(self, root, raiseError=True):
if not os.path.exists(root):
return True
if self.canChroot():
if self._lock(root, fcntl.LOCK_EX):
self.logger.info('Running chroot helper to unmount...')
util.mkdirChain(root + self.busyboxDir)
rc = os.system('%s --unmount %s' % (self.chrootHelperPath, root))
if rc:
if raiseError:
raise errors.ServerError('Could not unmount old chroot')
return False
else:
self.logger.info("Not unmounting chroot because it is locked "
"by another process")
self._unlock(root)
return True
def clean(self, root, raiseError=True):
if self.canChroot():
if not self._lock(root, fcntl.LOCK_EX):
self.logger.info("Not cleaning chroot because it is locked "
"by another process")
return False
self.logger.info('Running chroot helper to clean/unmount...')
util.mkdirChain(root + self.busyboxDir)
shutil.copy('/sbin/busybox', root + self.busyboxDir + '/busybox')
rc = os.system('%s %s --clean' % (self.chrootHelperPath, root))
if rc:
if raiseError:
raise errors.ServerError(
'Cannot create chroot - chroot helper failed'
' to clean old chroot')
else:
return False
self.logger.debug("removing old chroot tree: %s", root)
# First, remove the conary database
try:
os.unlink(util.joinPaths(root, '/var/lib/conarydb/conarydb'))
except OSError:
pass
# attempt to remove just the /tmp dir first.
# that's where the chroot process should have had all
# of its files. Doing this makes sure we don't remove
# /bin/rm while it might still be needed the next time around.
os.system('rm -rf %s/tmp' % root)
removeFailed = False
if os.path.exists(root + '/tmp'):
removeFailed = True
else:
os.system('rm -rf %s' % root)
if os.path.exists(root):
removeFailed = True
if removeFailed and raiseError:
raise errors.ServerError(
'Cannot create chroot - old root at %s could not be removed.'
' This may happen due to permissions problems such as root'
' owned files, or earlier build processes that have not'
' completely died. Please shut down rmake, kill any remaining'
' rmake processes, and then retry. If that does not work,'
' please remove the old root by hand.' % root)
self._unlock(root)
return not removeFailed
class ExistingChroot(rMakeChroot):
def __init__(self, rootPath, logger, chrootHelperPath):
self.root = rootPath
self.logger = logger
self.chrootHelperPath = chrootHelperPath
self.chrootFingerprint = None
self.lockFile = None
rootfactory.BasicChroot.__init__(self)
self._copyInRmake()
def create(self, root):
rootfactory.BasicChroot.create(self, root)
self._lock(root, fcntl.LOCK_SH)
def install(self):
pass
def getRoot(self):
return self.root
def _postInstall(self):
pass
def checkSanity(self):
pass
class FullRmakeChroot(rMakeChroot):
"""
This chroot contains everything needed to start the rMake chroot.
"""
def __init__(self, *args, **kw):
rMakeChroot.__init__(self, *args, **kw)
self.addMount('/proc', '/proc', type='proc')
self.addMount('/dev/pts', '/dev/pts', type='devpts')
self.addMount('tmpfs', '/dev/shm', type='tmpfs')
self.addDeviceNode('urandom') # needed for ssl and signing
self.addDeviceNode('ptmx') # needed for pty use
self.copyFile('/etc/hosts')
self.copyFile('/etc/resolv.conf')
# make time outputs accurate
if os.path.exists('/etc/localtime'):
self.copyFile('/etc/localtime')
# glibc:runtime should provide a good default nsswitch
if os.path.exists('/etc/nsswitch.conf'):
self.copyFile('/etc/nsswitch.conf')
if self.cfg.copyInConfig:
for option in ['archDirs', 'mirrorDirs',
'siteConfigPath', 'useDirs', 'componentDirs']:
for dir in self.cfg[option]:
if os.path.exists(dir):
self.copyDir(dir)
for option in ['defaultMacros']:
for path in self.cfg[option]:
if os.path.exists(path):
self.copyFile(path)
class ChrootCallback(callbacks.UpdateCallback):
"""
Callback to update trove log as the chroot is created.
@param buildTrove: trove we're creating a chroot for
    @type buildTrove: build.buildtrove.BuildTrove
"""
def __init__(self, buildTrove, logger, caching=True):
callbacks.UpdateCallback.__init__(self)
self.hunk = (0,0)
self.buildTrove = buildTrove
self.logger = logger
self.showedHunk = False
self.caching = caching
def _message(self, text):
self.buildTrove.log(text)
def setChangesetHunk(self, num, total):
self.showedHunk = False
self.hunk = (num, total)
def setUpdateHunk(self, num, total):
self.hunk = (num, total)
def setUpdateJob(self, jobs):
descriptions = []
jobs.sort()
for job in jobs:
if job[2][0]:
n,v,f = job[0], job[2][0], job[2][1]
else:
n,v,f = job[0], job[1][0], job[1][1]
v = '%s/%s' % (v.trailingLabel(), v.trailingRevision())
archDeps = [x.name for x in f.iterDepsByClass(deps.InstructionSetDependency)]
if archDeps:
f = '[is: %s]' % ' '.join(archDeps)
else:
f = ''
if job[2][0]:
action = ''
else:
action = 'Erase '
descriptions.append('%s%s=%s%s' % (action, n,v,f))
if self.hunk[1] > 1:
self._message("installing %d of %d:\n %s" % \
(self.hunk[0], self.hunk[1],
'\n '.join(descriptions)))
else:
self._message("installing: \n %s" % \
('\n '.join(descriptions),))
def downloadingChangeSet(self, got, need):
if self.caching and not self.showedHunk:
            # we display our message here because this is where we know the
            # size, but we only want to display the message once per changeset
self._message("Caching changeset %s of %s (%sKb)" % (
self.hunk + (need/1024 or 1,)))
self.showedHunk = True
"""Utilities for writing code that runs on Python 2 and 3"""
# Copyright (c) 2010-2015 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
import contextlib
import functools
import itertools
import operator
import sys
import types
__author__ = "Benjamin Peterson <[email protected]>"
__version__ = "1.9.0"
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
if sys.platform.startswith("java"):
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object):
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
def __init__(self, name):
self.name = name
def __get__(self, obj, tp):
result = self._resolve()
setattr(obj, self.name, result) # Invokes __set__.
try:
# This is a bit ugly, but it avoids running this again by
# removing this descriptor.
delattr(obj.__class__, self.name)
except AttributeError:
pass
return result
class MovedModule(_LazyDescr):
def __init__(self, name, old, new=None):
super(MovedModule, self).__init__(name)
if PY3:
if new is None:
new = name
self.mod = new
else:
self.mod = old
def _resolve(self):
return _import_module(self.mod)
def __getattr__(self, attr):
_module = self._resolve()
value = getattr(_module, attr)
setattr(self, attr, value)
return value
class _LazyModule(types.ModuleType):
def __init__(self, name):
super(_LazyModule, self).__init__(name)
self.__doc__ = self.__class__.__doc__
def __dir__(self):
attrs = ["__doc__", "__name__"]
attrs += [attr.name for attr in self._moved_attributes]
return attrs
# Subclasses should override this
_moved_attributes = []
class MovedAttribute(_LazyDescr):
def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
super(MovedAttribute, self).__init__(name)
if PY3:
if new_mod is None:
new_mod = name
self.mod = new_mod
if new_attr is None:
if old_attr is None:
new_attr = name
else:
new_attr = old_attr
self.attr = new_attr
else:
self.mod = old_mod
if old_attr is None:
old_attr = name
self.attr = old_attr
def _resolve(self):
module = _import_module(self.mod)
return getattr(module, self.attr)
class _SixMetaPathImporter(object):
"""
A meta path importer to import six.moves and its submodules.
    This class implements a PEP302 finder and loader. It should be compatible
    with Python 2.5 and all existing versions of Python 3.
"""
def __init__(self, six_module_name):
self.name = six_module_name
self.known_modules = {}
def _add_module(self, mod, *fullnames):
for fullname in fullnames:
self.known_modules[self.name + "." + fullname] = mod
def _get_module(self, fullname):
return self.known_modules[self.name + "." + fullname]
def find_module(self, fullname, path=None):
if fullname in self.known_modules:
return self
return None
def __get_module(self, fullname):
try:
return self.known_modules[fullname]
except KeyError:
raise ImportError("This loader does not know module " + fullname)
def load_module(self, fullname):
try:
# in case of a reload
return sys.modules[fullname]
except KeyError:
pass
mod = self.__get_module(fullname)
if isinstance(mod, MovedModule):
mod = mod._resolve()
else:
mod.__loader__ = self
sys.modules[fullname] = mod
return mod
def is_package(self, fullname):
"""
        Return true if the named module is a package.
        We need this method to get correct spec objects with
        Python 3.4 (see PEP 451).
"""
return hasattr(self.__get_module(fullname), "__path__")
def get_code(self, fullname):
"""Return None
Required, if is_package is implemented"""
self.__get_module(fullname) # eventually raises ImportError
return None
get_source = get_code # same as get_code
_importer = _SixMetaPathImporter(__name__)
class _MovedItems(_LazyModule):
"""Lazy loading of moved objects"""
__path__ = [] # mark as package
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("intern", "__builtin__", "sys"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("UserDict", "UserDict", "collections"),
MovedAttribute("UserList", "UserList", "collections"),
MovedAttribute("UserString", "UserString", "collections"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("copyreg", "copy_reg"),
MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("_thread", "thread", "_thread"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
if isinstance(attr, MovedModule):
_importer._add_module(attr, "moves." + attr.name)
del attr
_MovedItems._moved_attributes = _moved_attributes
moves = _MovedItems(__name__ + ".moves")
_importer._add_module(moves, "moves")
class Module_six_moves_urllib_parse(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_parse"""
_urllib_parse_moved_attributes = [
MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
MovedAttribute("urljoin", "urlparse", "urllib.parse"),
MovedAttribute("urlparse", "urlparse", "urllib.parse"),
MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
MovedAttribute("quote", "urllib", "urllib.parse"),
MovedAttribute("quote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote", "urllib", "urllib.parse"),
MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
MovedAttribute("urlencode", "urllib", "urllib.parse"),
MovedAttribute("splitquery", "urllib", "urllib.parse"),
MovedAttribute("splittag", "urllib", "urllib.parse"),
MovedAttribute("splituser", "urllib", "urllib.parse"),
MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
MovedAttribute("uses_params", "urlparse", "urllib.parse"),
MovedAttribute("uses_query", "urlparse", "urllib.parse"),
MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr
Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
"moves.urllib_parse", "moves.urllib.parse")
class Module_six_moves_urllib_error(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_error"""
_urllib_error_moved_attributes = [
MovedAttribute("URLError", "urllib2", "urllib.error"),
MovedAttribute("HTTPError", "urllib2", "urllib.error"),
MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr
Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
"moves.urllib_error", "moves.urllib.error")
class Module_six_moves_urllib_request(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_request"""
_urllib_request_moved_attributes = [
MovedAttribute("urlopen", "urllib2", "urllib.request"),
MovedAttribute("install_opener", "urllib2", "urllib.request"),
MovedAttribute("build_opener", "urllib2", "urllib.request"),
MovedAttribute("pathname2url", "urllib", "urllib.request"),
MovedAttribute("url2pathname", "urllib", "urllib.request"),
MovedAttribute("getproxies", "urllib", "urllib.request"),
MovedAttribute("Request", "urllib2", "urllib.request"),
MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
MovedAttribute("FileHandler", "urllib2", "urllib.request"),
MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
MovedAttribute("urlretrieve", "urllib", "urllib.request"),
MovedAttribute("urlcleanup", "urllib", "urllib.request"),
MovedAttribute("URLopener", "urllib", "urllib.request"),
MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr
Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
"moves.urllib_request", "moves.urllib.request")
class Module_six_moves_urllib_response(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_response"""
_urllib_response_moved_attributes = [
MovedAttribute("addbase", "urllib", "urllib.response"),
MovedAttribute("addclosehook", "urllib", "urllib.response"),
MovedAttribute("addinfo", "urllib", "urllib.response"),
MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr
Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
"moves.urllib_response", "moves.urllib.response")
class Module_six_moves_urllib_robotparser(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_robotparser"""
_urllib_robotparser_moved_attributes = [
MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr
Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
"moves.urllib_robotparser", "moves.urllib.robotparser")
class Module_six_moves_urllib(types.ModuleType):
"""Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
__path__ = [] # mark as package
parse = _importer._get_module("moves.urllib_parse")
error = _importer._get_module("moves.urllib_error")
request = _importer._get_module("moves.urllib_request")
response = _importer._get_module("moves.urllib_response")
robotparser = _importer._get_module("moves.urllib_robotparser")
def __dir__(self):
return ['parse', 'error', 'request', 'response', 'robotparser']
_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
"moves.urllib")
def add_move(move):
"""Add an item to six.moves."""
setattr(_MovedItems, move.name, move)
def remove_move(name):
"""Remove item from six.moves."""
try:
delattr(_MovedItems, name)
except AttributeError:
try:
del moves.__dict__[name]
except KeyError:
raise AttributeError("no such move, %r" % (name,))
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_closure = "__closure__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_func_globals = "__globals__"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_closure = "func_closure"
_func_code = "func_code"
_func_defaults = "func_defaults"
_func_globals = "func_globals"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
try:
callable = callable
except NameError:
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
if PY3:
def get_unbound_function(unbound):
return unbound
create_bound_method = types.MethodType
Iterator = object
else:
def get_unbound_function(unbound):
return unbound.im_func
def create_bound_method(func, obj):
return types.MethodType(func, obj, obj.__class__)
class Iterator(object):
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
if PY3:
def iterkeys(d, **kw):
return iter(d.keys(**kw))
def itervalues(d, **kw):
return iter(d.values(**kw))
def iteritems(d, **kw):
return iter(d.items(**kw))
def iterlists(d, **kw):
return iter(d.lists(**kw))
viewkeys = operator.methodcaller("keys")
viewvalues = operator.methodcaller("values")
viewitems = operator.methodcaller("items")
else:
def iterkeys(d, **kw):
return iter(d.iterkeys(**kw))
def itervalues(d, **kw):
return iter(d.itervalues(**kw))
def iteritems(d, **kw):
return iter(d.iteritems(**kw))
def iterlists(d, **kw):
return iter(d.iterlists(**kw))
viewkeys = operator.methodcaller("viewkeys")
viewvalues = operator.methodcaller("viewvalues")
viewitems = operator.methodcaller("viewitems")
_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
_add_doc(iteritems,
"Return an iterator over the (key, value) pairs of a dictionary.")
_add_doc(iterlists,
"Return an iterator over the (key, [values]) pairs of a dictionary.")
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
unichr = chr
if sys.version_info[1] <= 1:
def int2byte(i):
return bytes((i,))
else:
# This is about 2x faster than the implementation above on 3.2+
int2byte = operator.methodcaller("to_bytes", 1, "big")
byte2int = operator.itemgetter(0)
indexbytes = operator.getitem
iterbytes = iter
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
_assertCountEqual = "assertCountEqual"
_assertRaisesRegex = "assertRaisesRegex"
_assertRegex = "assertRegex"
else:
def b(s):
return s
# Workaround for standalone backslash
def u(s):
return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
unichr = unichr
int2byte = chr
def byte2int(bs):
return ord(bs[0])
def indexbytes(buf, i):
return ord(buf[i])
iterbytes = functools.partial(itertools.imap, ord)
import StringIO
StringIO = BytesIO = StringIO.StringIO
_assertCountEqual = "assertItemsEqual"
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
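# Sketch: b() and u() provide version-independent byte and text literals.
#
#     data = b("raw bytes")   # str on Python 2, latin-1 encoded bytes on Python 3
#     text = u("some text")   # unicode on Python 2, str on Python 3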
def assertCountEqual(self, *args, **kwargs):
return getattr(self, _assertCountEqual)(*args, **kwargs)
def assertRaisesRegex(self, *args, **kwargs):
return getattr(self, _assertRaisesRegex)(*args, **kwargs)
def assertRegex(self, *args, **kwargs):
return getattr(self, _assertRegex)(*args, **kwargs)
if PY3:
exec_ = getattr(moves.builtins, "exec")
def reraise(tp, value, tb=None):
if value is None:
value = tp()
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
else:
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
exec_("""def reraise(tp, value, tb=None):
raise tp, value, tb
""")
if sys.version_info[:2] == (3, 2):
exec_("""def raise_from(value, from_value):
if from_value is None:
raise value
raise value from from_value
""")
elif sys.version_info[:2] > (3, 2):
exec_("""def raise_from(value, from_value):
raise value from from_value
""")
else:
def raise_from(value, from_value):
raise value
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
def print_(*args, **kwargs):
"""The new-style print function for Python 2.4 and 2.5."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
# If the file has an encoding, encode unicode with it.
if (isinstance(fp, file) and
isinstance(data, unicode) and
fp.encoding is not None):
errors = getattr(fp, "errors", None)
if errors is None:
errors = "strict"
data = data.encode(fp.encoding, errors)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
if sys.version_info[:2] < (3, 3):
_print = print_
def print_(*args, **kwargs):
fp = kwargs.get("file", sys.stdout)
flush = kwargs.pop("flush", False)
_print(*args, **kwargs)
if flush and fp is not None:
fp.flush()
_add_doc(reraise, """Reraise an exception.""")
if sys.version_info[0:2] < (3, 4):
def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
def wrapper(f):
f = functools.wraps(wrapped, assigned, updated)(f)
f.__wrapped__ = wrapped
return f
return wrapper
else:
wraps = functools.wraps
if PY3:
@contextlib.contextmanager
def nested(*contexts):
with contextlib.ExitStack() as stack:
yield [stack.enter_context(c) for c in contexts]
else:
nested = contextlib.nested
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
class metaclass(meta):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
return type.__new__(metaclass, 'temporary_class', (), {})
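# Usage sketch (Meta and Base are placeholders): the temporary class returned
# above is replaced by ``Meta('MyClass', (Base,), d)`` as soon as MyClass is
# created, so the same code works on Python 2 and 3:
#
#     class MyClass(with_metaclass(Meta, Base)):
#         pass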
def add_metaclass(metaclass):
"""Class decorator for creating a class with a metaclass."""
def wrapper(cls):
orig_vars = cls.__dict__.copy()
slots = orig_vars.get('__slots__')
if slots is not None:
if isinstance(slots, str):
slots = [slots]
for slots_var in slots:
orig_vars.pop(slots_var)
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
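# Usage sketch (Meta is a placeholder metaclass): the decorator re-creates the
# class through Meta, avoiding the incompatible ``__metaclass__`` and
# ``metaclass=`` syntaxes:
#
#     @add_metaclass(Meta)
#     class MyClass(object):
#         pass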
def python_2_unicode_compatible(klass):
"""
A decorator that defines __unicode__ and __str__ methods under Python 2.
Under Python 3 it does nothing.
To support Python 2 and 3 with a single code base, define a __str__ method
returning text and apply this decorator to the class.
"""
if PY2:
if '__str__' not in klass.__dict__:
raise ValueError("@python_2_unicode_compatible cannot be applied "
"to %s because it doesn't define __str__()." %
klass.__name__)
klass.__unicode__ = klass.__str__
klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
return klass
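# Usage sketch: define only __str__ returning text and decorate the class; on
# Python 2 the decorator installs __unicode__ and a UTF-8 encoding __str__.
#
#     @python_2_unicode_compatible
#     class Greeting(object):
#         def __str__(self):
#             return u'hello'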
# Complete the moves implementation.
# This code is at the end of this module to speed up module loading.
# Turn this module into a package.
__path__ = [] # required for PEP 302 and PEP 451
__package__ = __name__ # see PEP 366 @ReservedAssignment
if globals().get("__spec__") is not None:
__spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
# Remove other six meta path importers, since they cause problems. This can
# happen if six is removed from sys.modules and then reloaded. (Setuptools does
# this for some reason.)
if sys.meta_path:
for i, importer in enumerate(sys.meta_path):
# Here's some real nastiness: Another "instance" of the six module might
# be floating around. Therefore, we can't use isinstance() to check for
# the six meta path importer, since the other six instance will have
# inserted an importer with different class.
if (type(importer).__name__ == "_SixMetaPathImporter" and
importer.name == __name__):
del sys.meta_path[i]
break
del i, importer
# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer)
#!/usr/bin/env python3
# pylint: disable=C0111, redefined-outer-name
from collections import defaultdict, OrderedDict
import os
import time
import tempfile
import copy
import numpy as np
import xarray as xr
import pandas as pd
import pytest
from pyndl import ndl, count, io
TEST_ROOT = os.path.join(os.path.pardir, os.path.dirname(__file__))
FILE_PATH_SIMPLE = os.path.join(TEST_ROOT, "resources/event_file_simple.tab.gz")
FILE_PATH_MULTIPLE_CUES = os.path.join(TEST_ROOT, "resources/event_file_multiple_cues.tab.gz")
REFERENCE_PATH = os.path.join(TEST_ROOT, 'reference/weights_event_file_simple.csv')
REFERENCE_PATH_NDL2 = os.path.join(TEST_ROOT, 'reference/weights_event_file_simple_ndl2.csv')
REFERENCE_PATH_MULTIPLE_CUES_NDL2 = os.path.join(TEST_ROOT, 'reference/weights_event_file_multiple_cues_ndl2.csv')
TMP_PATH = tempfile.mkdtemp()
LAMBDA_ = 1.0
ALPHA = 0.1
BETAS = (0.1, 0.1)
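# Number of rows of the simple event file that go into the first part when the
# file is split for the continued-learning fixtures and tests below.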
CONTINUE_SPLIT_POINT = 3
@pytest.fixture(scope='module')
def result_ndl_threading():
return ndl.ndl(FILE_PATH_SIMPLE, ALPHA, BETAS, method='threading')
@pytest.fixture(scope='module')
def result_ndl_openmp():
return ndl.ndl(FILE_PATH_SIMPLE, ALPHA, BETAS, method='openmp')
@pytest.fixture(scope='module')
def result_dict_ndl():
return ndl.dict_ndl(FILE_PATH_SIMPLE, ALPHA, BETAS)
@pytest.fixture(scope='module')
def result_dict_ndl_generator():
return ndl.dict_ndl(io.events_from_file(FILE_PATH_SIMPLE), ALPHA, BETAS)
@pytest.fixture(scope='module')
def result_dict_ndl_data_array():
return ndl.dict_ndl(FILE_PATH_SIMPLE, ALPHA, BETAS, make_data_array=True)
@pytest.fixture(scope='module')
def result_continue_learning():
events_simple = pd.read_csv(FILE_PATH_SIMPLE, sep="\t")
part_1 = events_simple.head(CONTINUE_SPLIT_POINT)
part_2 = events_simple.tail(len(events_simple) - CONTINUE_SPLIT_POINT)
assert len(part_1) > 0 # pylint: disable=len-as-condition
assert len(part_2) > 0 # pylint: disable=len-as-condition
part_path_1 = os.path.join(TMP_PATH, "event_file_simple_1.tab.gz")
part_path_2 = os.path.join(TMP_PATH, "event_file_simple_2.tab.gz")
part_1.to_csv(part_path_1, header=True, index=None,
sep='\t', columns=["cues", "outcomes"],
compression='gzip')
part_2.to_csv(part_path_2, header=True, index=None,
sep='\t', columns=["cues", "outcomes"],
compression='gzip')
del events_simple, part_1, part_2
result_part = ndl.ndl(part_path_1, ALPHA, BETAS)
result = ndl.ndl(part_path_2, ALPHA, BETAS, weights=result_part)
return result
def test_exceptions():
with pytest.raises(ValueError) as e_info:
ndl.ndl(FILE_PATH_SIMPLE, ALPHA, BETAS, method='threading', weights=1)
assert e_info == 'weights need to be None or xarray.DataArray with method=threading'
with pytest.raises(ValueError) as e_info:
ndl.ndl(FILE_PATH_SIMPLE, ALPHA, BETAS, method='magic')
assert e_info == 'method needs to be either "threading" or "openmp"'
with pytest.raises(ValueError) as e_info:
ndl.dict_ndl(FILE_PATH_SIMPLE, ALPHA, BETAS, weights=1)
assert e_info == 'weights needs to be either defaultdict or None'
with pytest.raises(ValueError) as e_info:
ndl.dict_ndl(FILE_PATH_MULTIPLE_CUES, ALPHA, BETAS, remove_duplicates=None)
assert e_info == 'cues or outcomes needs to be unique: cues "a a"; outcomes "A"; use remove_duplicates=True'
with pytest.raises(ValueError) as e_info:
ndl.ndl(FILE_PATH_SIMPLE, ALPHA, BETAS, method='threading',
n_outcomes_per_job=-1)
assert e_info == "'n_outcomes_per_job' must be larger then one"
with pytest.raises(ValueError) as e_info:
ndl.dict_ndl(FILE_PATH_SIMPLE, ALPHA, BETAS, make_data_array="magic")
assert e_info == "make_data_array must be True or False"
with pytest.raises(ValueError) as e_info:
ndl.dict_ndl(FILE_PATH_SIMPLE, ALPHA, BETAS, remove_duplicates="magic")
assert e_info == "remove_duplicates must be None, True or False"
with pytest.raises(ValueError) as e_info:
ndl.ndl(FILE_PATH_SIMPLE, ALPHA, BETAS, method='threading', remove_duplicates="magic")
assert e_info == "remove_duplicates must be None, True or False"
with pytest.raises(FileNotFoundError, match="No such file or directory") as e_info:
ndl.ndl(FILE_PATH_SIMPLE, ALPHA, BETAS, method='threading', temporary_directory="./magic")
with pytest.raises(ValueError, match="events_per_file has to be larger than 1") as e_info:
ndl.ndl(FILE_PATH_SIMPLE, ALPHA, BETAS, method='threading', events_per_temporary_file=1)
with pytest.raises(AttributeError, match="weights does not have attributes "
"and no attrs argument is given.") as e_info:
ndl.data_array(dict())
    # # Test usually exceeds the memory limit; it demands ~32GB of RAM.
# with pytest.raises(ValueError, match="Neither number of cues nor outcomes "
# "shall exceed 4294967295 for now. See "
# "https://github.com/quantling/pyndl/issues/169") as e_info:
# ndl.ndl(FILE_PATH_SIMPLE, ALPHA, BETAS,
# weights=xr.DataArray(np.zeros(shape=(4294967295 + 1, 1))))
def test_generator_learning():
events = io.events_from_file(FILE_PATH_SIMPLE)
result_ndl_gen = ndl.ndl(events, ALPHA, BETAS, method='threading')
result_ndl = ndl.ndl(FILE_PATH_SIMPLE, ALPHA, BETAS, method='threading')
unequal, unequal_ratio = compare_arrays(FILE_PATH_SIMPLE,
result_ndl_gen,
result_ndl)
print(result_ndl_gen)
print('%.2f ratio unequal' % unequal_ratio)
assert len(unequal) == 0 # pylint: disable=len-as-condition
def test_data_array_cast():
result_ndl = ndl.ndl(FILE_PATH_SIMPLE, ALPHA, BETAS, method='threading')
casted_result = ndl.data_array(result_ndl)
assert isinstance(casted_result, xr.DataArray) and (result_ndl == casted_result).all()
def test_continue_learning_dict():
events_simple = pd.read_csv(FILE_PATH_SIMPLE, sep="\t")
part_1 = events_simple.head(CONTINUE_SPLIT_POINT)
part_2 = events_simple.tail(len(events_simple) - CONTINUE_SPLIT_POINT)
assert len(part_1) > 0 # pylint: disable=len-as-condition
assert len(part_2) > 0 # pylint: disable=len-as-condition
part_path_1 = os.path.join(TMP_PATH, "event_file_simple_1.tab.gz")
part_path_2 = os.path.join(TMP_PATH, "event_file_simple_2.tab.gz")
part_1.to_csv(part_path_1, header=True, index=None,
sep='\t', columns=["cues", "outcomes"],
compression='gzip')
part_2.to_csv(part_path_2, header=True, index=None,
sep='\t', columns=["cues", "outcomes"],
compression='gzip')
del events_simple, part_1, part_2
result_part = ndl.dict_ndl(part_path_1,
ALPHA, BETAS)
result_part_copy = copy.deepcopy(result_part)
result_inplace = ndl.dict_ndl(part_path_2, ALPHA, BETAS,
weights=result_part, inplace=True)
assert result_part is result_inplace
assert result_part != result_part_copy
result_part = ndl.dict_ndl(part_path_1,
ALPHA, BETAS)
result = ndl.dict_ndl(part_path_2,
ALPHA, BETAS, weights=result_part)
assert result_part != result
def test_continue_learning_dict_ndl_data_array(result_dict_ndl, result_dict_ndl_data_array):
continue_from_dict = ndl.dict_ndl(FILE_PATH_SIMPLE, ALPHA, BETAS,
weights=result_dict_ndl)
continue_from_data_array = ndl.dict_ndl(FILE_PATH_SIMPLE, ALPHA, BETAS,
weights=result_dict_ndl_data_array)
unequal, unequal_ratio = compare_arrays(FILE_PATH_SIMPLE,
continue_from_dict,
continue_from_data_array)
print(continue_from_data_array)
print('%.2f ratio unequal' % unequal_ratio)
assert len(unequal) == 0 # pylint: disable=len-as-condition
@pytest.mark.nolinux
def test_continue_learning(result_continue_learning, result_ndl_openmp):
assert result_continue_learning.shape == result_ndl_openmp.shape
assert set(result_continue_learning.coords["outcomes"].values) == set(result_ndl_openmp.coords["outcomes"].values)
assert set(result_continue_learning.coords["cues"].values) == set(result_ndl_openmp.coords["cues"].values)
unequal, unequal_ratio = compare_arrays(FILE_PATH_SIMPLE,
result_continue_learning,
result_ndl_openmp)
print('%.2f ratio unequal' % unequal_ratio)
assert len(unequal) == 0 # pylint: disable=len-as-condition
@pytest.mark.nolinux
def test_save_to_netcdf4(result_ndl_openmp):
weights = result_ndl_openmp.copy() # avoids changing shared test data
path = os.path.join(TMP_PATH, "weights.nc")
weights.to_netcdf(path)
weights_read = xr.open_dataarray(path)
    # does not preserve the order of the OrderedDict
for key, value in weights.attrs.items():
assert value == weights_read.attrs[key]
weights_continued = ndl.ndl(FILE_PATH_SIMPLE, ALPHA, BETAS, method='openmp', weights=weights)
path_continued = os.path.join(TMP_PATH, "weights_continued.nc")
weights_continued.to_netcdf(path_continued)
weights_continued_read = xr.open_dataarray(path_continued)
for key, value in weights_continued.attrs.items():
assert value == weights_continued_read.attrs[key]
@pytest.mark.nolinux
def test_return_values(result_dict_ndl, result_dict_ndl_data_array, result_ndl_threading, result_ndl_openmp):
# dict_ndl
assert isinstance(result_dict_ndl, defaultdict)
assert isinstance(result_dict_ndl_data_array, xr.DataArray)
# openmp
assert isinstance(result_ndl_openmp, xr.DataArray)
# threading
assert isinstance(result_ndl_threading, xr.DataArray)
@pytest.mark.nolinux
def test_provide_temporary_directory():
with tempfile.TemporaryDirectory(dir=TMP_PATH) as temporary_directory:
ndl.ndl(FILE_PATH_SIMPLE, ALPHA, BETAS, temporary_directory=temporary_directory)
# Test internal consistency
def test_dict_ndl_vs_ndl_threading(result_dict_ndl, result_ndl_threading):
unequal, unequal_ratio = compare_arrays(FILE_PATH_SIMPLE, result_dict_ndl,
result_ndl_threading)
print('%.2f ratio unequal' % unequal_ratio)
assert len(unequal) == 0 # pylint: disable=len-as-condition
def test_dict_ndl_vs_dict_ndl_generator(result_dict_ndl, result_dict_ndl_generator):
unequal, unequal_ratio = compare_arrays(FILE_PATH_SIMPLE, result_dict_ndl,
result_dict_ndl_generator)
print('%.2f ratio unequal' % unequal_ratio)
assert len(unequal) == 0 # pylint: disable=len-as-condition
def test_dict_ndl_data_array_vs_ndl_threading(result_ndl_threading):
result_dict_ndl = ndl.dict_ndl(FILE_PATH_SIMPLE, ALPHA, BETAS, make_data_array=True)
unequal, unequal_ratio = compare_arrays(FILE_PATH_SIMPLE, result_dict_ndl,
result_ndl_threading)
print('%.2f ratio unequal' % unequal_ratio)
assert len(unequal) == 0 # pylint: disable=len-as-condition
def test_ordering_of_temporary_event_files(result_dict_ndl):
result_ndl = ndl.ndl(FILE_PATH_SIMPLE, ALPHA, BETAS, method='threading',
events_per_temporary_file=2)
unequal, unequal_ratio = compare_arrays(FILE_PATH_SIMPLE, result_dict_ndl,
result_ndl)
print('%.2f ratio unequal' % unequal_ratio)
assert len(unequal) == 0
def test_multiple_cues_dict_ndl_vs_ndl_threading():
result_dict_ndl = ndl.dict_ndl(FILE_PATH_MULTIPLE_CUES, ALPHA, BETAS, remove_duplicates=True)
result_ndl_threading = ndl.ndl(FILE_PATH_MULTIPLE_CUES, ALPHA, BETAS, remove_duplicates=True, method='threading')
unequal, unequal_ratio = compare_arrays(FILE_PATH_MULTIPLE_CUES, result_dict_ndl,
result_ndl_threading)
print('%.2f ratio unequal' % unequal_ratio)
assert len(unequal) == 0 # pylint: disable=len-as-condition
@pytest.mark.nolinux
def test_dict_ndl_vs_ndl_openmp(result_dict_ndl, result_ndl_openmp):
result_dict_ndl = ndl.dict_ndl(FILE_PATH_SIMPLE, ALPHA, BETAS)
unequal, unequal_ratio = compare_arrays(FILE_PATH_SIMPLE, result_dict_ndl,
result_ndl_openmp)
print('%.2f ratio unequal' % unequal_ratio)
assert len(unequal) == 0 # pylint: disable=len-as-condition
@pytest.mark.nolinux
def test_meta_data(result_dict_ndl, result_dict_ndl_data_array, result_ndl_openmp, result_ndl_threading):
attributes = {'cython', 'cpu_time', 'hostname', 'xarray', 'wall_time',
              'event_path', 'number_events', 'username', 'method', 'date', 'numpy',
              'betas', 'lambda', 'pyndl', 'alpha', 'pandas', 'function'}
results = [result_dict_ndl, result_dict_ndl_data_array, result_ndl_threading, result_ndl_openmp]
for result in results:
assert set(result.attrs.keys()) == attributes
assert int(result_dict_ndl_data_array.attrs['number_events']) > 0
assert len(set(
[result.attrs['number_events'].strip()
for result in results]
)) == 1
# Test against external ndl2 results
def test_compare_weights_ndl2(result_dict_ndl):
"""
Checks whether the output of the R learner implemented in ndl2 and the
python implementation of dict_ndl is equal.
R code to generate the results::
library(ndl2)
learner <- learnWeightsTabular('event_file_simple.tab.gz', alpha=0.1, beta=0.1, lambda=1.0)
wm <- learner$getWeights()
wm <- wm[order(rownames(wm)), order(colnames(wm))]
write.csv(wm, 'weights_event_file_simple_ndl2.csv')
"""
result_ndl2 = defaultdict(lambda: defaultdict(float))
with open(REFERENCE_PATH, 'rt') as reference_file:
first_line = reference_file.readline().strip()
outcomes = first_line.split(',')[1:]
outcomes = [outcome.strip('"') for outcome in outcomes]
for line in reference_file:
cue, *cue_weights = line.strip().split(',')
cue = cue.strip('"')
for ii, outcome in enumerate(outcomes):
result_ndl2[outcome][cue] = float(cue_weights[ii])
unequal, unequal_ratio = compare_arrays(FILE_PATH_SIMPLE, result_ndl2, result_dict_ndl)
print(set(outcome for outcome, *_ in unequal))
print('%.2f ratio unequal' % unequal_ratio)
assert len(unequal) == 0 # pylint: disable=len-as-condition
def test_multiple_cues_dict_ndl_vs_ndl2():
"""
Checks whether the output of the R learner implemented in ndl2 and the
python implementation of dict_ndl is equal.
R code to generate the results::
library(ndl2)
learner <- learnWeightsTabular('tests/resources/event_file_multiple_cues.tab.gz',
alpha=0.1, beta=0.1, lambda=1.0, removeDuplicates=FALSE)
wm <- learner$getWeights()
wm <- wm[order(rownames(wm)), order(colnames(wm))]
write.csv(wm, 'tests/reference/weights_event_file_multiple_cues_ndl2.csv')
"""
result_ndl2 = defaultdict(lambda: defaultdict(float))
with open(REFERENCE_PATH_MULTIPLE_CUES_NDL2, 'rt') as reference_file:
first_line = reference_file.readline().strip()
outcomes = first_line.split(',')[1:]
outcomes = [outcome.strip('"') for outcome in outcomes]
for line in reference_file:
cue, *cue_weights = line.strip().split(',')
cue = cue.strip('"')
for ii, outcome in enumerate(outcomes):
result_ndl2[outcome][cue] = float(cue_weights[ii])
result_python = ndl.dict_ndl(FILE_PATH_MULTIPLE_CUES, ALPHA, BETAS, remove_duplicates=False)
unequal, unequal_ratio = compare_arrays(FILE_PATH_MULTIPLE_CUES, result_ndl2, result_python)
print(set(outcome for outcome, *_ in unequal))
print('%.2f ratio unequal' % unequal_ratio)
assert len(unequal) == 0 # pylint: disable=len-as-condition
def test_compare_weights_rescorla_vs_ndl2():
"""
Checks whether the output of the R learner implemented in ndl2 and the
python implementation of dict_ndl is equal.
R code to generate the results::
library(ndl2)
learner <- learnWeightsTabular('tests/resources/event_file_simple.tab.gz', alpha=0.1, beta=0.1, lambda=1.0)
wm <- learner$getWeights()
wm <- wm[order(rownames(wm)), order(colnames(wm))]
write.csv(wm, 'tests/reference/weights_event_file_simple_ndl2.csv')
"""
result_ndl2 = defaultdict(lambda: defaultdict(float))
with open(REFERENCE_PATH, 'rt') as reference_file:
first_line = reference_file.readline().strip()
outcomes = first_line.split(',')[1:]
outcomes = [outcome.strip('"') for outcome in outcomes]
for line in reference_file:
cue, *cue_weights = line.strip().split(',')
cue = cue.strip('"')
for ii, outcome in enumerate(outcomes):
result_ndl2[outcome][cue] = float(cue_weights[ii])
result_rescorla = defaultdict(lambda: defaultdict(float))
with open(REFERENCE_PATH_NDL2, 'rt') as reference_file:
first_line = reference_file.readline().strip()
outcomes = first_line.split(',')[1:]
outcomes = [outcome.strip('"') for outcome in outcomes]
for line in reference_file:
cue, *cue_weights = line.strip().split(',')
cue = cue.strip('"')
for ii, outcome in enumerate(outcomes):
result_rescorla[outcome][cue] = float(cue_weights[ii])
unequal, unequal_ratio = compare_arrays(FILE_PATH_SIMPLE, result_ndl2, result_rescorla)
print('%.2f ratio unequal' % unequal_ratio)
assert len(unequal) == 0 # pylint: disable=len-as-condition
@pytest.mark.runslow
def test_compare_time_dict_inplace_parallel_thread():
file_path = os.path.join(TEST_ROOT, 'resources/event_file_many_cues.tab.gz')
result_dict_ndl, duration_not_parallel = clock(ndl.dict_ndl, (file_path, ALPHA, BETAS, LAMBDA_))
result_thread_ndl, duration_parallel = clock(ndl.ndl,
(file_path, ALPHA, BETAS, LAMBDA_),
n_jobs=4, method='threading')
assert len(result_dict_ndl) == len(result_thread_ndl)
unequal, unequal_ratio = compare_arrays(file_path, result_thread_ndl, result_dict_ndl)
print('%.2f ratio unequal' % unequal_ratio)
assert len(unequal) == 0 # pylint: disable=len-as-condition
print('parallel: %.3e dict: %.3e' % (duration_parallel, duration_not_parallel))
assert duration_parallel < duration_not_parallel
def test_slice_list():
lst = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
res = ndl.slice_list(lst, 2)
assert res == [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]
res2 = ndl.slice_list(lst, 3)
assert res2 == [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
def clock(func, args, **kwargs):
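"""Call func(*args, **kwargs) and return (result, duration) with the wall-clock duration in seconds."""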
start = time.time()
result = func(*args, **kwargs)
stop = time.time()
duration = stop - start
return result, duration
def compare_arrays(file_path, arr1, arr2):
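"""
Compare two weight matrices entry by entry, regardless of whether they are
stored as numpy arrays, xarray DataArrays, pandas DataFrames or nested dicts.

Returns a tuple (unequal, unequal_ratio) where unequal is a list of
(outcome, cue, value1, value2) entries that differ beyond the tolerance.
"""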
_, cues, outcomes = count.cues_outcomes(file_path)
cue_map, outcome_map, _ = generate_mapping(file_path)
unequal = list()
for outcome in outcomes:
for cue in cues:
values = list()
for array in (arr1, arr2):
if isinstance(array, np.ndarray):
outcome_index = outcome_map[outcome]
cue_index = cue_map[cue]
values.append(array[outcome_index][cue_index])
elif isinstance(array, xr.DataArray):
values.append(array.loc[{'outcomes': outcome, 'cues': cue}].values)
elif isinstance(array, pd.DataFrame):
values.append(array.loc[outcome][cue])
else:
values.append(array[outcome][cue])
value1, value2 = values # pylint: disable=unbalanced-tuple-unpacking
if not np.isclose(value1, value2, rtol=1e-02, atol=1e-05):
unequal.append((outcome, cue, value1, value2))
unequal_ratio = len(unequal) / (len(outcomes) * len(cues))
return (unequal, unequal_ratio)
def generate_mapping(event_path):
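"""Return (cue_map, outcome_map, all_outcomes) with stable index mappings for all cues and outcomes in the event file."""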
_, cues, outcomes = count.cues_outcomes(event_path)
all_cues = list(cues.keys())
all_outcomes = list(outcomes.keys())
cue_map = OrderedDict(((cue, ii) for ii, cue in enumerate(all_cues)))
outcome_map = OrderedDict(((outcome, ii) for ii, outcome in enumerate(all_outcomes)))
return (cue_map, outcome_map, all_outcomes)
|
|
import unittest, time, re
from selenium import selenium
import sys
import os
import csv
sys.path.append("actions")
from authentication import Authenticate
from search import Search
from messages import Message
from form import Form, FormTemplate, rHeaderTemplate
### TODO ###
# Change addUser to use the addForm methods
# Change addRole to use the addForm methods
# addRole currently toggles the roles, need a check to see if it is selected first
# The Coverage class helps to measure coverage of the testing API,
# not I repeat NOT coverage of the Eden code base.
class Coverage:
def __init__ (self, methodName):
self.name = methodName
self.visits = 1
self.callList = []
def inc (self):
self.visits += 1
def calling (self, callMethod):
if callMethod in self.callList:
return
else:
self.callList.append(callMethod)
def toString (self):
data = "%s %s\n" % (self.name, self.visits)
if len(self.callList) != 0:
for method in self.callList:
data += "\t%s\n" % method
return data
class Action(unittest.TestCase):
def __init__ (self, selenium):
self.sel = selenium
self._diag = True # make True for profiling diagnostics
self._diagResults = None
self._diag_sleepTime = None
self._diag_performCalls = None
self.openReport()
self.coverage = False # make False to turn off coverage data
self.coverageMap = {}
self.coverageTrace = []
def openReport(self):
""" used to save the diagnostics to a file """
if self._diag:
self._diagResults = open("../results/diagResults.txt", "a")
self._diagResults.write(time.strftime("New Search run %d %b %Y (%H:%M:%S)\n"))
def closeReport(self, msg):
""" Close the file that is recording the diagnostics """
if self._diag:
self._diagResults.write(msg)
self._diagResults.close()
# Methods for managing the code coverage within the API
def startCoverage(self, methodName):
if self.coverage:
if len(self.coverageTrace) != 0:
parentName = self.coverageTrace[0]
self.coverageMap[parentName].calling(methodName)
self.coverageTrace.insert(0, methodName)
self.add(methodName)
def endCoverage(self):
if self.coverage:
self.coverageTrace.pop()
def add(self, methodName):
if self.coverage:
if methodName in self.coverageMap:
self.coverageMap[methodName].inc()
else:
self.coverageMap[methodName] = Coverage(methodName)
def coverageToString(self):
if self.coverage:
file = open("../results/coverageResults.txt", "a")
file.write(time.strftime("New run %d %b %Y (%H:%M:%S)\n"))
for key, value in self.coverageMap.items():
file.write(value.toString())
file.close()
# END OF Methods for managing the code coverage within the API
def prePopulateCSV(self, file, testType = "regression"):
"""
load the details from the pre-populate file
into a list of dictionary items
"""
result = []
key = []
fullPath = os.path.join("..",
"..",
"..",
"private",
"prepopulate",
testType,
file)
if not os.path.exists(fullPath):
print >> sys.stderr, "Failed to find pre-populate file %s" % file
return []
else:
try:
csvFile = open(fullPath, "rb")
except IOError:
print >> sys.stderr, "Failed to open pre-populate file %s" % file
return []
reader = csv.reader(csvFile)
for line in reader:
if key == []:
key=line
else:
data = {}
for cnt in range(len(key)):
if cnt < len(line):
data[key[cnt]] = line[cnt]
else:
data[key[cnt]] = None
result.append(data)
return result
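# Illustrative example (file name and columns are hypothetical): for a
# pre-populate CSV whose first line is the header row, e.g.
#   First Name,Last Name,Email
#   Alice,Smith,[email protected]
# prePopulateCSV("users.csv") should return
#   [{"First Name": "Alice", "Last Name": "Smith", "Email": "[email protected]"}]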
def openPage(self, page, force=False, heading=None):
""" Open the specified Page if not already open or if forced """
index = -1 * len(page)
location = self.sel.get_location()[index:]
if force or location != page:
print "Opening page %s" % page
# True stops a http HEAD being sent
self.sel.do_command("open", [page,True])
if heading is not None:
self.checkPageHeading(heading)
def checkPageHeading(self, heading, level="h2"):
""" Check that the heading matches the expected value """
self.assertEqual(heading, self.sel.get_text("//%s"%level))
def openLink(self, link):
""" click on the specific anchor """
if self.sel.is_element_present(link):
self.sel.click(link)
self.sel.wait_for_page_to_load("30000")
return True
else:
return False
def quickLink(self, link):
""" click on the specific anchor """
if self.sel.is_element_present(link):
self.sel.click(link)
return True
else:
return False
# Methods for managing authentication into Sahana
# login
# logout
def login(self, username, password, reveal=True):
"""
login to the system using the name provided
@param username: the username to be used
@param password: the password of the user
@param reveal: show the password on any error message
"""
self.startCoverage("login")
a = Authenticate(self.sel)
a.login(self, username, password, reveal)
self.endCoverage()
def logout(self):
""" logout of the system """
self.startCoverage("logout")
a = Authenticate(self.sel)
a.logout(self)
self.endCoverage()
# Searching methods
# search
# searchUnique
# clearSearch
# searchMatchesFound - returns the filter string
def search(self, searchString, expected):
self.startCoverage("search")
s = Search(self)
result = s.search(searchString, expected)
self.endCoverage()
return result
def searchUnique(self, uniqueName):
"""
Perform a search when one and only one result will be returned
@param uniqueName: the value to search for
"""
self.startCoverage("searchUnique")
result = self.search(uniqueName, r"1 entries")
self.endCoverage()
return result
def clearSearch(self):
""" Helper function used to clear the search results """
self.startCoverage("clearSearch")
self.search("", r"entries")
self.endCoverage()
def searchMatchesFound(self, searchString=None):
""" Helper function used to return the number of search results """
self.startCoverage("searchMatchesFound")
s = Search(self)
result = s.searchMatchesFound(searchString)
self.endCoverage()
return result
# Many actions are reported on in Sahana by displaying a banner at the top of the page
# Methods to check each banner for the desired message
# successMsg
# errorMsg
# findResponse
def successMsg(self, message):
"""
Method used to check for confirmation messages
@param message: the message to be searched for in the banner
returns: boolean reporting success
"""
self.startCoverage("successMsg")
m = Message(self)
result = m._findMsg(message, "confirmation")
self.endCoverage()
return result
def errorMsg(self, message):
"""
Method used to check for error messages
@param message: the message to be searched for in the banner
returns: boolean reporting success
"""
self.startCoverage("errorMsg")
m = Message(self)
result = m._findMsg(message, "error")
self.endCoverage()
return result
def findResponse(self, successMsg, errorMsg):
"""
Method to check on the response of an action by looking at the message
@param SuccessMsg: the message to be searched for in the banner upon success
@param errorMsg: the message to be searched for in the banner upon failure
returns: boolean reflecting the type of message found
side effect: exception if neither message found
"""
self.startCoverage("findResponse")
m = Message(self)
result = m.findResponse(successMsg, errorMsg)
self.endCoverage()
return result
# Methods to manage form manipulation
# saveForm
# getFormTemplate
# checkForm
def fillForm(self, id, value, type="text"):
self.startCoverage("fillForm")
f = Form(self)
result = f.fillForm(id, value, type)
self.endCoverage()
return result
def fillAutoComplete(self, fieldID, value, throbber=None):
self.startCoverage("fillAutoComplete")
f = Form(self)
result = f.fillAutoComplete(fieldID, value, throbber)
self.endCoverage()
return result
def saveForm(self, submit, message=None, success=True):
"""
Method to save the details
@param message: the success message to check (optional)
@param success: whether we're looking for a confirmation (default) or failure
"""
self.startCoverage("saveForm")
f = Form(self)
result = f.saveForm(submit, message, success)
self.endCoverage()
return result
def getFormTemplate(self):
""" Method to return a new empty form element """
self.startCoverage("getFormTemplate")
f = FormTemplate(self)
self.endCoverage()
return f
def addFormElement(self, formTemplate, id=None, tag=None, type=None, visible=True, value=None, elementDetails=()):
""" Method to add a form element to the template """
self.startCoverage("addFormElement")
formTemplate.addFormElement(id, tag, type, visible, value, elementDetails)
self.endCoverage()
return formTemplate
def removeElement(self, formTemplate, id):
""" Method to remove an element from the template """
self.startCoverage("removeElement")
formTemplate.removeElement(id)
self.endCoverage()
return formTemplate
def getFormElements(self, formName):
self.startCoverage("getFormElements")
f = Form(self)
elementList = f.getFormElements(formName)
self.endCoverage()
return elementList
def addButton(self, formTemplate, value):
""" Method to add a submit button to the template """
self.startCoverage("addButton")
formTemplate.addButton(value)
self.endCoverage()
return formTemplate
def showElement(self, formTemplate, elementId):
""" Method to set an element to be visible """
self.startCoverage("showElement")
formTemplate.getElementFromKey(elementId).setVisible(True)
self.endCoverage()
return formTemplate
def hideElement(self, formTemplate, elementDetails):
""" Method to set an element to be hidden """
self.startCoverage("showElement")
formTemplate.getElementFromKey(value).setVisible(False)
self.endCoverage()
return formTemplate
def checkForm (self, formTemplate, readonly=False):
"""
Method to check the layout of a form
elementList: data to check the elements on the form
buttonList: data to check the buttons on the form
helpList: data to check the help balloons
side effects: TestCase::fail() is called if any check failed
side effects: messages are written out reflecting what was verified
"""
self.startCoverage("checkForm")
f = Form(self)
f.checkForm(formTemplate, readonly)
self.endCoverage()
return f
def checkFormStrict(self, formTemplate, formName=None):
"""
Method to check that the visible element in the template
Are all displayed and that they are the only ones displayed
NOTE this is an *experimental method* it tries to check that the template
matches what is displayed. It is not guaranteed to manage all possible form elements.
If you have a element that you would like to be added to this method raise a ticket on Trac
"""
self.startCoverage("checkFormStrict")
error = []
f = Form(self)
error = f.checkFormStrict(formTemplate, formName)
self.endCoverage()
return error
def getrHeaderTemplate(self):
""" Method to return a new empty rHeader template """
self.startCoverage("getrHeaderTemplate")
r = rHeaderTemplate(self)
self.endCoverage()
return r
def addrHeaderLine(self, template, header, value):
""" Method to add a line to an existing rHeader template """
self.startCoverage("addrHeaderLine")
template.addValue(header, value)
self.endCoverage()
return template
def checkHeading(self, template):
""" Method to check the details that are displayed in the heading """
self.startCoverage("checkHeading")
template.checkrHeader()
self.endCoverage()
return template
# def checkForm (self, elementList, buttonList, helpList):
# # Method to check the layout of a form
# # elementList: data to check the elements on the form
# # buttonList: data to check the buttons on the form
# # helpList: data to check the help balloons
# # side effects: TestCase::fail() is called if any check failed
# # side effects: messages are written out reflecting what was verified
# elements = []
# failed = []
# for element in elementList:
# result = self._element(element)
# if result == True:
# if len(element) > 2 and element[2]: elements.append(element[1])
# else: failed.append(result)
# for name in buttonList:
# self._button(name)
# for title in helpList:
# self._helpBalloon(title)
# if len(failed) > 0:
# msg = '/n'.join(failed)
# self.fail(msg)
# if len(elements) > 0:
# print "Verified the following form elements %s" % elements
# def _button(self, name):
# # Method to check that form button is present
# sel = self.sel
# element = '//input[@value="%s"]' % (name)
# errmsg = "%s button is missing" % (name)
# self.assertTrue(sel.is_element_present(element), errmsg)
# print "%s button is present" % (name)
# def _helpBalloon(self, helpTitle):
# # Method to check that the help message is displayed
# # helpTitle: the balloon help that is displayed on the form
# sel = self.sel
# element = "//div[contains(@title,'%s')]" % (helpTitle)
# self.assertTrue(sel.is_element_present(element))
# sel.mouse_over(element)
# self.assertFalse(sel.is_element_present(element), "Help %s is missing" % (helpTitle))
# print "Help %s is present" % (helpTitle)
# def _element(self, elementDetails):
# # Method to check that form _element is present
# # The elementDetails parameter is a list of up to 4 elements
# # elementDetails[0] the type of HTML tag
# # elementDetails[1] the id associated with the HTML tag
# # elementDetails[2] *optional* the visibility of the HTML tag
# # elementDetails[3] *optional* the value or text of the HTML tag
# # return True on success error message on failure
# sel = self.sel
# type = elementDetails[0]
# id = elementDetails[1]
# msg = ""
# if (len(elementDetails) >= 3):
# visible = elementDetails[2]
# else:
# visible = True
# if (len(elementDetails) >= 4):
# value = elementDetails[3]
# else:
# value = None
# elementDetails = '//%s[@id="%s"]' % (type, id)
# if visible:
# if not sel.is_element_present(elementDetails): return "%s element %s is missing" % (type, id)
# if sel.is_visible(elementDetails) != visible: return "%s element %s doesn't have a visibility of %s" % (type, id, visible)
# if value != None:
# actual = sel.get_value(elementDetails)
# msg = "expected %s for element %s doesn't equal the actual value of %s" % (value, id, actual)
# if value != actual: return msg
# return True
def showNamedElement(self, name, elementList):
"""
Method to set an element to be visible
@param name: The id of the element
@param elementList: The element list
"""
for element in elementList:
if element[1] == name:
self.showElement(element)
return True
return False
def hideNamedElement(self, name, elementList):
"""
Method to set an element to be hidden
@param name: The id of the element
@param elementList: The element list
"""
for element in elementList:
if element[1] == name:
self.hideElement(element)
return True
return False
# # Method to check on the rheading table that displays read only data related to a form
# def checkHeading(self, detailMap):
# # Method to check the details that are displayed in the heading
# # detailMap: A (name, value) pair of the data which is displayed in Sahana as a table
# # side effect: Assert the values are present
# sel = self.sel
# heading = sel.get_text("//div[@id='rheader']/div/table/tbody")
# searchString = ""
# for key, value in detailMap.items():
# msg = "Unable to find details of %s in the header of %s"
# self.assertTrue(key in heading, msg % (key, heading))
# self.assertTrue(value in heading, msg % (value, heading))
def checkTab(self, name):
""" Method to check if a tab is present """
sel = self.sel
element = "//div[@class='tabs']/span/a[text()='%s']" % (name)
return sel.is_element_present(element)
def clickTab(self, name):
""" Method to click on a tab """
sel = self.sel
element = "//div[@class='tabs']/span/a[text()='%s']" % (name)
sel.click(element)
sel.wait_for_page_to_load("30000")
def findLink(self, expression):
sel = self.sel
text = sel.get_text("//div[@id='content']")
m = re.search(expression, text)
if m is not None:
result = m.group(0)
self.openLink("link=%s"%result)
return True
else:
return False
def btnLink(self, id, name):
""" Method to check button link """
sel = self.sel
element = '//a[@id="%s"]' % (id)
errMsg = "%s button is missing" % (name)
self.assertTrue(sel.is_element_present(element), errMsg)
self.assertTrue(sel.get_text(element), errMsg)
print "%s button is present" % (name)
def noBtnLink(self, id, name):
""" Method to check button link is not present """
sel = self.sel
element = '//a[@id="%s"]' % (id)
errMsg = "Unexpected presence of %s button" % (name)
if sel.is_element_present(element):
self.assertFalse(sel.get_text(element), errMsg)
print "%s button is not present" % (name)
def clickBtn(self, name="Open"):
""" Method to click on a button """
sel = self.sel
element = "link=%s" % (name)
sel.click(element)
sel.wait_for_page_to_load("30000")
def deleteObject(self, page, objName, type="Object"):
sel = self.sel
# need the following line which reloads the page otherwise the search gets stuck
sel.open(page)
try:
self.searchUnique(objName)
sel.click("link=Delete")
#self.confirmDelete()
if self.findResponse("%s deleted" % type, "Integrity error:"):
print "%s %s deleted" % (type, objName)
return True
else:
print "Failed to delete %s %s" % (type, objName)
return False
except:
print "Failed to delete %s %s from page %s" % (type, objName, page)
return False
def confirmDelete(self):
sel = self.sel
confirm = ""
result = None
try:
confirm = sel.get_confirmation()
search = r"^Sure you want to delete this object?[\s\S]$"
result = re.search(search, confirm)
self.assertTrue(result)
except:
# Not working with FF4:
# http://code.google.com/p/selenium/issues/detail?id=1604
# Can workaround by setting deployment_settings.ui.confirm = False
print "Failed to properly manage a delete confirmation. " \
"Looking for %s. Got %s as the message " \
"result was %s" % (search, confirm, result)
pass
def registerUser(self, first_name, last_name, email, password):
first_name = first_name.strip()
last_name = last_name.strip()
email = email.strip()
password = password.strip()
sel = self.sel
sel.open("default/user/register")
sel.type("auth_user_first_name", first_name)
sel.type("auth_user_last_name", last_name)
sel.select("auth_user_language", "label=English")
sel.type("auth_user_email", email)
sel.type("auth_user_password", password)
sel.type("password_two", password)
sel.click("//input[@value='Register']")
sel.wait_for_page_to_load("30000")
msg = "Unable to register user %s %s with email %s" % (first_name, last_name, email)
self.assertTrue(self.successMsg("Registration successful"), msg)
# Only open this page if on another page
print sel.get_location()[-10:]
if sel.get_location()[-10:]=="admin/user":
print "Already on page admin/user"
else:
sel.open("admin/user")
self.searchUnique(email)
self.assertTrue(re.search(r"Showing 1 to 1 of 1 entries", sel.get_text("//div[@class='dataTables_info']")))
print "User %s created" % (email)
def addUser(self, first_name, last_name, email, password):
first_name = first_name.strip()
last_name = last_name.strip()
email = email.strip()
password = password.strip()
sel = self.sel
# Only open this page if on another page
print sel.get_location()[-10:]
self.openPage("admin/user")
if self.searchMatchesFound(email) > 0:
sel.click("link=Open")
sel.wait_for_page_to_load("30000")
sel.type("auth_user_first_name", first_name)
sel.type("auth_user_last_name", last_name)
sel.select("auth_user_language", "label=English")
sel.type("auth_user_email", email)
sel.type("auth_user_password", password)
sel.type("password_two", password)
sel.select("auth_user_registration_key", "")
sel.click("//input[@value='Save']")
sel.wait_for_page_to_load("30000")
msg = "Unable to update user %s %s with email %s" % (first_name, last_name, email)
self.assertTrue(self.successMsg("User updated"), msg)
self.searchUnique(email)
self.assertTrue(re.search(r"Showing 1 to 1 of 1 entries", sel.get_text("//div[@class='dataTables_info']")))
print "User %s enabled" % (email)
else:
# This may need changing if the embedded add user page is reinstalled
# It has been removed because it broke the searching
self.assertTrue(sel.is_element_present("add-btn"))
sel.click("add-btn")
sel.wait_for_page_to_load("30000")
sel.type("auth_user_first_name", first_name)
sel.type("auth_user_last_name", last_name)
sel.select("auth_user_language", "label=English")
sel.type("auth_user_email", email)
sel.type("auth_user_password", password)
sel.type("password_two", password)
sel.click("//input[@value='Save']")
sel.wait_for_page_to_load("30000")
msg = "Unable to create user %s %s with email %s" % (first_name, last_name, email)
self.assertTrue(self.successMsg("User added"), msg)
self.searchUnique(email)
self.assertTrue(re.search(r"Showing 1 to 1 of 1 entries", sel.get_text("//div[@class='dataTables_info']")))
print "User %s created" % (email)
def addRole(self, email, roles):
email = email.strip()
roles = roles.strip()
roleList = roles.split(" ")
sel = self.sel
if sel.get_location()[-10:]!="admin/user":
sel.open("admin/user")
sel.wait_for_page_to_load("30000")
self.searchUnique(email)
# Cannot click on link=Roles because that will trigger off the menu item admin/roles
sel.click("//table[@id='list']/tbody/tr[1]/td[1]/a[2]")
sel.wait_for_page_to_load("30000")
for role in roleList:
sel.click("//input[@name='roles' and @value='%s']" % role.strip())
sel.click("//input[@value='Save']")
sel.wait_for_page_to_load("30000")
# @ToDo: Message to get all roles (if multiple) not just last 1
msg = "Failed to add role %s to user %s" % (role.strip() , email)
self.assertTrue(self.successMsg("User Updated"), msg)
print "User %s added to group %s" % (email, role.strip())
def delUser(self, email):
email = email.strip()
print "Deleting user %s" % email
sel = self.sel
if sel.get_location()[-10:]!="admin/user":
sel.open("admin/user")
sel.wait_for_page_to_load("30000")
self.searchUnique(email)
sel.click("link=Disable")
self.assertTrue(self.successMsg("User Account has been Disabled"))
print "User %s disabled" % (email)
|
|
r"""
Backrefs for the 'regex' module.
Add the ability to use the following backrefs with re:
- `\Q` and `\Q...\E` - Escape/quote chars (search)
- `\c` and `\C...\E` - Uppercase char or chars (replace)
- `\l` and `\L...\E` - Lowercase char or chars (replace)
- `\N{Black Club Suit}` - Unicode character by name (replace)
- `\u0000` and `\U00000000` - Unicode characters (replace)
- `\R` - Generic line breaks (search)
- `\e` - Escape character (search)
Licensed under MIT
Copyright (c) 2015 - 2020 Isaac Muse <[email protected]>
"""
import regex as _regex
import copyreg as _copyreg
from functools import lru_cache as _lru_cache
from . import util as _util
from . import _bregex_parse
from ._bregex_parse import ReplaceTemplate
__all__ = (
"expand", "expandf", "match", "fullmatch", "search", "sub", "subf", "subn", "subfn", "split", "splititer",
"findall", "finditer", "purge", "escape", "D", "DEBUG", "A", "ASCII", "B", "BESTMATCH",
"E", "ENHANCEMATCH", "F", "FULLCASE", "I", "IGNORECASE", "L", "LOCALE", "M", "MULTILINE", "R", "REVERSE",
"S", "DOTALL", "U", "UNICODE", "X", "VERBOSE", "V0", "VERSION0", "V1", "VERSION1", "W", "WORD",
"P", "POSIX", "DEFAULT_VERSION", "FORMAT", "compile", "compile_search", "compile_replace", "Bregex",
"ReplaceTemplate"
)
# Expose some common re flags and methods to
# save having to import re and backrefs libraries
D = _regex.D
DEBUG = _regex.DEBUG
A = _regex.A
ASCII = _regex.ASCII
B = _regex.B
BESTMATCH = _regex.BESTMATCH
E = _regex.E
ENHANCEMATCH = _regex.ENHANCEMATCH
F = _regex.F
FULLCASE = _regex.FULLCASE
I = _regex.I
IGNORECASE = _regex.IGNORECASE
L = _regex.L
LOCALE = _regex.LOCALE
M = _regex.M
MULTILINE = _regex.MULTILINE
R = _regex.R
REVERSE = _regex.REVERSE
S = _regex.S
DOTALL = _regex.DOTALL
U = _regex.U
UNICODE = _regex.UNICODE
X = _regex.X
VERBOSE = _regex.VERBOSE
V0 = _regex.V0
VERSION0 = _regex.VERSION0
V1 = _regex.V1
VERSION1 = _regex.VERSION1
W = _regex.W
WORD = _regex.WORD
P = _regex.P
POSIX = _regex.POSIX
DEFAULT_VERSION = _regex.DEFAULT_VERSION
escape = _regex.escape
# Replace flags
FORMAT = 1
# Case upper or lower
_UPPER = 1
_LOWER = 2
# Maximum size of the cache.
_MAXCACHE = 500
_REGEX_TYPE = type(_regex.compile('', 0))
@_lru_cache(maxsize=_MAXCACHE)
def _cached_search_compile(pattern, re_verbose, re_version, pattern_type):
"""Cached search compile."""
return _bregex_parse._SearchParser(pattern, re_verbose, re_version).parse()
@_lru_cache(maxsize=_MAXCACHE)
def _cached_replace_compile(pattern, repl, flags, pattern_type):
"""Cached replace compile."""
return _bregex_parse._ReplaceParser().parse(pattern, repl, bool(flags & FORMAT))
def _get_cache_size(replace=False):
"""Get size of cache."""
if not replace:
size = _cached_search_compile.cache_info().currsize
else:
size = _cached_replace_compile.cache_info().currsize
return size
def _purge_cache():
"""Purge the cache."""
_cached_replace_compile.cache_clear()
_cached_search_compile.cache_clear()
def _is_replace(obj):
"""Check if object is a replace object."""
return isinstance(obj, ReplaceTemplate)
def _apply_replace_backrefs(m, repl=None, flags=0):
"""Expand with either the `ReplaceTemplate` or compile on the fly, or return None."""
if m is None:
raise ValueError("Match is None!")
else:
if isinstance(repl, ReplaceTemplate):
return repl.expand(m)
elif isinstance(repl, (str, bytes)):
return _bregex_parse._ReplaceParser().parse(m.re, repl, bool(flags & FORMAT)).expand(m)
def _apply_search_backrefs(pattern, flags=0):
"""Apply the search backrefs to the search pattern."""
if isinstance(pattern, (str, bytes)):
re_verbose = VERBOSE & flags
if flags & V0:
re_version = V0
elif flags & V1:
re_version = V1
else:
re_version = 0
if not (flags & DEBUG):
pattern = _cached_search_compile(pattern, re_verbose, re_version, type(pattern))
else: # pragma: no cover
pattern = _bregex_parse._SearchParser(pattern, re_verbose, re_version).parse()
elif isinstance(pattern, Bregex):
if flags:
raise ValueError("Cannot process flags argument with a compiled pattern")
pattern = pattern._pattern
elif isinstance(pattern, _REGEX_TYPE):
if flags:
raise ValueError("Cannot process flags argument with a compiled pattern!")
else:
raise TypeError("Not a string or compiled pattern!")
return pattern
def _assert_expandable(repl, use_format=False):
"""Check if replace template is expandable."""
if isinstance(repl, ReplaceTemplate):
if repl.use_format != use_format:
if use_format:
raise ValueError("Replace not compiled as a format replace")
else:
raise ValueError("Replace should not be compiled as a format replace!")
elif not isinstance(repl, (str, bytes)):
raise TypeError("Expected string, buffer, or compiled replace!")
def compile(pattern, flags=0, auto_compile=None, **kwargs): # noqa A001
"""Compile both the search or search and replace into one object."""
if isinstance(pattern, Bregex):
if auto_compile is not None:
raise ValueError("Cannot compile Bregex with a different auto_compile!")
elif flags != 0:
raise ValueError("Cannot process flags argument with a compiled pattern")
return pattern
else:
if auto_compile is None:
auto_compile = True
return Bregex(compile_search(pattern, flags, **kwargs), auto_compile)
def compile_search(pattern, flags=0, **kwargs):
"""Compile with extended search references."""
return _regex.compile(_apply_search_backrefs(pattern, flags), flags, **kwargs)
def compile_replace(pattern, repl, flags=0):
"""Construct a method that can be used as a replace method for `sub`, `subn`, etc."""
call = None
if pattern is not None and isinstance(pattern, _REGEX_TYPE):
if isinstance(repl, (str, bytes)):
if not (pattern.flags & DEBUG):
call = _cached_replace_compile(pattern, repl, flags, type(repl))
else: # pragma: no cover
call = _bregex_parse._ReplaceParser().parse(pattern, repl, bool(flags & FORMAT))
elif isinstance(repl, ReplaceTemplate):
if flags:
raise ValueError("Cannot process flags argument with a ReplaceTemplate!")
if repl.pattern_hash != hash(pattern):
raise ValueError("Pattern hash doesn't match hash in compiled replace!")
call = repl
else:
raise TypeError("Not a valid type!")
else:
raise TypeError("Pattern must be a compiled regular expression!")
return call
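# Usage sketch (illustrative values): pre-compile the search and replace halves
# once and reuse them; `compile_replace` requires the already compiled pattern.
#
#     pattern = compile_search(r'(\w+)')
#     replace = compile_replace(pattern, r'\C\1\E')   # uppercase the whole group
#     pattern.sub(replace, 'spam and eggs')           # expected: 'SPAM AND EGGS'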
###########################
# API
##########################
class Bregex(_util.Immutable):
"""Bregex object."""
__slots__ = ("_pattern", "auto_compile", "_hash")
def __init__(self, pattern, auto_compile=True):
"""Initialization."""
super(Bregex, self).__init__(
_pattern=pattern,
auto_compile=auto_compile,
_hash=hash((type(self), type(pattern), pattern, auto_compile))
)
@property
def pattern(self):
"""Return pattern."""
return self._pattern.pattern
@property
def flags(self):
"""Return flags."""
return self._pattern.flags
@property
def groupindex(self):
"""Return group index."""
return self._pattern.groupindex
@property
def groups(self):
"""Return groups."""
return self._pattern.groups
@property
def scanner(self):
"""Return scanner."""
return self._pattern.scanner
def __hash__(self):
"""Hash."""
return self._hash
def __eq__(self, other):
"""Equal."""
return (
isinstance(other, Bregex) and
self._pattern == other._pattern and
self.auto_compile == other.auto_compile
)
def __ne__(self, other):
"""Equal."""
return (
not isinstance(other, Bregex) or
self._pattern != other._pattern or
self.auto_compile != other.auto_compile
)
def __repr__(self): # pragma: no cover
"""Representation."""
return '%s.%s(%r, auto_compile=%r)' % (
self.__module__, self.__class__.__name__, self._pattern, self.auto_compile
)
def _auto_compile(self, template, use_format=False):
"""Compile replacements."""
is_replace = _is_replace(template)
is_string = isinstance(template, (str, bytes))
if is_replace and use_format != template.use_format:
raise ValueError("Compiled replace cannot be a format object!")
if is_replace or (is_string and self.auto_compile):
return self.compile(template, (FORMAT if use_format and not is_replace else 0))
elif is_string and use_format:
# Reject an attempt to run format replace when auto-compiling
# of template strings has been disabled and we are using a
# template string.
raise AttributeError('Format replaces cannot be called without compiling replace template!')
else:
return template
def compile(self, repl, flags=0): # noqa A001
"""Compile replace."""
return compile_replace(self._pattern, repl, flags)
def search(self, string, *args, **kwargs):
"""Apply `search`."""
return self._pattern.search(string, *args, **kwargs)
def match(self, string, *args, **kwargs):
"""Apply `match`."""
return self._pattern.match(string, *args, **kwargs)
def fullmatch(self, string, *args, **kwargs):
"""Apply `fullmatch`."""
return self._pattern.fullmatch(string, *args, **kwargs)
def split(self, string, *args, **kwargs):
"""Apply `split`."""
return self._pattern.split(string, *args, **kwargs)
def splititer(self, string, *args, **kwargs):
"""Apply `splititer`."""
return self._pattern.splititer(string, *args, **kwargs)
def findall(self, string, *args, **kwargs):
"""Apply `findall`."""
return self._pattern.findall(string, *args, **kwargs)
def finditer(self, string, *args, **kwargs):
"""Apply `finditer`."""
return self._pattern.finditer(string, *args, **kwargs)
def sub(self, repl, string, *args, **kwargs):
"""Apply `sub`."""
return self._pattern.sub(self._auto_compile(repl), string, *args, **kwargs)
def subf(self, repl, string, *args, **kwargs): # noqa A002
"""Apply `sub` with format style replace."""
return self._pattern.subf(self._auto_compile(repl, True), string, *args, **kwargs)
def subn(self, repl, string, *args, **kwargs):
"""Apply `subn` with format style replace."""
return self._pattern.subn(self._auto_compile(repl), string, *args, **kwargs)
def subfn(self, repl, string, *args, **kwargs): # noqa A002
"""Apply `subn` after applying backrefs."""
return self._pattern.subfn(self._auto_compile(repl, True), string, *args, **kwargs)
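# Usage sketch (illustrative): `compile` below returns a `Bregex` whose methods
# auto-compile string replace templates, so replace backrefs such as `\c` work
# without an explicit `compile_replace` step.
#
#     p = compile(r'(?P<word>\w+)')
#     p.sub(r'\c\g<word>', 'cat hat')    # expected: 'Cat Hat'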
def purge():
"""Purge caches."""
_purge_cache()
_regex.purge()
def expand(m, repl):
"""Expand the string using the replace pattern or function."""
_assert_expandable(repl)
return _apply_replace_backrefs(m, repl)
def expandf(m, format): # noqa A002
"""Expand the string using the format replace pattern or function."""
_assert_expandable(format, True)
return _apply_replace_backrefs(m, format, flags=FORMAT)
def match(pattern, string, *args, **kwargs):
"""Wrapper for `match`."""
flags = args[2] if len(args) > 2 else kwargs.get('flags', 0)
return _regex.match(_apply_search_backrefs(pattern, flags), string, *args, **kwargs)
def fullmatch(pattern, string, *args, **kwargs):
"""Wrapper for `fullmatch`."""
flags = args[2] if len(args) > 2 else kwargs.get('flags', 0)
return _regex.fullmatch(_apply_search_backrefs(pattern, flags), string, *args, **kwargs)
def search(pattern, string, *args, **kwargs):
"""Wrapper for `search`."""
flags = args[2] if len(args) > 2 else kwargs.get('flags', 0)
return _regex.search(_apply_search_backrefs(pattern, flags), string, *args, **kwargs)
def sub(pattern, repl, string, *args, **kwargs):
"""Wrapper for `sub`."""
flags = args[4] if len(args) > 4 else kwargs.get('flags', 0)
is_replace = _is_replace(repl)
is_string = isinstance(repl, (str, bytes))
if is_replace and repl.use_format:
raise ValueError("Compiled replace cannot be a format object!")
pattern = compile_search(pattern, flags)
return _regex.sub(
pattern, (compile_replace(pattern, repl) if is_replace or is_string else repl), string,
*args, **kwargs
)
def subf(pattern, format, string, *args, **kwargs): # noqa A002
"""Wrapper for `subf`."""
flags = args[4] if len(args) > 4 else kwargs.get('flags', 0)
is_replace = _is_replace(format)
is_string = isinstance(format, (str, bytes))
if is_replace and not format.use_format:
raise ValueError("Compiled replace is not a format object!")
pattern = compile_search(pattern, flags)
rflags = FORMAT if is_string else 0
return _regex.sub(
pattern, (compile_replace(pattern, format, flags=rflags) if is_replace or is_string else format), string,
*args, **kwargs
)
def subn(pattern, repl, string, *args, **kwargs):
"""Wrapper for `subn`."""
flags = args[4] if len(args) > 4 else kwargs.get('flags', 0)
is_replace = _is_replace(repl)
is_string = isinstance(repl, (str, bytes))
if is_replace and repl.use_format:
raise ValueError("Compiled replace cannot be a format object!")
pattern = compile_search(pattern, flags)
return _regex.subn(
pattern, (compile_replace(pattern, repl) if is_replace or is_string else repl), string,
*args, **kwargs
)
def subfn(pattern, format, string, *args, **kwargs): # noqa A002
"""Wrapper for `subfn`."""
flags = args[4] if len(args) > 4 else kwargs.get('flags', 0)
is_replace = _is_replace(format)
is_string = isinstance(format, (str, bytes))
if is_replace and not format.use_format:
raise ValueError("Compiled replace is not a format object!")
pattern = compile_search(pattern, flags)
rflags = FORMAT if is_string else 0
return _regex.subn(
pattern, (compile_replace(pattern, format, flags=rflags) if is_replace or is_string else format), string,
*args, **kwargs
)
def split(pattern, string, *args, **kwargs):
"""Wrapper for `split`."""
flags = args[3] if len(args) > 3 else kwargs.get('flags', 0)
return _regex.split(_apply_search_backrefs(pattern, flags), string, *args, **kwargs)
def splititer(pattern, string, *args, **kwargs):
"""Wrapper for `splititer`."""
flags = args[3] if len(args) > 3 else kwargs.get('flags', 0)
return _regex.splititer(_apply_search_backrefs(pattern, flags), string, *args, **kwargs)
def findall(pattern, string, *args, **kwargs):
"""Wrapper for `findall`."""
flags = args[2] if len(args) > 2 else kwargs.get('flags', 0)
return _regex.findall(_apply_search_backrefs(pattern, flags), string, *args, **kwargs)
def finditer(pattern, string, *args, **kwargs):
"""Wrapper for `finditer`."""
flags = args[2] if len(args) > 2 else kwargs.get('flags', 0)
return _regex.finditer(_apply_search_backrefs(pattern, flags), string, *args, **kwargs)
def _pickle(p):
return Bregex, (p._pattern, p.auto_compile)
_copyreg.pickle(Bregex, _pickle)
|
|
import logging
from django.core.exceptions import ValidationError
from django.db.utils import IntegrityError
from framework import sentry
from framework.auth import Auth
from framework.celery_tasks import app as celery_app
from osf.exceptions import (
NodeStateError,
RegistrationBulkCreationRowError,
UserNotAffiliatedError,
UserStateError,
ValidationValueError,
)
from osf.models import (
AbstractNode,
Contributor,
DraftRegistration,
Institution,
OSFUser,
RegistrationBulkUploadJob,
RegistrationBulkUploadRow,
RegistrationProvider,
RegistrationSchema,
)
from osf.models.licenses import NodeLicense
from osf.models.registration_bulk_upload_job import JobState
from osf.utils.permissions import READ, WRITE, ADMIN
from website import mails, settings
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
@celery_app.task()
def prepare_for_registration_bulk_creation(payload_hash, initiator_id, provider_id, parsing_output, dry_run=False):
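"""Prepare the OSF database for a registration bulk upload.

Verifies the initiator, provider, schema and parser output, creates the
RegistrationBulkUploadJob, rejects rows that are duplicated within the CSV or
already present in the database, bulk-creates the RegistrationBulkUploadRow
objects and moves the job into the INITIALIZED state. If duplicates are found,
the job is removed and a failure email is sent to the initiator instead.
"""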
logger.info('Preparing OSF DB for registration bulk creation ...')
# Check initiator
initiator = OSFUser.load(initiator_id)
if not initiator:
message = 'Bulk upload preparation failure: initiator [id={}] not found'.format(initiator_id)
return handle_internal_error(initiator=None, provider=None, message=message, dry_run=dry_run)
# Check provider
try:
provider = RegistrationProvider.objects.get(_id=provider_id)
except RegistrationProvider.DoesNotExist:
message = 'Bulk upload preparation failure: registration provider [_id={}] not found'.format(provider_id)
return handle_internal_error(initiator=initiator, provider=None, message=message, dry_run=dry_run)
except RegistrationProvider.MultipleObjectsReturned:
message = 'Bulk upload preparation failure: multiple registration ' \
'providers returned for [_id={}] '.format(provider_id)
return handle_internal_error(initiator=initiator, provider=None, message=message, dry_run=dry_run)
# Check parsing output
if not parsing_output:
message = 'Bulk upload preparation failure: missing parser output as task input'
return handle_internal_error(initiator=initiator, provider=provider, message=message, dry_run=dry_run)
# Check schema
schema_id = parsing_output.get('schema_id', None)
try:
schema = RegistrationSchema.objects.get(_id=schema_id)
except RegistrationSchema.DoesNotExist:
message = 'Bulk upload preparation failure: registration schema [_id={}] not found'.format(schema_id)
return handle_internal_error(initiator=initiator, provider=provider, message=message, dry_run=dry_run)
except RegistrationSchema.MultipleObjectsReturned:
message = 'Bulk upload preparation failure: multiple registration schemas [_id={}] returned'.format(schema_id)
return handle_internal_error(initiator=initiator, provider=provider, message=message, dry_run=dry_run)
# Create the bulk upload job
upload = RegistrationBulkUploadJob.create(payload_hash, initiator, provider, schema)
logger.info('Creating a registration bulk upload job with [hash={}] ...'.format(upload.payload_hash))
if not dry_run:
try:
upload.save()
except ValidationError:
sentry.log_exception()
message = 'Bulk upload preparation failure: failed to create the job'
return handle_internal_error(initiator=initiator, provider=provider, message=message, dry_run=dry_run)
upload.reload()
logger.info('Bulk upload job created: [pk={}, hash={}]'.format(upload.id, upload.payload_hash))
else:
logger.info('Dry run: insertion did not happen')
# Create registration rows for the bulk upload job
registration_rows = parsing_output.get('registrations', [])
if not registration_rows:
message = 'Bulk upload preparation failure: missing registration rows'
return handle_internal_error(initiator=initiator, provider=provider, message=message, dry_run=dry_run)
initial_row_count = len(registration_rows)
logger.info('Preparing [{}] registration rows for bulk creation ...'.format(initial_row_count))
row_hash_set = set()
bulk_upload_rows = []
draft_error_list = []
try:
for registration_row in registration_rows:
bulk_upload_row = RegistrationBulkUploadRow.create(
upload,
registration_row.get('csv_raw', ''),
registration_row.get('csv_parsed'),
)
metadata = bulk_upload_row.csv_parsed.get('metadata', {}) or {}
row_external_id = metadata.get('External ID', 'N/A')
row_title = metadata.get('Title', 'N/A')
# Check duplicates with the database
if RegistrationBulkUploadRow.objects.filter(row_hash=bulk_upload_row.row_hash).exists():
error = 'Duplicate rows - existing row found in the system'
exception = RegistrationBulkCreationRowError(upload.id, 'N/A', row_title, row_external_id, error=error)
logger.error(exception.long_message)
sentry.log_message(exception.long_message)
draft_error_list.append(exception.short_message)
# Continue to check duplicates within the CSV
if bulk_upload_row.row_hash in row_hash_set:
error = 'Duplicate rows - CSV contains duplicate rows'
exception = RegistrationBulkCreationRowError(upload.id, 'N/A', row_title, row_external_id, error=error)
logger.error(exception.long_message)
sentry.log_message(exception.long_message)
draft_error_list.append(exception.short_message)
else:
row_hash_set.add(bulk_upload_row.row_hash)
bulk_upload_rows.append(bulk_upload_row)
except Exception as e:
upload.delete()
return handle_internal_error(initiator=initiator, provider=provider, message=repr(e), dry_run=dry_run)
# Cancel the preparation task if duplicates are found in the CSV and/or in DB
if len(draft_error_list) > 0:
upload.delete()
logger.info('Sending emails to initiator/uploader ...')
mails.send_mail(
to_addr=initiator.username,
mail=mails.REGISTRATION_BULK_UPLOAD_FAILURE_DUPLICATES,
fullname=initiator.fullname,
count=initial_row_count,
draft_errors=draft_error_list,
osf_support_email=settings.OSF_SUPPORT_EMAIL,
)
return
if dry_run:
logger.info('Dry run: bulk creation did not run and emails are not sent')
logger.info('Dry run: complete')
return
try:
logger.info('Bulk creating [{}] registration rows ...'.format(len(bulk_upload_rows)))
created_objects = RegistrationBulkUploadRow.objects.bulk_create(bulk_upload_rows)
except (ValueError, IntegrityError):
upload.delete()
sentry.log_exception()
message = 'Bulk upload preparation failure: failed to create the rows.'
return handle_internal_error(initiator=initiator, provider=provider, message=message, dry_run=dry_run)
logger.info('[{}] rows successfully prepared.'.format(len(created_objects)))
logger.info('Updating job state ...')
upload.state = JobState.INITIALIZED
try:
upload.save()
except ValidationError:
upload.delete()
sentry.log_exception()
message = 'Bulk upload preparation failure: job state update failed'
return handle_internal_error(initiator=initiator, provider=provider, message=message, dry_run=dry_run)
logger.info('Job state updated')
logger.info('Bulk upload preparation finished: [upload={}, provider={}, schema={}, '
'initiator={}]'.format(upload.id, upload.provider._id, upload.schema._id, upload.initiator._id))
@celery_app.task(name='api.providers.tasks.monitor_registration_bulk_upload_jobs')
def monitor_registration_bulk_upload_jobs(dry_run=True):
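"""Pick up bulk upload jobs in the INITIALIZED state, mark them as PICKED_UP
and kick off `bulk_create_registrations` for each of them.
"""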
logger.info('Checking registration bulk upload jobs ...')
bulk_uploads = RegistrationBulkUploadJob.objects.filter(state=JobState.INITIALIZED)
logger.info('[{}] pending jobs found.'.format(len(bulk_uploads)))
for upload in bulk_uploads:
logger.info('Picked up job [upload={}, hash={}]'.format(upload.id, upload.payload_hash))
upload.state = JobState.PICKED_UP
bulk_create_registrations.delay(upload.id, dry_run=dry_run)
if not dry_run:
upload.save()
if dry_run:
logger.info('Dry run: bulk creation started in dry-run mode and job state was not updated')
logger.info('[{}] jobs have been picked up and kicked off. This monitor task ends.'.format(len(bulk_uploads)))
@celery_app.task()
def bulk_create_registrations(upload_id, dry_run=True):
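"""Create a draft registration for every row of a picked-up bulk upload job.

Per-row failures are collected, the job state is updated to DONE_FULL,
DONE_PARTIAL or DONE_ERROR accordingly, and the outcome is emailed to the
initiator.
"""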
try:
upload = RegistrationBulkUploadJob.objects.get(id=upload_id)
except RegistrationBulkUploadJob.DoesNotExist:
# This error should not happen since this task is only called by `monitor_registration_bulk_upload_jobs`
sentry.log_exception()
message = 'Registration bulk upload job not found: [id={}]'.format(upload_id)
return handle_internal_error(initiator=None, provider=None, message=message, dry_run=dry_run)
# Retrieve bulk upload job
provider = upload.provider
auto_approval = upload.provider.bulk_upload_auto_approval
schema = upload.schema
initiator = upload.initiator
logger.info(
'Bulk creating draft registrations: [provider={}, schema={}, initiator={}, auto_approval={}]'.format(
provider._id,
schema._id,
initiator._id,
auto_approval,
),
)
# Check registration rows and pick them up one by one to create draft registrations
registration_rows = RegistrationBulkUploadRow.objects.filter(upload__id=upload_id)
initial_row_count = len(registration_rows)
logger.info('Picked up [{}] registration rows for creation'.format(initial_row_count))
draft_error_list = [] # a list that stores rows that have failed the draft creation
approval_error_list = [] # a list that stores rows that have failed the approval process
successful_row_count = 0
for index, row in enumerate(registration_rows, 1):
logger.info('Processing registration row [{}: upload={}, row={}]'.format(index, upload.id, row.id))
row.is_picked_up = True
if not dry_run:
row.save()
try:
handle_registration_row(row, initiator, provider, schema, auto_approval=auto_approval, dry_run=dry_run)
successful_row_count += 1
except RegistrationBulkCreationRowError as e:
logger.error(e.long_message)
logger.error(e.error)
sentry.log_exception()
if auto_approval and e.approval_failure:
approval_error_list.append(e.short_message)
else:
draft_error_list.append(e.short_message)
if not dry_run:
if row.draft_registration:
row.draft_registration.delete()
elif e.draft_id:
DraftRegistration.objects.get(id=e.draft_id).delete()
row.delete()
else:
row.delete()
except Exception as e:
error = 'Bulk upload registration creation encountered an unexpected exception: ' \
'[row="{}", error="{}"]'.format(row.id, repr(e))
logger.error(error)
sentry.log_message(error)
sentry.log_exception()
draft_error_list.append('Title: N/A, External ID: N/A, Row Hash: {}, '
'Error: Unexpected'.format(row.row_hash))
if not dry_run:
if row.draft_registration:
row.draft_registration.delete()
else:
row.delete()
if len(draft_error_list) == initial_row_count:
upload.state = JobState.DONE_ERROR
message = 'All registration rows failed during bulk creation. ' \
'Upload ID: [{}], Draft Errors: [{}]'.format(upload_id, draft_error_list)
sentry.log_message(message)
logger.error(message)
elif len(draft_error_list) > 0 or len(approval_error_list) > 0:
upload.state = JobState.DONE_PARTIAL
message = 'Some registration rows failed during bulk creation. Upload ID: [{}]; Draft Errors: [{}]; ' \
'Approval Errors: [{}]'.format(upload_id, draft_error_list, approval_error_list)
sentry.log_message(message)
logger.warning(message)
else:
upload.state = JobState.DONE_FULL
logger.info('All registration rows succeeded for bulk creation. Upload ID: [{}].'.format(upload_id))
# Reverse the error lists so that users see failed rows in the same order as the original CSV
draft_error_list.reverse()
approval_error_list.reverse()
if not dry_run:
upload.save()
logger.info('Sending emails to initiator/uploader ...')
if upload.state == JobState.DONE_FULL:
mails.send_mail(
to_addr=initiator.username,
mail=mails.REGISTRATION_BULK_UPLOAD_SUCCESS_ALL,
fullname=initiator.fullname,
auto_approval=auto_approval,
count=initial_row_count,
pending_submissions_url=get_provider_submission_url(provider),
)
elif upload.state == JobState.DONE_PARTIAL:
mails.send_mail(
to_addr=initiator.username,
mail=mails.REGISTRATION_BULK_UPLOAD_SUCCESS_PARTIAL,
fullname=initiator.fullname,
auto_approval=auto_approval,
total=initial_row_count,
successes=successful_row_count,
draft_errors=draft_error_list,
failures=len(draft_error_list),
pending_submissions_url=get_provider_submission_url(provider),
osf_support_email=settings.OSF_SUPPORT_EMAIL,
)
elif upload.state == JobState.DONE_ERROR:
mails.send_mail(
to_addr=initiator.username,
mail=mails.REGISTRATION_BULK_UPLOAD_FAILURE_ALL,
fullname=initiator.fullname,
count=initial_row_count,
draft_errors=draft_error_list,
osf_support_email=settings.OSF_SUPPORT_EMAIL,
)
else:
message = 'Failed to send registration bulk upload outcome email due to invalid ' \
'upload state: [upload={}, state={}]'.format(upload.id, upload.state.name)
logger.error(message)
sentry.log_message(message)
logger.info('Email sent to bulk upload initiator [{}]'.format(initiator._id))
def handle_registration_row(row, initiator, provider, schema, auto_approval=False, dry_run=True):
"""Create a draft registration for one registration row in a given bulk upload job.
"""
metadata = row.csv_parsed.get('metadata', {}) or {}
row_external_id = metadata.get('External ID', 'N/A')
row_title = metadata.get('Title', 'N/A')
responses = row.csv_parsed.get('registration_responses', {}) or {}
auth = Auth(user=initiator)
# Check node
node = None
node_id = metadata.get('Project GUID')
if node_id:
try:
node = AbstractNode.objects.get(guids___id=node_id, is_deleted=False, type='osf.node')
initiator_contributor = node.contributor_set.get(user=initiator)
except AbstractNode.DoesNotExist:
error = 'Node does not exist: [node_id={}]'.format(node_id)
raise RegistrationBulkCreationRowError(row.upload.id, row.id, row_title, row_external_id, error=error)
except AbstractNode.MultipleObjectsReturned:
error = 'Multiple nodes returned: [node_id={}]'.format(node_id)
raise RegistrationBulkCreationRowError(row.upload.id, row.id, row_title, row_external_id, error=error)
except Contributor.DoesNotExist:
error = 'Initiator [{}] must be a contributor on the project [{}]'.format(initiator._id, node._id)
raise RegistrationBulkCreationRowError(row.upload.id, row.id, row_title, row_external_id, error=error)
if initiator_contributor.permission != ADMIN:
error = 'Initiator [{}] must have admin permission on the project [{}]'.format(initiator._id, node._id)
raise RegistrationBulkCreationRowError(row.upload.id, row.id, row_title, row_external_id, error=error)
# Prepare subjects
subject_texts = metadata.get('Subjects', []) or []
subject_ids = []
for text in subject_texts:
subject_list = provider.all_subjects.filter(text=text)
if not subject_list:
error = 'Subject not found: [text={}]'.format(text)
raise RegistrationBulkCreationRowError(row.upload.id, row.id, row_title, row_external_id, error=error)
if len(subject_list) > 1:
error = 'Duplicate subjects found: [text={}]'.format(text)
raise RegistrationBulkCreationRowError(row.upload.id, row.id, row_title, row_external_id, error=error)
subject_ids.append(subject_list.first()._id)
if len(subject_ids) == 0:
error = 'Missing subjects'
raise RegistrationBulkCreationRowError(row.upload.id, row.id, row_title, row_external_id, error=error)
# Prepare node licences
parsed_license = metadata.get('License', {}) or {}
license_name = parsed_license.get('name')
require_fields = parsed_license.get('required_fields', {}) or {}
year = require_fields.get('year')
copyright_holders = require_fields.get('copyright_holders')
try:
node_license = NodeLicense.objects.get(name=license_name)
except NodeLicense.DoesNotExist:
error = 'License not found: [license_name={}]'.format(license_name)
raise RegistrationBulkCreationRowError(row.upload.id, row.id, row_title, row_external_id, error=error)
node_license = {
'id': node_license.license_id,
}
if year and copyright_holders:
node_license.update({
'year': year,
'copyright_holders': copyright_holders,
})
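# Illustrative sketch only (the license name and values below are hypothetical):
# for parsed license metadata like
#     {'name': 'Example License',
#      'required_fields': {'year': '2021', 'copyright_holders': ['Jane Doe']}}
# the resulting `node_license` dict has the shape
#     {'id': '<license_id>', 'year': '2021', 'copyright_holders': ['Jane Doe']}
# while a license without required fields reduces to just {'id': '<license_id>'}.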
# Prepare editable fields
data = {
'title': row_title,
'category': metadata.get('Category', ''),
'description': metadata.get('Description', ''),
'node_license': node_license,
}
# Prepare institutions
affiliated_institutions = []
institution_names = metadata.get('Affiliated Institutions', []) or []
for name in institution_names:
try:
institution = Institution.objects.get(name=name, is_deleted=False)
except Institution.DoesNotExist:
error = 'Institution not found: [name={}]'.format(name)
raise RegistrationBulkCreationRowError(row.upload.id, row.id, row_title, row_external_id, error=error)
if not initiator.is_affiliated_with_institution(institution):
error = 'Initiator [{}] is not affiliated with institution [{}]'.format(initiator._id, institution._id)
raise RegistrationBulkCreationRowError(row.upload.id, row.id, row_title, row_external_id, error=error)
affiliated_institutions.append(institution)
# Prepare tags
tags = metadata.get('Tags', [])
# Prepare contributors
admin_list = metadata.get('Admin Contributors', []) or []
if len(admin_list) == 0:
error = 'Missing admin contributors'
raise RegistrationBulkCreationRowError(row.upload.id, row.id, row_title, row_external_id, error=error)
admin_set = {contributor.get('email') for contributor in admin_list}
read_only_list = metadata.get('Read-Only Contributors', []) or []
read_only_set = {contributor.get('email') for contributor in read_only_list}
read_write_list = metadata.get('Read-Write Contributors', []) or []
read_write_set = {contributor.get('email') for contributor in read_write_list}
author_list = metadata.get('Bibliographic Contributors', []) or []
if len(author_list) == 0:
error = 'Missing bibliographic contributors'
raise RegistrationBulkCreationRowError(row.upload.id, row.id, row_title, row_external_id, error=error)
author_set = {contributor.get('email') for contributor in author_list} # Bibliographic contributors
contributor_list = admin_list + read_only_list + read_write_list
contributor_set = set.union(admin_set, read_only_set, read_write_set) # All contributors
if not author_set.issubset(contributor_set):
error = 'Bibliographic contributors must be one of admin, read-only or read-write'
raise RegistrationBulkCreationRowError(row.upload.id, row.id, row_title, row_external_id, error=error)
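# Worked example (hypothetical emails, for illustration only): with
#     admin_set      = {'a@example.org'}
#     read_write_set = {'b@example.org'}
#     read_only_set  = {'c@example.org'}
#     author_set     = {'a@example.org', 'b@example.org'}
# contributor_set is {'a@example.org', 'b@example.org', 'c@example.org'}, author_set
# is a subset of it and the row passes this check; an author email that appears in
# none of the three permission lists would trigger the error above instead.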
if dry_run:
logger.info('Dry run: no draft registration will be created.')
return
# Creating the draft registration
draft = None
try:
draft = DraftRegistration.create_from_node(
initiator,
schema,
node=node,
data=data,
provider=provider,
)
# Remove all contributors except the initiator if created from an existing node
if node:
# Temporarily make initiator contributor visible so that removal of the others can succeed.
initiator_contributor = draft.contributor_set.get(user=initiator)
if not initiator_contributor.visible:
initiator_contributor.visible = True
initiator_contributor.save()
contributor_set = draft.contributor_set.all()
for contributor in contributor_set:
if initiator != contributor.user:
is_removed = draft.remove_contributor(contributor, auth)
assert is_removed, 'Removal of a non-initiator contributor from the draft has failed'
draft.save()
assert len(draft.contributor_set.all()) == 1, 'Draft should only have one contributor upon creation.'
# Remove the initiator from the citation list
# TODO: only remove the initiator from the citation list for certain providers
initiator_contributor = draft.contributor_set.get(user=initiator)
initiator_contributor.visible = False
initiator_contributor.save()
row.draft_registration = draft
row.save()
except Exception as e:
# If the draft has been created already but failure happens before it is related to the registration row,
# provide the draft id to the exception object for the caller to delete it after the exception is caught.
draft_id = draft.id if draft else None
raise RegistrationBulkCreationRowError(
row.upload.id, row.id, row_title, row_external_id,
draft_id=draft_id, error=repr(e),
)
# Set subjects
# TODO: if available, capture specific exceptions during setting subject
draft.set_subjects_from_relationships(subject_ids, auth)
# Set affiliated institutions
for institution in affiliated_institutions:
try:
draft.add_affiliated_institution(institution, initiator)
except UserNotAffiliatedError:
error = 'Initiator [{}] is not affiliated with institution [{}]'.format(initiator._id, institution._id)
raise RegistrationBulkCreationRowError(row.upload.id, row.id, row_title, row_external_id, error=error)
# Validate and set registration responses
try:
draft.update_registration_responses(responses)
except Exception as e:
error = f'Failed to update registration responses: {repr(e)}'
raise RegistrationBulkCreationRowError(row.upload.id, row.id, row_title, row_external_id, error=error)
# Set tags
# TODO: if available, capture specific exceptions during setting tags
draft.update_tags(tags, auth=auth)
# Set contributors
for contributor in contributor_list:
email = contributor.get('email')
full_name = contributor.get('full_name')
if not email or not full_name:
error = 'Invalid contributor format: missing email and/or full name'
raise RegistrationBulkCreationRowError(row.upload.id, row.id, row_title, row_external_id, error=error)
bibliographic = email in author_set
permission = ADMIN if email in admin_set else (WRITE if email in read_write_set else READ)
try:
draft.add_contributor_registered_or_not(
auth, full_name=full_name, email=email,
permissions=permission, bibliographic=bibliographic,
)
except ValidationValueError as e:
# `add_contributor_registered_or_not` throws several ValidationError / ValidationValueError
# exceptions that need to be treated differently.
if e.message.endswith(' is already a contributor.'):
logger.warning('Contributor already exists: [{}]'.format(email))
else:
error = 'This contributor cannot be added: [email="{}", error="{}"]'.format(email, e.message)
raise RegistrationBulkCreationRowError(row.upload.id, row.id, row_title, row_external_id, error=error)
except UserStateError as e:
error = 'This contributor cannot be added: [email="{}", error="{}"]'.format(email, repr(e))
raise RegistrationBulkCreationRowError(row.upload.id, row.id, row_title, row_external_id, error=error)
draft.save()
row.is_completed = True
row.save()
logger.info('Draft registration created: [{}]'.format(row.draft_registration._id))
# Register the draft
# TODO: figure out why `draft.validate_metadata()` fails
try:
registration = row.draft_registration.register(auth, save=True)
except NodeStateError as e:
error = f'Failed to register draft: {repr(e)}'
raise RegistrationBulkCreationRowError(row.upload.id, row.id, row_title, row_external_id, error=error)
except Exception as e:
raise RegistrationBulkCreationRowError(row.upload.id, row.id, row_title, row_external_id, error=repr(e))
logger.info('Registration [{}] created from draft [{}]'.format(registration._id, row.draft_registration._id))
# Requires approval
try:
registration.require_approval(initiator)
except NodeStateError as e:
error = f'Failed to require approval: {repr(e)}'
raise RegistrationBulkCreationRowError(row.upload.id, row.id, row_title, row_external_id, error=error)
except Exception as e:
raise RegistrationBulkCreationRowError(row.upload.id, row.id, row_title, row_external_id, error=repr(e))
logger.info('Approval required for registration [{}]'.format(registration._id))
# Once the draft registration and the registration have been created, bulk creation of this row is
# considered complete. Any error that happens during `registration.sanction.accept()` doesn't affect
# the state of the upload job or the registration row.
if auto_approval:
logger.info('Provider [{}] has enabled auto approval.'.format(provider._id))
try:
registration.sanction.accept()
except Exception as e:
raise RegistrationBulkCreationRowError(
row.upload.id, row.id, row_title, row_external_id,
error=repr(e), approval_failure=True,
)
logger.info('Registration approved but pending moderation: [{}]'.format(registration._id))
def handle_internal_error(initiator=None, provider=None, message=None, dry_run=True):
"""Log errors that happened due to unexpected bug and send emails the uploader (if available)
about failures. Product owner (if available) is informed as well with more details. Emails are
not sent during dry run.
"""
if not message:
message = 'Registration bulk upload failure'
logger.error(message)
sentry.log_message(message)
if not dry_run:
if initiator:
mails.send_mail(
to_addr=initiator.username,
mail=mails.REGISTRATION_BULK_UPLOAD_UNEXPECTED_FAILURE,
fullname=initiator.fullname,
osf_support_email=settings.OSF_SUPPORT_EMAIL,
)
inform_product_of_errors(initiator=initiator, provider=provider, message=message)
def inform_product_of_errors(initiator=None, provider=None, message=None):
"""Inform product owner of internal errors.
"""
email = settings.PRODUCT_OWNER_EMAIL_ADDRESS.get('Registration')
if not email:
logger.warning('Missing email for OSF Registration product owner.')
return
if not message:
message = 'Bulk upload preparation failure'
user = f'{initiator._id}, {initiator.fullname}, {initiator.username}' if initiator else 'UNIDENTIFIED'
provider_name = provider.name if provider else 'UNIDENTIFIED'
mails.send_mail(
to_addr=email,
mail=mails.REGISTRATION_BULK_UPLOAD_PRODUCT_OWNER,
message=message,
user=user,
provider_name=provider_name,
)
def get_provider_submission_url(provider):
"""Return the submission URL for a given registration provider
"""
return f'{settings.DOMAIN}registries/{provider._id}/moderation/submissions/'
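# Example (hypothetical provider id and domain, for illustration only): with
# settings.DOMAIN == 'https://osf.example/' and a provider whose `_id` is 'egap',
# this helper returns 'https://osf.example/registries/egap/moderation/submissions/'.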
|
|
import copy
import numpy as np
import unittest
import ray
from ray.rllib.agents.callbacks import DefaultCallbacks
import ray.rllib.agents.ppo as ppo
from ray.rllib.agents.ppo.ppo_tf_policy import (
ppo_surrogate_loss as ppo_surrogate_loss_tf,
)
from ray.rllib.agents.ppo.ppo_torch_policy import PPOTorchPolicy
from ray.rllib.evaluation.postprocessing import (
compute_gae_for_sample_batch,
Postprocessing,
)
from ray.rllib.models.tf.tf_action_dist import Categorical
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.rllib.models.torch.torch_action_dist import TorchCategorical
from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID, SampleBatch
from ray.rllib.utils.metrics.learner_info import LEARNER_INFO, LEARNER_STATS_KEY
from ray.rllib.utils.numpy import fc
from ray.rllib.utils.test_utils import (
check,
check_compute_single_action,
check_train_results,
framework_iterator,
)
# Fake CartPole episode of 3 time steps.
FAKE_BATCH = SampleBatch(
{
SampleBatch.OBS: np.array(
[[0.1, 0.2, 0.3, 0.4], [0.5, 0.6, 0.7, 0.8], [0.9, 1.0, 1.1, 1.2]],
dtype=np.float32,
),
SampleBatch.ACTIONS: np.array([0, 1, 1]),
SampleBatch.PREV_ACTIONS: np.array([0, 1, 1]),
SampleBatch.REWARDS: np.array([1.0, -1.0, 0.5], dtype=np.float32),
SampleBatch.PREV_REWARDS: np.array([1.0, -1.0, 0.5], dtype=np.float32),
SampleBatch.DONES: np.array([False, False, True]),
SampleBatch.VF_PREDS: np.array([0.5, 0.6, 0.7], dtype=np.float32),
SampleBatch.ACTION_DIST_INPUTS: np.array(
[[-2.0, 0.5], [-3.0, -0.3], [-0.1, 2.5]], dtype=np.float32
),
SampleBatch.ACTION_LOGP: np.array([-0.5, -0.1, -0.2], dtype=np.float32),
SampleBatch.EPS_ID: np.array([0, 0, 0]),
SampleBatch.AGENT_INDEX: np.array([0, 0, 0]),
}
)
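# A minimal sanity sketch (not used by the tests below; the helper name is
# illustrative only): with gamma = 0.99 and the episode terminating on its
# last step, the value targets checked later ([0.50005, -0.505, 0.5]) are
# simply the discounted returns of the rewards above.
def _discounted_returns(rewards, gamma=0.99):
    # Accumulate the return from the last step backwards.
    returns, running = [], 0.0
    for r in reversed(rewards):
        running = r + gamma * running
        returns.append(running)
    return list(reversed(returns))
# _discounted_returns([1.0, -1.0, 0.5]) -> approximately [0.50005, -0.505, 0.5]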
class MyCallbacks(DefaultCallbacks):
@staticmethod
def _check_lr_torch(policy, policy_id):
for j, opt in enumerate(policy._optimizers):
for p in opt.param_groups:
assert p["lr"] == policy.cur_lr, "LR scheduling error!"
@staticmethod
def _check_lr_tf(policy, policy_id):
lr = policy.cur_lr
sess = policy.get_session()
if sess:
lr = sess.run(lr)
optim_lr = sess.run(policy._optimizer._lr)
else:
lr = lr.numpy()
optim_lr = policy._optimizer.lr.numpy()
assert lr == optim_lr, "LR scheduling error!"
def on_train_result(self, *, trainer, result: dict, **kwargs):
stats = result["info"][LEARNER_INFO][DEFAULT_POLICY_ID][LEARNER_STATS_KEY]
# Learning rate should go to 0 after 1 iter.
check(stats["cur_lr"], 5e-5 if trainer.iteration == 1 else 0.0)
# Entropy coeff goes to 0.05, then 0.0 (per iter).
check(stats["entropy_coeff"], 0.1 if trainer.iteration == 1 else 0.05)
trainer.workers.foreach_policy(
self._check_lr_torch
if trainer.config["framework"] == "torch"
else self._check_lr_tf
)
class TestPPO(unittest.TestCase):
@classmethod
def setUpClass(cls):
ray.init()
@classmethod
def tearDownClass(cls):
ray.shutdown()
def test_ppo_compilation_and_schedule_mixins(self):
"""Test whether a PPOTrainer can be built with all frameworks."""
config = copy.deepcopy(ppo.DEFAULT_CONFIG)
# For checking lr-schedule correctness.
config["callbacks"] = MyCallbacks
config["num_workers"] = 1
config["num_sgd_iter"] = 2
# Settings in case we use an LSTM.
config["model"]["lstm_cell_size"] = 10
config["model"]["max_seq_len"] = 20
# Use default-native keras models whenever possible.
# config["model"]["_use_default_native_models"] = True
# Setup lr- and entropy schedules for testing.
config["lr_schedule"] = [[0, config["lr"]], [128, 0.0]]
# Set entropy_coeff to a faulty value to prove that it'll get
# overridden by the schedule below (which is expected).
config["entropy_coeff"] = 100.0
config["entropy_coeff_schedule"] = [[0, 0.1], [256, 0.0]]
config["train_batch_size"] = 128
# Test with compression.
config["compress_observations"] = True
num_iterations = 2
for fw in framework_iterator(config, with_eager_tracing=True):
for env in ["FrozenLake-v1", "MsPacmanNoFrameskip-v4"]:
print("Env={}".format(env))
for lstm in [True, False]:
print("LSTM={}".format(lstm))
config["model"]["use_lstm"] = lstm
config["model"]["lstm_use_prev_action"] = lstm
config["model"]["lstm_use_prev_reward"] = lstm
trainer = ppo.PPOTrainer(config=config, env=env)
policy = trainer.get_policy()
entropy_coeff = trainer.get_policy().entropy_coeff
lr = policy.cur_lr
if fw == "tf":
entropy_coeff, lr = policy.get_session().run(
[entropy_coeff, lr]
)
check(entropy_coeff, 0.1)
check(lr, config["lr"])
for i in range(num_iterations):
results = trainer.train()
check_train_results(results)
print(results)
check_compute_single_action(
trainer, include_prev_action_reward=True, include_state=lstm
)
trainer.stop()
def test_ppo_exploration_setup(self):
"""Tests, whether PPO runs with different exploration setups."""
config = copy.deepcopy(ppo.DEFAULT_CONFIG)
config["num_workers"] = 0 # Run locally.
config["env_config"] = {"is_slippery": False, "map_name": "4x4"}
obs = np.array(0)
# Test against all frameworks.
for fw in framework_iterator(config):
# Default Agent should be setup with StochasticSampling.
trainer = ppo.PPOTrainer(config=config, env="FrozenLake-v1")
# explore=False, always expect the same (deterministic) action.
a_ = trainer.compute_single_action(
obs, explore=False, prev_action=np.array(2), prev_reward=np.array(1.0)
)
# Test whether this is really the argmax action over the logits.
if fw != "tf":
last_out = trainer.get_policy().model.last_output()
if fw == "torch":
check(a_, np.argmax(last_out.detach().cpu().numpy(), 1)[0])
else:
check(a_, np.argmax(last_out.numpy(), 1)[0])
for _ in range(50):
a = trainer.compute_single_action(
obs,
explore=False,
prev_action=np.array(2),
prev_reward=np.array(1.0),
)
check(a, a_)
# With explore=True (default), expect stochastic actions.
actions = []
for _ in range(300):
actions.append(
trainer.compute_single_action(
obs, prev_action=np.array(2), prev_reward=np.array(1.0)
)
)
check(np.mean(actions), 1.5, atol=0.2)
trainer.stop()
def test_ppo_free_log_std(self):
"""Tests the free log std option works."""
config = copy.deepcopy(ppo.DEFAULT_CONFIG)
config["num_workers"] = 0 # Run locally.
config["gamma"] = 0.99
config["model"]["fcnet_hiddens"] = [10]
config["model"]["fcnet_activation"] = "linear"
config["model"]["free_log_std"] = True
config["model"]["vf_share_layers"] = True
for fw, sess in framework_iterator(config, session=True):
trainer = ppo.PPOTrainer(config=config, env="CartPole-v0")
policy = trainer.get_policy()
# Check the free log std var is created.
if fw == "torch":
matching = [
v for (n, v) in policy.model.named_parameters() if "log_std" in n
]
else:
matching = [
v for v in policy.model.trainable_variables() if "log_std" in str(v)
]
assert len(matching) == 1, matching
log_std_var = matching[0]
def get_value():
if fw == "tf":
return policy.get_session().run(log_std_var)[0]
elif fw == "torch":
return log_std_var.detach().cpu().numpy()[0]
else:
return log_std_var.numpy()[0]
# Check the variable is initially zero.
init_std = get_value()
assert init_std == 0.0, init_std
batch = compute_gae_for_sample_batch(policy, FAKE_BATCH.copy())
if fw == "torch":
batch = policy._lazy_tensor_dict(batch)
policy.learn_on_batch(batch)
# Check the variable is updated.
post_std = get_value()
assert post_std != 0.0, post_std
trainer.stop()
def test_ppo_loss_function(self):
"""Tests the PPO loss function math."""
config = copy.deepcopy(ppo.DEFAULT_CONFIG)
config["num_workers"] = 0 # Run locally.
config["gamma"] = 0.99
config["model"]["fcnet_hiddens"] = [10]
config["model"]["fcnet_activation"] = "linear"
config["model"]["vf_share_layers"] = True
for fw, sess in framework_iterator(config, session=True):
trainer = ppo.PPOTrainer(config=config, env="CartPole-v0")
policy = trainer.get_policy()
# Check no free log std var by default.
if fw == "torch":
matching = [
v for (n, v) in policy.model.named_parameters() if "log_std" in n
]
else:
matching = [
v for v in policy.model.trainable_variables() if "log_std" in str(v)
]
assert len(matching) == 0, matching
# Post-process (calculate simple (non-GAE) advantages) and attach
# to train_batch dict.
# A = [0.99^2 * 0.5 + 0.99 * -1.0 + 1.0, 0.99 * 0.5 - 1.0, 0.5] =
# [0.50005, -0.505, 0.5]
train_batch = compute_gae_for_sample_batch(policy, FAKE_BATCH.copy())
if fw == "torch":
train_batch = policy._lazy_tensor_dict(train_batch)
# Check Advantage values.
check(train_batch[Postprocessing.VALUE_TARGETS], [0.50005, -0.505, 0.5])
# Calculate actual PPO loss.
if fw in ["tf2", "tfe"]:
ppo_surrogate_loss_tf(policy, policy.model, Categorical, train_batch)
elif fw == "torch":
PPOTorchPolicy.loss(
policy, policy.model, policy.dist_class, train_batch
)
vars = (
policy.model.variables()
if fw != "torch"
else list(policy.model.parameters())
)
if fw == "tf":
vars = policy.get_session().run(vars)
expected_shared_out = fc(
train_batch[SampleBatch.CUR_OBS],
vars[0 if fw != "torch" else 2],
vars[1 if fw != "torch" else 3],
framework=fw,
)
expected_logits = fc(
expected_shared_out,
vars[2 if fw != "torch" else 0],
vars[3 if fw != "torch" else 1],
framework=fw,
)
expected_value_outs = fc(
expected_shared_out, vars[4], vars[5], framework=fw
)
kl, entropy, pg_loss, vf_loss, overall_loss = self._ppo_loss_helper(
policy,
policy.model,
Categorical if fw != "torch" else TorchCategorical,
train_batch,
expected_logits,
expected_value_outs,
sess=sess,
)
if sess:
policy_sess = policy.get_session()
k, e, pl, v, tl = policy_sess.run(
[
policy._mean_kl_loss,
policy._mean_entropy,
policy._mean_policy_loss,
policy._mean_vf_loss,
policy._total_loss,
],
feed_dict=policy._get_loss_inputs_dict(train_batch, shuffle=False),
)
check(k, kl)
check(e, entropy)
check(pl, np.mean(-pg_loss))
check(v, np.mean(vf_loss), decimals=4)
check(tl, overall_loss, decimals=4)
elif fw == "torch":
check(policy.model.tower_stats["mean_kl_loss"], kl)
check(policy.model.tower_stats["mean_entropy"], entropy)
check(policy.model.tower_stats["mean_policy_loss"], np.mean(-pg_loss))
check(
policy.model.tower_stats["mean_vf_loss"],
np.mean(vf_loss),
decimals=4,
)
check(policy.model.tower_stats["total_loss"], overall_loss, decimals=4)
else:
check(policy._mean_kl_loss, kl)
check(policy._mean_entropy, entropy)
check(policy._mean_policy_loss, np.mean(-pg_loss))
check(policy._mean_vf_loss, np.mean(vf_loss), decimals=4)
check(policy._total_loss, overall_loss, decimals=4)
trainer.stop()
def _ppo_loss_helper(
self, policy, model, dist_class, train_batch, logits, vf_outs, sess=None
):
"""
Calculates the expected PPO loss (components) given Policy,
Model, distribution, some batch, logits & vf outputs, using numpy.
"""
# Calculate expected PPO loss results.
dist = dist_class(logits, policy.model)
dist_prev = dist_class(
train_batch[SampleBatch.ACTION_DIST_INPUTS], policy.model
)
expected_logp = dist.logp(train_batch[SampleBatch.ACTIONS])
if isinstance(model, TorchModelV2):
train_batch.set_get_interceptor(None)
expected_rho = np.exp(
expected_logp.detach().cpu().numpy()
- train_batch[SampleBatch.ACTION_LOGP]
)
# KL(prev vs current action dist)-loss component.
kl = np.mean(dist_prev.kl(dist).detach().cpu().numpy())
# Entropy-loss component.
entropy = np.mean(dist.entropy().detach().cpu().numpy())
else:
if sess:
expected_logp = sess.run(expected_logp)
expected_rho = np.exp(expected_logp - train_batch[SampleBatch.ACTION_LOGP])
# KL(prev vs current action dist)-loss component.
kl = dist_prev.kl(dist)
if sess:
kl = sess.run(kl)
kl = np.mean(kl)
# Entropy-loss component.
entropy = dist.entropy()
if sess:
entropy = sess.run(entropy)
entropy = np.mean(entropy)
# Policy loss component.
pg_loss = np.minimum(
train_batch[Postprocessing.ADVANTAGES] * expected_rho,
train_batch[Postprocessing.ADVANTAGES]
* np.clip(
expected_rho,
1 - policy.config["clip_param"],
1 + policy.config["clip_param"],
),
)
# Value function loss component.
vf_loss1 = np.power(vf_outs - train_batch[Postprocessing.VALUE_TARGETS], 2.0)
vf_clipped = train_batch[SampleBatch.VF_PREDS] + np.clip(
vf_outs - train_batch[SampleBatch.VF_PREDS],
-policy.config["vf_clip_param"],
policy.config["vf_clip_param"],
)
vf_loss2 = np.power(vf_clipped - train_batch[Postprocessing.VALUE_TARGETS], 2.0)
vf_loss = np.maximum(vf_loss1, vf_loss2)
# Overall loss.
if sess:
policy_sess = policy.get_session()
kl_coeff, entropy_coeff = policy_sess.run(
[policy.kl_coeff, policy.entropy_coeff]
)
else:
kl_coeff, entropy_coeff = policy.kl_coeff, policy.entropy_coeff
overall_loss = np.mean(
-pg_loss
+ kl_coeff * kl
+ policy.config["vf_loss_coeff"] * vf_loss
- entropy_coeff * entropy
)
return kl, entropy, pg_loss, vf_loss, overall_loss
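# Descriptive note (not additional test code): the helper above mirrors the
# standard PPO objective
#     L = E[ -min(r * A, clip(r, 1 - eps, 1 + eps) * A)
#            + kl_coeff * KL + vf_loss_coeff * L_vf - entropy_coeff * H ]
# where r = pi_theta(a|s) / pi_theta_old(a|s) is the likelihood ratio and A the
# advantage, matching the terms combined in `overall_loss`.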
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
|
|
#!/usr/bin/env python
# coding:utf-8
"""
This file manages the SSL connection dispatcher.
It includes HTTP/1.1 and HTTP/2 workers.
It creates the SSL socket, then runs a worker on it.
If the SSL connection supports HTTP/2, an HTTP/2 worker is used.
It provides a simple blocking HTTPS request API, so the
caller doesn't need to know about IP/SSL/HTTP2/appid details.
Performance:
Picks the fastest worker to process each request,
sorted by RTT, pipelining tasks based on load.
"""
import os
import time
import threading
import operator
import Queue
import traceback
from utils import SimpleCondition
from appids_manager import appid_manager
import connect_control
from connect_manager import https_manager
from http1 import HTTP1_worker
from http2_connection import HTTP2_worker
import http_common
from xlog import getLogger
xlog = getLogger("gae_proxy")
current_path = os.path.dirname(os.path.abspath(__file__))
g_cacertfile = os.path.join(current_path, "cacert.pem")
class HttpsDispatcher(object):
idle_time = 20 * 60
def __init__(self):
self.request_queue = Queue.Queue()
self.workers = []
self.working_tasks = {}
self.h1_num = 0
self.h2_num = 0
self.create_worker_th = None
self.last_request_time = time.time()
self.triger_create_worker_cv = SimpleCondition()
self.wait_a_worker_cv = SimpleCondition()
threading.Thread(target=self.dispatcher).start()
threading.Thread(target=self.create_worker_thread).start()
# move created ssl to worker after ssl timeout
https_manager.set_ssl_time_handler(self.on_ssl_created_cb)
def on_ssl_created_cb(self, ssl_sock, check_free_worke=True):
if not ssl_sock:
raise Exception("on_ssl_created_cb ssl_sock None")
appid = appid_manager.get_appid()
if not appid:
time.sleep(60)
ssl_sock.close()
raise http_common.GAE_Exception(1, "no appid can use")
ssl_sock.appid = appid
ssl_sock.host = ssl_sock.appid + ".appspot.com"
if ssl_sock.h2:
worker = HTTP2_worker(ssl_sock, self.close_cb, self.retry_task_cb, self._on_worker_idle_cb, self.log_debug_data)
self.h2_num += 1
else:
worker = HTTP1_worker(ssl_sock, self.close_cb, self.retry_task_cb, self._on_worker_idle_cb, self.log_debug_data)
self.h1_num += 1
self.workers.append(worker)
self.wait_a_worker_cv.notify()
if check_free_worke:
self.check_free_worker()
def log_debug_data(self, rtt, sent, received):
pass
def _on_worker_idle_cb(self):
self.wait_a_worker_cv.notify()
def create_worker_thread(self):
while connect_control.keep_running:
try:
ssl_sock = https_manager.get_ssl_connection()
except Exception as e:
continue
if not ssl_sock:
# xlog.warn("create_worker_thread get ssl_sock fail")
continue
try:
self.on_ssl_created_cb(ssl_sock, check_free_worke=False)
except:
time.sleep(10)
idle_num = 0
acceptable_num = 0
for worker in self.workers:
if worker.accept_task:
acceptable_num += 1
if worker.version == "1.1":
if worker.accept_task:
idle_num += 1
else:
if len(worker.streams) == 0:
idle_num += 1
if idle_num > 5 and acceptable_num > 20:
self.triger_create_worker_cv.wait()
def get_worker(self, nowait=False):
while connect_control.keep_running:
best_rtt = 9999
best_worker = None
idle_num = 0
for worker in self.workers:
if not worker.accept_task:
continue
if worker.version == "1.1":
idle_num += 1
else:
if len(worker.streams) == 0:
idle_num += 1
rtt = worker.get_score()
if rtt < best_rtt:
best_rtt = rtt
best_worker = worker
if idle_num < 5 or best_rtt > 1000:
self.triger_create_worker_cv.notify()
if best_worker or nowait:
return best_worker
self.wait_a_worker_cv.wait()
def check_free_worker(self):
# Close the slowest idle worker,
# to give better workers a chance.
while True:
slowest_rtt = 0
slowest_worker = None
idle_num = 0
for worker in self.workers:
if not worker.accept_task:
continue
if worker.version == "2" and len(worker.streams) > 0:
continue
idle_num += 1
rtt = worker.get_score()
if rtt > slowest_rtt:
slowest_rtt = rtt
slowest_worker = worker
if idle_num < 30 or idle_num < int(len(self.workers) * 0.3):
return
if slowest_worker is None:
return
self.close_cb(slowest_worker)
def request(self, headers, body, url, timeout):
# xlog.debug("task start request:%s timeout:%d", url, timeout)
self.last_request_time = time.time()
q = Queue.Queue()
task = http_common.Task(headers, body, q, url, timeout)
task.set_state("start_request")
self.request_queue.put(task)
response = q.get(True)
task.set_state("get_response")
return response
def retry_task_cb(self, task):
if task.responsed:
xlog.warn("retry but responsed. %s", task.url)
st = traceback.extract_stack()
stl = traceback.format_list(st)
xlog.warn("stack:%r", repr(stl))
task.put_data("")
return
if task.retry_count > 10:
task.response_fail("retry time exceed 10")
return
if time.time() - task.start_time > 240:
task.response_fail("retry timeout:%d" % (time.time() - task.start_time))
return
task.set_state("retry")
task.retry_count += 1
self.request_queue.put(task)
def dispatcher(self):
while connect_control.keep_running:
try:
task = self.request_queue.get(True)
if task is None:
# exit
break
except Exception as e:
xlog.exception("http_dispatcher dispatcher request_queue.get fail:%r", e)
continue
task.set_state("get_task")
try:
worker = self.get_worker()
except Exception as e:
xlog.warn("get worker fail:%r", e)
task.response_fail(reason="get worker fail:%r" % e)
continue
if worker is None:
# can't send because we are exiting.
xlog.warn("http_dispatcher get None worker")
continue
task.set_state("get_worker:%s" % worker.ip)
try:
worker.request(task)
except Exception as e:
xlog.exception("dispatch request:%r", e)
# wake up waiting threads so they can exit.
self.wait_a_worker_cv.notify()
self.triger_create_worker_cv.notify()
def is_idle(self):
return time.time() - self.last_request_time > self.idle_time
def close_cb(self, worker):
try:
self.workers.remove(worker)
if worker.version == "2":
self.h2_num -= 1
else:
self.h1_num -= 1
except:
pass
def close_all_worker(self):
for w in self.workers:
w.close("close all worker")
self.workers = []
self.h1_num = 0
self.h2_num = 0
def to_string(self):
worker_rate = {}
for w in self.workers:
worker_rate[w] = w.get_score()
w_r = sorted(worker_rate.items(), key=operator.itemgetter(1))
out_str = 'thread num:%d\r\n' % threading.activeCount()
for w, r in w_r:
out_str += "%s rtt:%d a:%d live:%d processed:%d" % \
(w.ip, w.rtt, w.accept_task, (time.time()-w.ssl_sock.create_time), w.processed_tasks)
if w.version == "2":
out_str += " streams:%d ping_on_way:%d\r\n" % (len(w.streams), w.ping_on_way)
out_str += " Speed:"
for speed in w.speed_history:
out_str += "%d," % speed
out_str += "\r\n"
out_str += "\r\n<br> working_tasks:\r\n"
for unique_id in self.working_tasks:
task = self.working_tasks[unique_id]
out_str += task.to_string()
return out_str
http_dispatch = HttpsDispatcher()
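# Usage sketch (illustrative only; headers, URL and timeout below are hypothetical):
#     response = http_dispatch.request(
#         {"Host": "example.appspot.com"},      # headers
#         "",                                   # body
#         "https://example.appspot.com/_gh/",   # url
#         60)                                   # timeout
# The call blocks until a worker produces a response or retry_task_cb gives up
# (retry count over 10 or more than 240 seconds spent retrying).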
|
|
#File used for passing all parameters of the text-to-speech script on the command line
#Arguments in order of appearance on the command line:
#1: User input of text to be synthesized
#2: Voice ID, male or female (1 = male, 0 = female)
#3: Stream or Download (1 = stream, 0 = download)
# (At this point, if stream is chosen, there are no more args)
#4: Audio format, 1 = wav, 2 = ogg, 3 = vox
#5: filename, no extension used
#6: filepath, simple name and path of directory
#Example command line statement:
# This command would stream the word 'hello' in english with a female voice
# $ python3 command.py "*English hello" 0 1
# This command downloads a vox file that says 'how are you' in english with a
# male voice, saved as voxfiles/example.vox
# $ python3 command.py "*English how are you" 1 0 3 "example" "voxfiles"
import logging
from logging.handlers import RotatingFileHandler
from watson import Watson
import requests
import os
import datetime
import subprocess
import sys
argList = sys.argv
#parameters for authorization and audio format
URL = 'WATSON_URL'
PASSWORD = 'WATSON_PASSWORD'
USERNAME = 'WATSON_USERNAME'
CHUNK_SIZE = 1024
#Information for logger
MEGABYTE = 1000000 #number of bytes in a megabyte
now = datetime.datetime.now() #current time
#parameters for database connection
DB_DRIVER = "{SQL Server}"
DB_HOST = "SERVER_HOST"
DB_NAME = "SERVER_NAME"
DB_USER = "SERVER_USER"
DB_PASSWORD = "SERVER_PASSWORD"
#method for making a rotating log
#REQUIRES: Path is valid
#MODIFIES: Log based on byte size and period of time
#EFFECTS: Creates multiple logs, as well as deleting them after 30 days
def createRotatingLog(path):
#initiates logging session
Logger = logging.getLogger("TTS")
Logger.setLevel(logging.DEBUG)
#defines handler for byte size
#will roll over after 100 mb, will max out at 10 backup files
sizeHandler = RotatingFileHandler(path, maxBytes=(MEGABYTE * 100),
backupCount=10)
fmtr = logging.Formatter('%(asctime)s %(levelname)s %(message)s',
datefmt='%H:%M:%S')
sizeHandler.setFormatter(fmtr)
sizeHandler.setLevel(logging.DEBUG)
Logger.addHandler(sizeHandler)
return Logger
#Bool method to assert whether the string is intended to return
#True or False
def yesOrNo(string):
if string == '1':
return True
if string == '0':
return False
#Bool method that shows the input is valid (a 1 or a 0)
def validBool(string):
if string == '1' or string == '0':
return True
else:
return False
#Bool method that shows the filename does not contain bad characters
def validFilename(string):
for c in string:
if c == ':' or c == '.':
return False
return True
#method to request a text phrase to synthesize voice
def getPhrase(Logger, phrase):
#checks for empty input
if phrase == '':
Logger.warning("No text input")
if len(phrase) < 2:
Logger.warning("Not enough text to synthesize")
return phrase
#method to request a voiceID yes or no answer
def getVoiceID(Logger, voiceIDBool):
if not validBool(voiceIDBool):
Logger.warning("Invalid input for VoiceID: %s" % voiceIDBool)
if yesOrNo(voiceIDBool):
voiceID = 'en-US_MichaelVoice'
else:
voiceID = 'en-US_AllisonVoice'
return voiceID
#method to check if user wants to stream or download
#returns true or false
def isStream(Logger, streamBool):
#stream input (determines whether code runs stream() or download())
if not validBool(streamBool):
Logger.warning("Invalid input for streamBool: %s" % streamBool)
if yesOrNo(streamBool):
return True
else:
return False
#method to receive format of audio from user
#also receives whether the file is to be converted into vox
#returns a dictionary, in the format of (accept, voxBool)
def getFormat(Logger, formatBool):
fInt = int(formatBool)
if fInt not in (1, 2, 3):
Logger.warning("Invalid input for formatBool: %s" % formatBool)
#adjusts the accept variable based on response
if fInt == 1:
accept = "audio/wav"
Logger.info("File type: .wav")
voxBool = False
elif fInt == 2:
accept = "audio/ogg;codecs=opus"
Logger.info("File type: .ogg")
voxBool = False
elif fInt == 3:
accept = "audio/ogg;codecs=opus"
Logger.info("File type: .ogg")
voxBool = True
return {'accept':accept, 'voxBool':voxBool}
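#Illustrative values for the dictionary returned above (the calls themselves
#are hypothetical; the values follow the code paths in getFormat):
# getFormat(Logger, '1') -> {'accept': 'audio/wav', 'voxBool': False}
# getFormat(Logger, '3') -> {'accept': 'audio/ogg;codecs=opus', 'voxBool': True}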
#method to receive filename from user
def getFilename(Logger, filename):
#filename and location input
if not validFilename(filename):
Logger.warning("Invalid input for filename: %s" % filename)
#logs filename
Logger.info("Filename: %s" % filename)
return filename
#method to receive filepath from user
def getPath(Logger, location):
#asserts that the path exists
if not os.path.isdir(location):
Logger.warning("Directory in path does not exist: %s" % location)
return location
#method to initially convert ogg file to wav
def convertToWav(filename):
#strips ogg extension and attaches .wav
wavName = filename[:-4] + '.wav'
#creates command line for ffmpeg
command = ["ffmpeg", "-i", filename, wavName]
#ffmpeg is a service for command line conversion
#used specifically because it ignores bad header information (Watson wav files)
#called through subprocess to return converted file
subprocess.call(command, shell=True)
#removes ogg file
os.remove(filename)
#returns string name reference to the wavfile
return wavName
#method to convert a wav file to a vox file, provided full path
def convertToVox(filename, voxName):
#creates command for vcecopy, another command line executable
#vcecopy handles wav -> vox conversion
command = [r"copyfiles\vcecopy", filename, voxName]
subprocess.call(command, shell=True)
#removes wav file
os.remove(filename)
#method to convert ogg file to vox
#ties together methods above to create a single command conversion
def fullConvert(stringList):
#with only one element in the list, conversion is simple
#extract filename, end with vox, convert
if len(stringList) == 1:
#takes first and only element from the list
for string in stringList:
filepath = string[0]
filename = string[1]
fullPath = filepath + '\\' + filename + '.ogg'
#wavPath is the filepath to the newly converted file, ogg->wav
wavPath = convertToWav(fullPath)
#voxName is the new file for conversion, removes '.wav'
#and replaces it with '.vox', so the file will still have the user's
#desired name choice
voxPath = fullPath[:-4] + '.vox'
#end conversion of wav->vox
convertToVox(wavPath, voxPath)
#else clause for the event of merging multiple files
else:
for string in stringList:
filepath = string[0]
filename = string[1]
fullPath = filepath + '\\' + filename + '.ogg'
wavPath = convertToWav(fullPath)
#removes the .ogg extension as well as the numeric identifier
#that organizes the ogg/wav files.
#each file will be subsequently converted to the same vox name
#merging the files in the process
voxPath = fullPath[:-5] + '.vox'
convertToVox(wavPath, voxPath)
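#Illustrative walk-through (hypothetical names): for a single entry
#[('voxfiles', 'example')], fullConvert builds 'voxfiles\example.ogg',
#converts it to 'voxfiles\example.wav' with ffmpeg, then to
#'voxfiles\example.vox' with vcecopy, removing each intermediate file.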
def main():
argv = sys.argv
#creates the log session
Logger = createRotatingLog("TTS.log")
Logger.info("* File session started *")
#disable warnings for requests library
requests.packages.urllib3.disable_warnings()
#empty variables to be used as parameters for download()
userInput = ''
filename = ''
location = ''
accept = 'audio/wav'
voiceID = ''
#main logic: handles a single request based on the command line arguments
#phrase input
userInput = getPhrase(Logger, argv[1])
#voiceID input (bool conversion to string)
voiceID = getVoiceID(Logger, argv[2])
if isStream(Logger, argv[3]):
Logger.info("Output: Stream.")
#creates watson object, wav is default for stream
watson = Watson(USERNAME, PASSWORD, voiceID,
URL, CHUNK_SIZE, 'audio/wav')
watson.playFiles(userInput)
#Request ID placeholder
Logger.info("Request ID: 375832948 (placeholder)")
Logger.info("Stream successful.")
else:
#audio format input
#returns a short dictionary
audioFormat = getFormat(Logger, argv[4])
#filename and location input
filename = getFilename(Logger, argv[5])
location = getPath(Logger, argv[6])
#creates watson object
watson = Watson(USERNAME, PASSWORD, voiceID,
URL, CHUNK_SIZE, audioFormat['accept'])
#writes files
fileList = watson.writeFiles(userInput, filename, location)
if audioFormat['voxBool']:
fullConvert(fileList)
Logger.info("Vox filed created.")
Logger.info("Request ID: 375832948 (placeholder)")
print("Audio file saved.")
Logger.info("Download successful.")
#Indicates end of logging session, adds space between sessions
Logger.info("* File session ended *\n\n")
#runs main function
if __name__ == "__main__":
main()
|
|
"""
Unified interface for performing file system tasks. Uses os, os.path, shutil
and distutils to perform the tasks. The behavior of some functions is slightly
contaminated with requirements from Hyde: For example, the backup function
deletes the directory that is being backed up.
"""
import os
import platform
import shutil
import codecs
import fnmatch
from datetime import datetime
from distutils import dir_util, file_util
from path_util import PathUtil
from zipfile import ZipFile, ZIP_DEFLATED
from subprocess import check_call
class FileSystemEntity(object):
"""
Base class for files and folders.
"""
def __init__(self, path):
super(FileSystemEntity, self).__init__()
if isinstance(path, FileSystemEntity):
self.path = path.path
else:
self.path = path
def __str__(self):
return self.path
def __repr__(self):
return self.path
def allow(self, include=None, exclude=None):
"""
Given a set of wildcard patterns in the include and exclude arguments,
tests if the patterns allow this item for processing.
The exclude parameter is processed first as a broader filter and then
include is used as a narrower filter to override the results for more
specific files.
Example:
exclude = (".*", "*~")
include = (".htaccess")
"""
if not include:
include = ()
if not exclude:
exclude = ()
if reduce(lambda result,
pattern: result or
fnmatch.fnmatch(self.name, pattern), include, False):
return True
if reduce(lambda result, pattern:
result and not fnmatch.fnmatch(self.name, pattern),
exclude, True):
return True
return False
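# Illustrative results using the patterns from the docstring (file names are
# hypothetical):
#     File('.htaccess').allow(include=('.htaccess',), exclude=('.*', '*~'))  -> True
#     File('notes.txt~').allow(include=('.htaccess',), exclude=('.*', '*~')) -> False
#     File('page.html').allow(include=('.htaccess',), exclude=('.*', '*~'))  -> True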
@property
def humblepath(self):
"""
Expands variables and the user home, normalizes the path and case, and
converts it to an absolute path.
"""
return os.path.abspath(
os.path.normpath(
os.path.normcase(
os.path.expandvars(
os.path.expanduser(self.path)))))
def same_as(self, other):
"""
Checks if the path of this object is same as `other`. `other` must
be a FileSystemEntity.
"""
return (self.humblepath.rstrip(os.sep) ==
other.humblepath.rstrip(os.sep))
@property
def exists(self):
"""
Checks if the entity exists in the file system.
"""
return os.path.exists(self.path)
@property
def isdir(self):
"""
Is this a folder.
"""
return os.path.isdir(self.path)
@property
def stats(self):
"""
Shortcut for os.stat.
"""
return os.stat(self.path)
@property
def name(self):
"""
Name of the entity. Calls os.path.basename.
"""
return os.path.basename(self.path)
@property
def parent(self):
"""
The parent folder. Returns a `Folder` object.
"""
return Folder(os.path.dirname(self.path))
def __get_destination__(self, destination):
"""
Returns a File or Folder object that would represent this entity
if it were copied or moved to `destination`. `destination` must be
an instance of File or Folder.
"""
if os.path.isdir(str(destination)):
target = destination.child(self.name)
if os.path.isdir(self.path):
return Folder(target)
else: return File(target)
else:
return destination
# pylint: disable-msg=R0904,W0142
class File(FileSystemEntity):
"""
Encapsulates commonly used functions related to files.
"""
def __init__(self, path):
super(File, self).__init__(path)
def has_extension(self, extension):
"""
Checks if this file has the given extension.
"""
return self.extension == extension
def delete(self):
"""
Deletes if the file exists.
"""
if self.exists:
os.remove(self.path)
@property
def last_modified(self):
"""
Returns a datetime object representing the last modified time.
Calls os.path.getmtime.
"""
return datetime.fromtimestamp(os.path.getmtime(self.path))
def changed_since(self, basetime):
"""
Returns True if the file has been changed since the given time.
"""
return self.last_modified > basetime
def older_than(self, another_file):
"""
Checks if this file is older than the given file. Uses last_modified to
determine age.
"""
return another_file.last_modified > self.last_modified
@property
def path_without_extension(self):
"""
The full path of the file without its extension.
"""
return os.path.splitext(self.path)[0]
@property
def name_without_extension(self):
"""
Name of the file without its extension.
"""
return os.path.splitext(self.name)[0]
@property
def extension(self):
"""
File's extension prefixed with a dot.
"""
return os.path.splitext(self.path)[1]
@property
def kind(self):
"""
File's extension without a dot prefix.
"""
return self.extension.lstrip(".")
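# Quick illustration (hypothetical path): for File('site/index.html'),
# `extension` is '.html', `kind` is 'html', `name_without_extension` is 'index'
# and `path_without_extension` is 'site/index'.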
def move_to(self, destination):
"""
Moves the file to the given destination. Returns a File
object that represents the target file. `destination` must
be a File or Folder object.
"""
shutil.move(self.path, str(destination))
return self.__get_destination__(destination)
def copy_to(self, destination):
"""
Copies the file to the given destination. Returns a File
object that represents the target file. `destination` must
be a File or Folder object.
"""
shutil.copy(self.path, str(destination))
return self.__get_destination__(destination)
@property
def size(self):
return os.path.getsize(self.path)
def write(self, text, encoding="utf-8"):
"""
Writes the given text to the file using the given encoding.
"""
fout = codecs.open(self.path, 'w', encoding)
fout.write(text)
fout.close()
def open_read(self):
return codecs.open(self.path, 'r')
def open_write(self, encoding='utf-8'):
return codecs.open(self.path, 'w', encoding)
def read_all(self):
"""
Reads from the file and returns the content as a string.
"""
fin = codecs.open(self.path, 'r')
read_text = fin.read()
fin.close()
return read_text
# pylint: disable-msg=R0904,W0142
class Folder(FileSystemEntity):
"""
Encapsulates commonly used directory functions.
"""
def __init__(self, path):
super(Folder, self).__init__(path)
def __str__(self):
return self.path
def __repr__(self):
return self.path
def cd(self):
os.chdir(self.path)
def delete(self):
"""
Deletes the directory if it exists.
"""
if self.exists:
shutil.rmtree(self.path)
def depth(self):
"""
Returns the number of ancestors of this directory.
"""
return len(self.path.split(os.sep))
def make(self):
"""
Creates this directory and any of the missing directories in the path.
Any errors that may occur are eaten.
"""
try:
if not self.exists:
os.makedirs(self.path)
except:
pass
return self
def is_parent_of(self, other_entity):
"""
Returns True if this directory is a direct parent of the given
directory.
"""
return self.same_as(other_entity.parent)
def is_ancestor_of(self, other_entity):
"""
Returns True if this directory is in the path of the given directory.
Note that this will return True if the given directory is same as this.
"""
folder = other_entity
while not folder.parent.same_as(folder):
folder = folder.parent
if self.same_as(folder):
return True
return False
def child(self, name):
"""
Returns a path of a child item represented by `name`.
"""
return os.path.join(self.path, name)
def child_folder(self, *args):
"""
Returns a Folder object by joining the path component in args
to this directory's path.
"""
return Folder(os.path.join(self.path, *args))
def child_folder_with_fragment(self, fragment):
"""
Returns a Folder object by joining the fragment to
this directory's path.
"""
return Folder(os.path.join(self.path, fragment.lstrip(os.sep)))
def get_fragment(self, root):
"""
Returns the path fragment of this directory starting with the given
directory.
"""
return PathUtil.get_path_fragment(str(root), self.path)
def get_mirror_folder(self, root, mirror_root, ignore_root=False):
"""
Returns a Folder object that represents what the fragment of this
directory starting with `root` would look like if copied to `mirror_root`. If ignore_root
is True, the mirror does not include the `root` directory itself.
Example:
Current Directory: /usr/local/hyde/stuff
root: /usr/local/hyde
mirror_root: /usr/tmp
Result:
if ignore_root == False:
Folder(/usr/tmp/hyde/stuff)
if ignore_root == True:
Folder(/usr/tmp/stuff)
"""
path = PathUtil.get_mirror_dir(
self.path, str(root), str(mirror_root), ignore_root)
return Folder(path)
def create_mirror_folder(self, root, mirror_root, ignore_root=False):
"""
Creates the mirror directory returned by `get_mirror_folder`
"""
mirror_folder = self.get_mirror_folder(
root, mirror_root, ignore_root)
mirror_folder.make()
return mirror_folder
def backup(self, destination):
"""
Creates a backup of this directory in the given destination. The backup is
suffixed with a number for uniqueness. Deletes this directory after backup
is complete.
"""
new_name = self.name
count = 0
dest = Folder(destination.child(new_name))
while(True):
dest = Folder(destination.child(new_name))
if not dest.exists:
break
else:
count = count + 1
new_name = self.name + str(count)
dest.make()
dest.move_contents_of(self)
self.delete()
return dest
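# Example of the naming scheme above (hypothetical paths): backing up
# Folder('/work/site') into Folder('/backups') creates '/backups/site'; if that
# name already exists the backup becomes '/backups/site1', then '/backups/site2',
# and so on, and '/work/site' itself is deleted once the contents are moved.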
def move_to(self, destination):
"""
Moves this directory to the given destination. Returns a Folder object
that represents the moved directory.
"""
shutil.copytree(self.path, str(destination))
shutil.rmtree(self.path)
return self.__get_destination__(destination)
def copy_to(self, destination):
"""
Copies this directory to the given destination. Returns a Folder object
that represents the copied directory.
"""
shutil.copytree(self.path, str(destination))
return self.__get_destination__(destination)
def move_folder_from(self, source, incremental=False):
"""
Moves the given source directory to this directory. If incremental is True
only newer objects are overwritten.
"""
self.copy_folder_from(source, incremental)
shutil.rmtree(str(source))
def copy_folder_from(self, source, incremental=False):
"""
Copies the given source directory to this directory. If incremental is True
only newer objects are overwritten.
"""
# There is a bug in dir_util that makes copy_tree crash if a folder in the
# tree has been deleted before and re-added now. To work around the bug, we first
# walk the tree and create directories that are needed.
#
# pylint: disable-msg=C0111,W0232
target_root = self
class _DirCreator:
@staticmethod
def visit_folder(folder):
target = folder.get_mirror_folder(
source.parent, target_root, ignore_root=True)
target.make()
source.walk(_DirCreator)
dir_util.copy_tree(str(source),
self.child(source.name),
preserve_symlinks=True,
update=incremental)
def move_contents_of(self, source, move_empty_folders=True,
incremental=False):
"""
Moves the contents of the given source directory to this directory. If
incremental is True only newer objects are overwritten.
"""
# pylint: disable-msg=C0111,W0232
class _Mover:
@staticmethod
def visit_folder(folder):
self.move_folder_from(folder, incremental)
@staticmethod
def visit_file(a_file):
self.move_file_from(a_file, incremental)
source.list(_Mover, move_empty_folders)
def copy_contents_of(self, source, copy_empty_folders=True,
incremental=False):
"""
Copies the contents of the given source directory to this directory. If
incremental is True only newer objects are overwritten.
"""
# pylint: disable-msg=C0111,W0232
class _Copier:
@staticmethod
def visit_folder(folder):
self.copy_folder_from(folder, incremental)
@staticmethod
def visit_file(a_file):
self.copy_file_from(a_file, incremental)
source.list(_Copier, copy_empty_folders)
def move_file_from(self, source, incremental=False):
"""
Moves the given source file to this directory. If incremental is True the
move is performed only if the source file is newer.
"""
self.copy_file_from(source, incremental)
source.delete()
def copy_file_from(self, source, incremental=False):
"""
Copies the given source file to this directory. If incremental is True the
move is performed only if the source file is newer.
"""
file_util.copy_file(str(source), self.path, update=incremental)
def list(self, visitor, list_empty_folders=True):
"""
Calls the visitor.visit_file or visitor.visit_folder for each file or folder
in this directory. If list_empty_folders is False folders that are empty are
skipped.
"""
a_files = os.listdir(self.path)
for a_file in a_files:
path = os.path.join(self.path, str(a_file))
if os.path.isdir(path):
if not list_empty_folders:
if Folder(path).empty():
continue
visitor.visit_folder(Folder(path))
else:
visitor.visit_file(File(path))
def empty(self):
"""
Checks if this directory or any of its subdirectories contain files.
"""
paths = os.listdir(self.path)
for path in paths:
if os.path.isdir(os.path.join(self.path, path)):
if not Folder(os.path.join(self.path, path)).empty():
return False
else:
return False
return True
def ditto_zip(self):
folder = self
zip_path = self.parent.child(folder.name + ".zip")
check_call(['ditto', '-c', '-k', '--keepParent', '--sequesterRsrc', str(folder), zip_path])
return zip_path
def zzip(self):
if not platform.mac_ver()[0] == '':
return self.ditto_zip()
folder = self
zip_path = self.parent.child(folder.name + ".zip")
class Zipper(object):
def __init__(self):
super(Zipper, self).__init__()
self.zip_file = ZipFile(zip_path, 'w', ZIP_DEFLATED)
def visit_file(self, file):
path = Folder(folder.name).child_folder(file.parent.get_fragment(folder)).child(file.name)
self.zip_file.write(str(file), path)
def visit_complete(self):
self.zip_file.close()
self.walk(Zipper())
return zip_path
def walk(self, visitor = None, pattern = None):
"""
Walks the entire hierarchy of this directory starting with itself.
Calls visitor.visit_folder first and then calls visitor.visit_file for
any files found. After all files and folders have been exhausted
visitor.visit_complete is called.
If a pattern is provided, only the files that match the pattern are
processed.
If visitor.visit_folder returns False, the files in the folder are not
processed.
"""
def __visit_folder__(visitor, folder):
process_folder = True
if visitor and hasattr(visitor,'visit_folder'):
process_folder = visitor.visit_folder(folder)
# If there is no return value assume true
#
if process_folder is None:
process_folder = True
return process_folder
def __visit_file__(visitor, a_file):
if visitor and hasattr(visitor,'visit_file'):
visitor.visit_file(a_file)
def __visit_complete__(visitor):
if visitor and hasattr(visitor,'visit_complete'):
visitor.visit_complete()
for root, dirs, a_files in os.walk(self.path):
folder = Folder(root)
if not __visit_folder__(visitor, folder):
dirs[:] = []
continue
for a_file in a_files:
if not pattern or fnmatch.fnmatch(a_file, pattern):
__visit_file__(visitor, File(folder.child(a_file)))
__visit_complete__(visitor)
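# A minimal visitor sketch (illustrative only; the class name and the usage
# shown are hypothetical, not part of the original module): it collects the
# paths of matching files while walking a folder.
class _PathCollector(object):
    def __init__(self):
        self.paths = []
    def visit_file(self, a_file):
        self.paths.append(str(a_file))
# Usage sketch:
#     collector = _PathCollector()
#     Folder("site").walk(collector, pattern="*.html")
#     # collector.paths now lists every *.html file found under "site".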
|
|
#!/usr/bin/env python2
"""
This tutorial introduces the multilayer perceptron using Theano.
A multilayer perceptron is a logistic regressor where
instead of feeding the input to the logistic regression you insert a
intermediate layer, called the hidden layer, that has a nonlinear
activation function (usually tanh or sigmoid) . One can use many such
hidden layers making the architecture deep. The tutorial will also tackle
the problem of MNIST digit classification.
.. math::
f(x) = G( b^{(2)} + W^{(2)}( s( b^{(1)} + W^{(1)} x))),
References:
- textbooks: "Pattern Recognition and Machine Learning" -
Christopher M. Bishop, section 5
"""
__docformat__ = 'restructuredtext en'
import os
import sys
import time
import numpy
import theano
import theano.tensor as T
from logistic_sgd import LogisticRegression, load_data
# start-snippet-1
class HiddenLayer(object):
def __init__(self, rng, input, n_in, n_out, W=None, b=None,
activation=T.tanh):
"""
Typical hidden layer of a MLP: units are fully-connected and have
sigmoidal activation function. Weight matrix W is of shape (n_in,n_out)
and the bias vector b is of shape (n_out,).
NOTE : The nonlinearity used here is tanh
Hidden unit activation is given by: tanh(dot(input,W) + b)
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
:type input: theano.tensor.dmatrix
:param input: a symbolic tensor of shape (n_examples, n_in)
:type n_in: int
:param n_in: dimensionality of input
:type n_out: int
:param n_out: number of hidden units
:type activation: theano.Op or function
:param activation: Non linearity to be applied in the hidden
layer
"""
self.input = input
# end-snippet-1
# `W` is initialized with `W_values` which is uniformly sampled
# from sqrt(-6./(n_in+n_hidden)) and sqrt(6./(n_in+n_hidden))
# for tanh activation function
# the output of uniform is converted using asarray to dtype
# theano.config.floatX so that the code is runnable on GPU
# Note : optimal initialization of weights is dependent on the
# activation function used (among other things).
# For example, results presented in [Xavier10] suggest that you
# should use 4 times larger initial weights for sigmoid
# compared to tanh
# We have no info for other function, so we use the same as
# tanh.
if W is None:
W_values = numpy.asarray(
rng.uniform(
low=-numpy.sqrt(6. / (n_in + n_out)),
high=numpy.sqrt(6. / (n_in + n_out)),
size=(n_in, n_out)
),
dtype=theano.config.floatX
)
if activation == theano.tensor.nnet.sigmoid:
W_values *= 4
W = theano.shared(value=W_values, name='W', borrow=True)
if b is None:
b_values = numpy.zeros((n_out,), dtype=theano.config.floatX)
b = theano.shared(value=b_values, name='b', borrow=True)
self.W = W
self.b = b
lin_output = T.dot(input, self.W) + self.b
self.output = (
lin_output if activation is None
else activation(lin_output)
)
# parameters of the model
self.params = [self.W, self.b]
# start-snippet-2
class MLP(object):
"""Multi-Layer Perceptron Class
A multilayer perceptron is a feedforward artificial neural network model
that has one or more layers of hidden units and nonlinear activations.
Intermediate layers usually have as activation function tanh or the
sigmoid function (defined here by a ``HiddenLayer`` class) while the
top layer is a softmax layer (defined here by a ``LogisticRegression``
class).
"""
def __init__(self, rng, input, n_in, n_hidden, n_out):
"""Initialize the parameters for the multilayer perceptron
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
:type input: theano.tensor.TensorType
:param input: symbolic variable that describes the input of the
architecture (one minibatch)
:type n_in: int
:param n_in: number of input units, the dimension of the space in
which the datapoints lie
:type n_hidden: int
:param n_hidden: number of hidden units
:type n_out: int
:param n_out: number of output units, the dimension of the space in
which the labels lie
"""
# Since we are dealing with a one hidden layer MLP, this will translate
# into a HiddenLayer with a tanh activation function connected to the
# LogisticRegression layer; the activation function can be replaced by
# sigmoid or any other nonlinear function
self.hiddenLayer = HiddenLayer(
rng=rng,
input=input,
n_in=n_in,
n_out=n_hidden,
activation=T.tanh
)
# The logistic regression layer gets as input the hidden units
# of the hidden layer
self.logRegressionLayer = LogisticRegression(
input=self.hiddenLayer.output,
n_in=n_hidden,
n_out=n_out
)
# end-snippet-2 start-snippet-3
# L1 norm ; one regularization option is to enforce L1 norm to
# be small
self.L1 = (
abs(self.hiddenLayer.W).sum()
+ abs(self.logRegressionLayer.W).sum()
)
# square of L2 norm ; one regularization option is to enforce
# square of L2 norm to be small
self.L2_sqr = (
(self.hiddenLayer.W ** 2).sum()
+ (self.logRegressionLayer.W ** 2).sum()
)
# negative log likelihood of the MLP is given by the negative
# log likelihood of the output of the model, computed in the
# logistic regression layer
self.negative_log_likelihood = (
self.logRegressionLayer.negative_log_likelihood
)
# same holds for the function computing the number of errors
self.errors = self.logRegressionLayer.errors
        # the parameters of the model are the parameters of the two layers it is
# made out of
self.params = self.hiddenLayer.params + self.logRegressionLayer.params
# end-snippet-3
def test_mlp(learning_rate=0.01, L1_reg=0.00, L2_reg=0.0001, n_epochs=1000,
dataset='mnist.pkl.gz', batch_size=20, n_hidden=500):
"""
Demonstrate stochastic gradient descent optimization for a multilayer
perceptron
This is demonstrated on MNIST.
:type learning_rate: float
:param learning_rate: learning rate used (factor for the stochastic
    gradient)
:type L1_reg: float
:param L1_reg: L1-norm's weight when added to the cost (see
regularization)
:type L2_reg: float
:param L2_reg: L2-norm's weight when added to the cost (see
regularization)
:type n_epochs: int
:param n_epochs: maximal number of epochs to run the optimizer
:type dataset: string
:param dataset: the path of the MNIST dataset file from
http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz
"""
datasets = load_data(dataset)
train_set_x, train_set_y = datasets[0]
valid_set_x, valid_set_y = datasets[1]
test_set_x, test_set_y = datasets[2]
# compute number of minibatches for training, validation and testing
n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size
n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size
######################
# BUILD ACTUAL MODEL #
######################
print '... building the model'
# allocate symbolic variables for the data
index = T.lscalar() # index to a [mini]batch
x = T.matrix('x') # the data is presented as rasterized images
y = T.ivector('y') # the labels are presented as 1D vector of
# [int] labels
rng = numpy.random.RandomState(1234)
# construct the MLP class
classifier = MLP(
rng=rng,
input=x,
n_in=28 * 28,
n_hidden=n_hidden,
n_out=10
)
# start-snippet-4
# the cost we minimize during training is the negative log likelihood of
# the model plus the regularization terms (L1 and L2); cost is expressed
# here symbolically
cost = (
classifier.negative_log_likelihood(y)
+ L1_reg * classifier.L1
+ L2_reg * classifier.L2_sqr
)
# end-snippet-4
# compiling a Theano function that computes the mistakes that are made
# by the model on a minibatch
test_model = theano.function(
inputs=[index],
outputs=classifier.errors(y),
givens={
x: test_set_x[index * batch_size:(index + 1) * batch_size],
y: test_set_y[index * batch_size:(index + 1) * batch_size]
}
)
validate_model = theano.function(
inputs=[index],
outputs=classifier.errors(y),
givens={
x: valid_set_x[index * batch_size:(index + 1) * batch_size],
y: valid_set_y[index * batch_size:(index + 1) * batch_size]
}
)
# start-snippet-5
    # compute the gradient of cost with respect to theta (stored in params)
# the resulting gradients will be stored in a list gparams
gparams = [T.grad(cost, param) for param in classifier.params]
# specify how to update the parameters of the model as a list of
# (variable, update expression) pairs
    # given two lists A = [a1, a2, a3, a4] and B = [b1, b2, b3, b4] of the
    # same length, zip generates a list C of the same size, where each
    # element is a pair formed from the two lists:
# C = [(a1, b1), (a2, b2), (a3, b3), (a4, b4)]
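    # (For this model, classifier.params is [hidden W, hidden b,
    # log-regression W, log-regression b], so `updates` pairs each of those
    # four shared variables with its gradient-descent update expression.)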
updates = [
(param, param - learning_rate * gparam)
for param, gparam in zip(classifier.params, gparams)
]
    # compiling a Theano function `train_model` that returns the cost and,
    # at the same time, updates the parameters of the model based on the rules
# defined in `updates`
train_model = theano.function(
inputs=[index],
outputs=cost,
updates=updates,
givens={
x: train_set_x[index * batch_size: (index + 1) * batch_size],
y: train_set_y[index * batch_size: (index + 1) * batch_size]
}
)
# end-snippet-5
###############
# TRAIN MODEL #
###############
print '... training'
# early-stopping parameters
    patience = 10000  # look at this many examples regardless
patience_increase = 2 # wait this much longer when a new best is
# found
improvement_threshold = 0.995 # a relative improvement of this much is
# considered significant
validation_frequency = min(n_train_batches, patience / 2)
                                  # go through this many
                                  # minibatches before checking the network
                                  # on the validation set; in this case we
                                  # check every epoch
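    # For example, with patience = 10000 and patience_increase = 2, a
    # sufficiently large improvement found at iteration 6000 raises patience
    # to max(10000, 6000 * 2) = 12000 iterations before stopping.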
best_validation_loss = numpy.inf
best_iter = 0
test_score = 0.
start_time = time.clock()
epoch = 0
done_looping = False
while (epoch < n_epochs) and (not done_looping):
epoch = epoch + 1
for minibatch_index in xrange(n_train_batches):
minibatch_avg_cost = train_model(minibatch_index)
# iteration number
iter = (epoch - 1) * n_train_batches + minibatch_index
if (iter + 1) % validation_frequency == 0:
# compute zero-one loss on validation set
validation_losses = [validate_model(i) for i
in xrange(n_valid_batches)]
this_validation_loss = numpy.mean(validation_losses)
print(
'epoch %i, minibatch %i/%i, validation error %f %%' %
(
epoch,
minibatch_index + 1,
n_train_batches,
this_validation_loss * 100.
)
)
# if we got the best validation score until now
if this_validation_loss < best_validation_loss:
#improve patience if loss improvement is good enough
if (
this_validation_loss < best_validation_loss *
improvement_threshold
):
patience = max(patience, iter * patience_increase)
best_validation_loss = this_validation_loss
best_iter = iter
# test it on the test set
test_losses = [test_model(i) for i
in xrange(n_test_batches)]
test_score = numpy.mean(test_losses)
print((' epoch %i, minibatch %i/%i, test error of '
'best model %f %%') %
(epoch, minibatch_index + 1, n_train_batches,
test_score * 100.))
if patience <= iter:
done_looping = True
break
end_time = time.clock()
print(('Optimization complete. Best validation score of %f %% '
'obtained at iteration %i, with test performance %f %%') %
(best_validation_loss * 100., best_iter + 1, test_score * 100.))
print >> sys.stderr, ('The code for file ' +
os.path.split(__file__)[1] +
' ran for %.2fm' % ((end_time - start_time) / 60.))
if __name__ == '__main__':
test_mlp()
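    # A minimal sketch of experimenting with other hyperparameters
    # (illustrative values, not tuned):
    # test_mlp(learning_rate=0.05, L2_reg=0.001, n_epochs=10, n_hidden=300)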
|
|
#!/usr/bin/python
#24th May
#take input DONE
#create directory DONE
#save to system variable
#how to write a service
#make an account in google drive
import sys
import os
import subprocess
import json
from celery.bin.celery import status
print (sys.version)
# x = raw_input("enter username")
# print 'kaa be'
# y = raw_input("enter path")
#
# if x == 'piyush':
# print 'you are logged in now'
# if os.path.exists(y):
# path = os.path.join(y,"newDir")
# print 'here'
# print path
# os.makedirs(path)
# print 'dir made'
# else:
# print 'path not found'
#
# subprocess.call("ls -l",shell=True)
#
# subprocess.call('tail -f /home/piyush/dump.sql', shell=True)
#output = subprocess.Popen(['tail', '-100', '/home/piyush/piyush'], stdout=subprocess.PIPE)
import sys
import time
import logging
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
from watchdog.events import LoggingEventHandler
import Queue
import threading
import time
queueLock = threading.Lock()
workQueue = Queue.Queue(0)
thread = None
class LoggingEventHandler(FileSystemEventHandler):
"""Logs all the events captured."""
# makeSystemFile('.report', '/home/piyush/Drive')
def on_moved(self, event):
print('moved ' + event.dest_path + " " + event.src_path )
# Action(event,'deleted')
# Action(event,'moved')
#
queueLock.acquire()
workQueue.put('moved ' + event.src_path + event.dest_path )
queueLock.release()
def on_created(self, event):
# print('created ' + event.src_path)
# Action(event,'created')
queueLock.acquire()
workQueue.put('created ' + event.src_path )
queueLock.release()
def on_deleted(self, event):
# print('deleted ' + event.src_path)
# Action(event, 'deleted')
queueLock.acquire()
workQueue.put('deleted ' + event.src_path )
queueLock.release()
def on_modified(self, event):
# print('modified ' + event.src_path)
# if not event.is_directory:
# Action(event, 'modified')
#
queueLock.acquire()
workQueue.put('modified ' + event.src_path )
queueLock.release()
def makeSystemFile(name, path):
file_path = os.path.join(path, name)
if not os.path.exists(file_path):
file = open(file_path, 'w+')
file.flush()
file.close()
class Action:
def __init__(self, event, action):
file_path = None
if action == 'moved':
file_path = event.dest_path
else:
file_path = event.src_path
if ((os.path.basename(file_path)).startswith('.')):
return
report_file = open('/home/piyush/Drive/.report', 'r')
data = report_file.read()
report_file.close()
print('------------')
print(data)
print('------------')
report = dict()
try:
report = json.loads(data)
except ValueError, e:
print (str(e))
report = dict()
report[file_path] = File(os.path.basename(file_path), 'upload', action, None, None, event.is_directory).__dict__
report_file = open('/home/piyush/Drive/.report', 'w')
report_file.write(json.dumps(report))
report_file.flush()
report_file.close()
class File :
driveid = None
name = None
action = None
status = None # modified, added, deleted, untouched
md5 = None
is_dir = None
parent_id = None
def __init__(self, name, action = None, status = None, driveid=None, md5 = None, is_dir = None, parent_id = None):
self.name = name
self.action = action
self.status = status
self.driveid = driveid
self.md5 = md5
self.is_dir = is_dir
self.parent_id = parent_id
class myThread (threading.Thread):
def __init__(self, threadID, name, q):
threading.Thread.__init__(self)
self.threadID = threadID
self.name = name
self.q = q
def run(self):
print "Starting " + self.name
process_data(self.name, self.q)
print "Exiting " + self.name
def process_data(threadName, q):
while True:
queueLock.acquire()
if not workQueue.empty():
data = q.get()
queueLock.release()
print "%s processing %s" % (threadName, data)
else:
queueLock.release()
time.sleep(1)
if __name__ == "__main__":
# logging.basicConfig(level=logging.INFO,
# format='%(asctime)s - %(message)s',
# datefmt='%Y-%m-%d %H:%M:%S')
# path = sys.argv[1] if len(sys.argv) > 1 else '.'
# event_handler = LoggingEventHandler()
# observer = Observer()
# observer.schedule(event_handler, path, recursive=True)
# observer.start()
# thread = myThread(1, "Sync_guy", workQueue)
# thread.start()
# try:
# while True:
# time.sleep(1)
# except KeyboardInterrupt:
# observer.stop()
# observer.join()
rootdir = '/home/piyush/Drive'
for subdir, dirs, files in os.walk(rootdir):
print subdir
for file in files:
print os.path.join(subdir, file)
|
|
import warnings
try: # Python 3
import http.client as httplib
from urllib.parse import parse_qsl
from functools import partial
to_bytes = lambda value, *args, **kwargs: bytes(value, "utf-8", *args, **kwargs)
except ImportError: # Python 2
import httplib
from urlparse import parse_qsl
to_bytes = str
import textwrap
import codecs
import json
from . import exceptions
import base64
import hashlib
import hmac
try:
from rauth import OAuth1Session, OAuth1Service, OAuth2Session
except ImportError:
    print("Please install Rauth:\n\n")
print("http://rauth.readthedocs.org/en/latest/\n")
raise
class Environments(object):
SANDBOX = 'sandbox'
PRODUCTION = 'production'
class QuickBooks(object):
company_id = 0
session = None
auth_client = None
sandbox = False
minorversion = None
verifier_token = None
invoice_link = False
sandbox_api_url_v3 = "https://sandbox-quickbooks.api.intuit.com/v3"
api_url_v3 = "https://quickbooks.api.intuit.com/v3"
current_user_url = "https://appcenter.intuit.com/api/v1/user/current"
_BUSINESS_OBJECTS = [
"Account", "Attachable", "Bill", "BillPayment",
"Class", "CreditMemo", "Customer", "CompanyCurrency",
"Department", "Deposit", "Employee", "Estimate", "ExchangeRate", "Invoice",
"Item", "JournalEntry", "Payment", "PaymentMethod", "Preferences",
"Purchase", "PurchaseOrder", "RefundReceipt",
"SalesReceipt", "TaxAgency", "TaxCode", "TaxService/Taxcode", "TaxRate", "Term",
"TimeActivity", "Transfer", "Vendor", "VendorCredit", "CreditCardPayment",
]
__instance = None
__use_global = False
def __new__(cls, **kwargs):
"""
If global is disabled, don't set global client instance.
"""
if QuickBooks.__use_global:
if QuickBooks.__instance is None:
QuickBooks.__instance = object.__new__(cls)
instance = QuickBooks.__instance
else:
instance = object.__new__(cls)
if 'refresh_token' in kwargs:
instance.refresh_token = kwargs['refresh_token']
if 'auth_client' in kwargs:
instance.auth_client = kwargs['auth_client']
if instance.auth_client.environment == Environments.SANDBOX:
instance.sandbox = True
else:
instance.sandbox = False
refresh_token = instance._start_session()
instance.refresh_token = refresh_token
if 'company_id' in kwargs:
instance.company_id = kwargs['company_id']
if 'minorversion' in kwargs:
instance.minorversion = kwargs['minorversion']
instance.invoice_link = kwargs.get('invoice_link', False)
if 'verifier_token' in kwargs:
instance.verifier_token = kwargs.get('verifier_token')
return instance
def _start_session(self):
if self.auth_client.access_token is None:
self.auth_client.refresh(refresh_token=self.refresh_token)
self.session = OAuth2Session(
client_id=self.auth_client.client_id,
client_secret=self.auth_client.client_secret,
access_token=self.auth_client.access_token,
)
return self.auth_client.refresh_token
@classmethod
def get_instance(cls):
return cls.__instance
@classmethod
def disable_global(cls):
"""
Disable use of singleton pattern.
"""
warnings.warn("disable_global deprecated", PendingDeprecationWarning)
QuickBooks.__use_global = False
QuickBooks.__instance = None
@classmethod
def enable_global(cls):
"""
Allow use of singleton pattern.
"""
warnings.warn("enable_global deprecated", PendingDeprecationWarning)
QuickBooks.__use_global = True
def _drop(self):
QuickBooks.__instance = None
@property
def api_url(self):
if self.sandbox:
return self.sandbox_api_url_v3
else:
return self.api_url_v3
def validate_webhook_signature(self, request_body, signature, verifier_token=None):
hmac_verifier_token_hash = hmac.new(
to_bytes(verifier_token or self.verifier_token),
request_body.encode('utf-8'),
hashlib.sha256
).digest()
decoded_hex_signature = base64.b64decode(signature)
return hmac_verifier_token_hash == decoded_hex_signature
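    # Hedged usage sketch for the check above: pass the raw webhook body and
    # the value of Intuit's 'intuit-signature' header; it returns True only
    # when base64-decoding that header yields HMAC-SHA256(verifier_token, body):
    #   client.validate_webhook_signature(raw_body, headers['intuit-signature'])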
def get_current_user(self):
"""Get data from the current user endpoint"""
url = self.current_user_url
result = self.get(url)
return result
def get_report(self, report_type, qs=None):
"""Get data from the report endpoint"""
if qs is None:
qs = {}
url = self.api_url + "/company/{0}/reports/{1}".format(self.company_id, report_type)
result = self.get(url, params=qs)
return result
def change_data_capture(self, entity_string, changed_since):
url = "{0}/company/{1}/cdc".format(self.api_url, self.company_id)
params = {"entities": entity_string, "changedSince": changed_since}
result = self.get(url, params=params)
return result
def make_request(self, request_type, url, request_body=None, content_type='application/json',
params=None, file_path=None, request_id=None):
if not params:
params = {}
if self.minorversion:
params['minorversion'] = self.minorversion
if request_id:
params['requestid'] = request_id
if self.invoice_link:
params['include'] = 'invoiceLink'
if not request_body:
request_body = {}
headers = {
'Content-Type': content_type,
'Accept': 'application/json',
'User-Agent': 'python-quickbooks V3 library'
}
if file_path:
attachment = open(file_path, 'rb')
url = url.replace('attachable', 'upload')
boundary = '-------------PythonMultipartPost'
headers.update({
'Content-Type': 'multipart/form-data; boundary=%s' % boundary,
'Accept-Encoding': 'gzip;q=1.0,deflate;q=0.6,identity;q=0.3',
'User-Agent': 'python-quickbooks V3 library',
'Accept': 'application/json',
'Connection': 'close'
})
binary_data = str(base64.b64encode(attachment.read()).decode('ascii'))
content_type = json.loads(request_body)['ContentType']
request_body = textwrap.dedent(
"""
--%s
Content-Disposition: form-data; name="file_metadata_01"
Content-Type: application/json
%s
--%s
Content-Disposition: form-data; name="file_content_01"
Content-Type: %s
Content-Transfer-Encoding: base64
%s
--%s--
"""
) % (boundary, request_body, boundary, content_type, binary_data, boundary)
# make sure request_body is not unicode (python 2 case)
request_body = str(request_body)
req = self.process_request(request_type, url, headers=headers, params=params, data=request_body)
if req.status_code == httplib.UNAUTHORIZED:
raise exceptions.AuthorizationException(
"Application authentication failed", error_code=req.status_code, detail=req.text)
try:
result = req.json()
except:
raise exceptions.QuickbooksException("Error reading json response: {0}".format(req.text), 10000)
if "Fault" in result:
self.handle_exceptions(result["Fault"])
elif not req.status_code == httplib.OK:
raise exceptions.QuickbooksException("Error returned with status code '{0}': {1}".format(
req.status_code, req.text), 10000)
else:
return result
def get(self, *args, **kwargs):
return self.make_request("GET", *args, **kwargs)
def post(self, *args, **kwargs):
return self.make_request("POST", *args, **kwargs)
def process_request(self, request_type, url, headers="", params="", data=""):
if self.session is None:
raise exceptions.QuickbooksException('No session manager')
headers.update({'Authorization': 'Bearer ' + self.session.access_token})
return self.session.request(
request_type, url, headers=headers, params=params, data=data)
def get_single_object(self, qbbo, pk):
url = "{0}/company/{1}/{2}/{3}/".format(self.api_url, self.company_id, qbbo.lower(), pk)
result = self.get(url, {})
return result
@staticmethod
def handle_exceptions(results):
"""
Error codes with description in documentation:
https://developer.intuit.com/app/developer/qbo/docs/develop/troubleshooting/error-codes#id1
"""
# Needs to handle multiple errors
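        # A representative fault payload, matching the fields read below:
        #   {"Error": [{"Message": "...", "Detail": "...", "code": "610"}]}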
for error in results["Error"]:
message = error["Message"]
detail = ""
if "Detail" in error:
detail = error["Detail"]
code = ""
if "code" in error:
code = int(error["code"])
if 0 < code <= 499:
raise exceptions.AuthorizationException(message, code, detail)
elif 500 <= code <= 599:
raise exceptions.UnsupportedException(message, code, detail)
elif 600 <= code <= 1999:
if code == 610:
raise exceptions.ObjectNotFoundException(message, code, detail)
raise exceptions.GeneralException(message, code, detail)
elif 2000 <= code <= 4999:
raise exceptions.ValidationException(message, code, detail)
elif 10000 <= code:
raise exceptions.SevereException(message, code, detail)
else:
raise exceptions.QuickbooksException(message, code, detail)
def create_object(self, qbbo, request_body, _file_path=None, request_id=None):
self.isvalid_object_name(qbbo)
url = "{0}/company/{1}/{2}".format(self.api_url, self.company_id, qbbo.lower())
results = self.post(url, request_body, file_path=_file_path, request_id=request_id)
return results
def query(self, select):
url = "{0}/company/{1}/query".format(self.api_url, self.company_id)
result = self.post(url, select, content_type='application/text')
return result
def isvalid_object_name(self, object_name):
if object_name not in self._BUSINESS_OBJECTS:
raise Exception("{0} is not a valid QBO Business Object.".format(object_name))
return True
def update_object(self, qbbo, request_body, _file_path=None, request_id=None):
url = "{0}/company/{1}/{2}".format(self.api_url, self.company_id, qbbo.lower())
result = self.post(url, request_body, file_path=_file_path, request_id=request_id)
return result
def delete_object(self, qbbo, request_body, _file_path=None, request_id=None):
url = "{0}/company/{1}/{2}".format(self.api_url, self.company_id, qbbo.lower())
result = self.post(url, request_body, params={'operation': 'delete'}, file_path=_file_path, request_id=request_id)
return result
def batch_operation(self, request_body):
url = "{0}/company/{1}/batch".format(self.api_url, self.company_id)
results = self.post(url, request_body)
return results
def misc_operation(self, end_point, request_body, content_type='application/json'):
url = "{0}/company/{1}/{2}".format(self.api_url, self.company_id, end_point)
results = self.post(url, request_body, content_type)
return results
def download_pdf(self, qbbo, item_id):
if self.session is None:
raise exceptions.QuickbooksException('No session')
url = "{0}/company/{1}/{2}/{3}/pdf".format(
self.api_url, self.company_id, qbbo.lower(), item_id)
headers = {
'Content-Type': 'application/pdf',
'Accept': 'application/pdf, application/json',
'User-Agent': 'python-quickbooks V3 library'
}
response = self.process_request("GET", url, headers=headers)
if response.status_code != httplib.OK:
if response.status_code == httplib.UNAUTHORIZED:
# Note that auth errors have different result structure which can't be parsed by handle_exceptions()
raise exceptions.AuthorizationException(
"Application authentication failed", error_code=response.status_code, detail=response.text)
try:
result = response.json()
except:
raise exceptions.QuickbooksException("Error reading json response: {0}".format(response.text), 10000)
self.handle_exceptions(result["Fault"])
else:
return response.content
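# Hedged usage sketch: AuthClient comes from the separate intuit-oauth
# package, and the credential names below are placeholders, not part of
# this module:
#
#   from intuitlib.client import AuthClient
#   auth_client = AuthClient(CLIENT_ID, CLIENT_SECRET, REDIRECT_URI,
#                            environment=Environments.SANDBOX)
#   qb = QuickBooks(auth_client=auth_client,
#                   refresh_token=REFRESH_TOKEN,
#                   company_id=REALM_ID)
#   customers = qb.query("SELECT * FROM Customer MAXRESULTS 10")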
|
|
#!/usr/bin/env python3
# Copyright 2016 The Johns Hopkins University Applied Physics Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
By default, start the annotation indexing of a channel. Invokes the
Index.FindCuboids step function on the given channel.
Can also stop a running indexing process or resume one that's been stopped via
the --stop and --resume flags, respectively.
"""
import argparse
import boto3
import os
import json
from collections import namedtuple
import alter_path
from lib import configuration
# When this number of number of write units is consumed updating an entry in
# the id index, a new entry will be created to reduce the cost of adding
# additional morton ids to that id.
NEW_CHUNK_THRESHOLD = 100
RESOLUTION = 0
# Format string for building the first part of step function's arn.
SFN_ARN_PREFIX_FORMAT = 'arn:aws:states:{}:{}:stateMachine:'
# "lookup_key": "4&4&24&0", # This is the 192 cuboid test dataset with 249 ids per cuboid.
# "lookup_key": "8&8&26&0", # This is the annotation regression test data.
# "lookup_key": "4&4&24&0", # This is 1200 cuboid test dataset.
# "lookup_key": "4&4&30&0", # This is 1200 cuboid test dataset with only 1s in the cuboids where x > 1.
class ResourceNotFoundException(Exception):
"""
Raised when unable to locate the id of collection, experiment, or
resource.
"""
"""
Container that identifies Boss channel.
Fields:
collection (str): Collection name.
experiment (str): Experiment name.
channel (str): Channel name.
"""
ChannelParams = namedtuple(
'ChannelParams', ['collection', 'experiment', 'channel'])
def get_lookup_key_from_db(bosslet_config, channel_params):
"""
Get the lookup key that identifies the annotation channel from the database.
Args:
bosslet_config (BossConfiguration): Bosslet configuration object
channel_params (ChannelParams): Identifies channel.
Returns:
(str): Lookup key.
"""
coll_query = "SELECT id FROM collection WHERE name = %s"
exp_query = "SELECT id FROM experiment WHERE name = %s"
chan_query = "SELECT id FROM channel WHERE name = %s"
with bosslet_config.call.connect_rds() as cursor:
cursor.execute(coll_query, (channel_params.collection,))
coll_set = cursor.fetchall()
if len(coll_set) != 1:
raise ResourceNotFoundException(
"Can't find collection: {}".format(channel_params.collection))
cursor.execute(exp_query, (channel_params.experiment,))
exp_set = cursor.fetchall()
if len(exp_set) != 1:
raise ResourceNotFoundException(
"Can't find experiment: {}".format(channel_params.experiment))
cursor.execute(chan_query, (channel_params.channel,))
chan_set = cursor.fetchall()
if len(chan_set) != 1:
            raise ResourceNotFoundException(
                "Can't find channel: {}".format(channel_params.channel))
return '{}&{}&{}&{}'.format(
coll_set[0][0], exp_set[0][0], chan_set[0][0], RESOLUTION)
def get_common_args(bosslet_config):
"""
Get common arguments for starting step functions related to indexing.
Args:
bosslet_config (BossConfiguration): Bosslet configuration object
Returns:
(dict): Arguments.
"""
account = bosslet_config.ACCOUNT_ID
sfn_arn_prefix = SFN_ARN_PREFIX_FORMAT.format(bosslet_config.REGION,
bosslet_config.ACCOUNT_ID)
n = bosslet_config.names
common_args = {
"id_supervisor_step_fcn": '{}{}'.format(sfn_arn_prefix, n.index_supervisor.sfn),
"id_cuboid_supervisor_step_fcn": '{}{}'.format(sfn_arn_prefix, n.index_cuboid_supervisor.sfn),
"index_dequeue_cuboids_step_fcn":'{}{}'.format(sfn_arn_prefix, n.index_dequeue_cuboids.sfn),
"id_index_step_fcn": '{}{}'.format(sfn_arn_prefix, n.index_id_writer.sfn),
"batch_enqueue_cuboids_step_fcn": '{}{}'.format(sfn_arn_prefix, n.index_enqueue_cuboids.sfn),
"fanout_enqueue_cuboids_step_fcn": '{}{}'.format(sfn_arn_prefix, n.index_fanout_enqueue_cuboids.sfn),
"fanout_id_writers_step_fcn": '{}{}'.format(sfn_arn_prefix, n.index_fanout_id_writers.sfn),
"config": {
"object_store_config": {
"id_count_table": n.id_count_index.ddb,
"page_in_lambda_function": n.multi_lambda.lambda_,
"page_out_lambda_function": n.multi_lambda.lambda_,
"cuboid_bucket": n.cuboid_bucket.s3,
"s3_index_table": n.s3_index.ddb,
"id_index_table": n.id_index.ddb,
"s3_flush_queue": 'https://queue.amazonaws.com/{}/{}'.format(account, n.s3flush.sqs),
"id_index_new_chunk_threshold": NEW_CHUNK_THRESHOLD,
"index_deadletter_queue": 'https://queue.amazonaws.com/{}/{}'.format(account, n.index_deadletter.sqs),
"index_cuboids_keys_queue": 'https://queue.amazonaws.com/{}/{}'.format(account, n.index_cuboids_keys.sqs)
},
"kv_config": {
"cache_host": n.cache.redis,
"read_timeout": 86400,
"cache_db": "0"
},
"state_config": {
"cache_state_db": "0",
"cache_state_host": n.cache_state.redis,
}
},
"max_write_id_index_lambdas": 599,
"max_cuboid_fanout": 30,
"max_items": 100
}
return common_args
def get_find_cuboid_args(bosslet_config, lookup_key):
"""
Get all arguments needed to start Index.FindCuboids.
Args:
        bosslet_config (BossConfiguration): Bosslet configuration object
        lookup_key (str): Lookup key identifying the channel to index.
Returns:
(str, str): [0] is the ARN of Index.FindCuboids; [1] are the step function arguments as a JSON string.
"""
sfn_arn_prefix = SFN_ARN_PREFIX_FORMAT.format(bosslet_config.REGION,
bosslet_config.ACCOUNT_ID)
arn = '{}{}'.format(sfn_arn_prefix, bosslet_config.names.index_find_cuboids.sfn)
find_cuboid_args = get_common_args(bosslet_config)
find_cuboid_args['lookup_key'] = lookup_key
return arn, json.dumps(find_cuboid_args)
def get_running_step_fcns(bosslet_config, arn):
"""
    Retrieve execution ARNs of running step functions.
Args:
bosslet_config (BossConfiguration): Bosslet configuration object
arn (str): Specifies step function of interest.
Yields:
(str): Execution arn of running step function.
"""
sfn = bosslet_config.session.client('stepfunctions')
list_args = dict(
stateMachineArn=arn, statusFilter='RUNNING', maxResults=100)
resp = sfn.list_executions(**list_args)
for exe in resp['executions']:
yield exe['executionArn']
while 'nextToken' in resp:
list_args['nextToken'] = resp['nextToken']
resp = sfn.list_executions(**list_args)
for exe in resp['executions']:
yield exe['executionArn']
def run_find_cuboids(bosslet_config, args):
"""
Start Index.FindCuboids. This step function kicks off the entire indexing
process from the beginning.
Args:
bosslet_config (BossConfiguration): Bosslet configuration object
args (Namespace): Parsed command line arguments.
"""
channel_params = ChannelParams(
args.collection, args.experiment, args.channel)
if args.lookup_key is not None:
lookup_key = args.lookup_key
else:
lookup_key = get_lookup_key_from_db(bosslet_config,
channel_params)
print('lookup_key is: {}'.format(lookup_key))
find_cuboid_args = get_find_cuboid_args(bosslet_config,
lookup_key)
#print(find_cuboid_args[1])
print('Starting Index.FindCuboids . . .')
sfn = bosslet_config.session.client('stepfunctions')
resp = sfn.start_execution(
stateMachineArn=find_cuboid_args[0],
input=find_cuboid_args[1]
)
print(resp)
def resume_indexing(bosslet_config):
"""
Resume indexing a channel or channels. If the CuboidsKeys queue is not
empty, indexing will resume on those cuboids identified in that queue.
Args:
bosslet_config (BossConfiguration): Bosslet configuration object
"""
resume_args = get_common_args(bosslet_config)
resume_args['queue_empty'] = False
arn = resume_args['id_supervisor_step_fcn']
print('Resuming indexing (starting Index.Supervisor) . . .')
sfn = bosslet_config.session.client('stepfunctions')
resp = sfn.start_execution(
stateMachineArn=arn,
input=json.dumps(resume_args)
)
print(resp)
def stop_indexing(bosslet_config):
"""
    Stop the indexing process gracefully. Index.CuboidSupervisors will not
    be stopped, so the entire indexing process will not terminate immediately.
Only the Index.Supervisor and any running Index.DequeueCuboid step
functions will be halted. This allows the indexing process to be resumed.
Args:
bosslet_config (BossConfiguration): Bosslet configuration object
"""
stop_args = get_common_args(bosslet_config)
# This error could optionally be caught inside a step function if special
    # shutdown behavior is required.
error = 'ManualAbort'
cause = 'User initiated abort'
supe_arn = stop_args['id_supervisor_step_fcn']
    sfn = bosslet_config.session.client('stepfunctions')
print('Stopping Index.Supervisor . . .')
for arn in get_running_step_fcns(bosslet_config, supe_arn):
print('\tStopping {}'.format(arn))
sfn.stop_execution(
executionArn=arn,
error=error,
cause=cause)
print('Stopping Index.DequeueCuboids . . .')
deque_arn = stop_args['index_dequeue_cuboids_step_fcn']
for arn in get_running_step_fcns(bosslet_config, deque_arn):
print('\tStopping {}'.format(arn))
sfn.stop_execution(
executionArn=arn,
error=error,
cause=cause)
print('Done.')
def parse_args():
"""
Parse command line or config file.
Returns:
(Namespace): Parsed arguments.
"""
parser = configuration.BossParser(
description='Script for starting annotation index process. ' +
'To supply arguments from a file, provide the filename prepended with an `@`.',
fromfile_prefix_chars = '@')
parser.add_argument(
'--lookup-key', '-l',
default=None,
help='Lookup key of channel (supply this to avoid slow tunneling to DB)')
parser.add_argument(
'--stop',
action='store_true',
help='Stop indexing operation (will leave CuboidKeys queue untouched so indexing may be resumed)')
parser.add_argument(
'--resume',
action='store_true',
help='Resume indexing operation (if CuboidKeys queue still has messages, indexing will resume)')
parser.add_bosslet("Bosslet name where the lambda functions live")
parser.add_argument(
'collection',
nargs='?',
default=None,
help='Name of collection')
parser.add_argument(
'experiment',
nargs='?',
default=None,
help='Name of experiment')
parser.add_argument(
'channel',
nargs='?',
default=None,
help='Name of channel')
args = parser.parse_args()
if args.stop and args.resume:
parser.print_usage()
parser.exit(
1, 'Error: cannot specify --stop and --resume simultaneously')
if (args.lookup_key is None and not args.stop and not args.resume and
(args.collection is None or args.experiment is None or args.channel is None)
):
parser.print_usage()
parser.exit(1, 'Error: must specify collection, experiment, and channel')
if (args.lookup_key is not None and
(args.collection is not None or args.experiment is not None or args.channel is not None)
):
print('lookup-key specified, ignoring collection/experiment/channel name(s)')
return args
if __name__ == '__main__':
args = parse_args()
if args.stop:
stop_indexing(args.bosslet_config)
elif args.resume:
resume_indexing(args.bosslet_config)
else:
run_find_cuboids(args.bosslet_config, args)
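# Hedged invocation sketch (the script filename and bosslet name are
# placeholders; see parse_args() above for the accepted arguments):
#   python index_annotations.py <bosslet> <collection> <experiment> <channel>
#   python index_annotations.py --lookup-key '4&4&24&0' <bosslet>
#   python index_annotations.py --stop <bosslet>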
|
|
from comanage_nacha import NachaFile
from comanage_nacha.entries import FileControl, EntryDetail
from comanage_nacha.parser import Parser
parser = Parser()
confirmation_parser = Parser(confirmation_file=True)
rejection_parser = Parser(rejection_file=True)
simple = ("101 9100001912737206971506161208A094101WELLSFARGO COMANAGELLC\n"
"5225COMANAGELLC ACH SETTLEMENT 1234567890CCDPAYMENT 150616 1001237370000001\n"
"6271221052785005486880 0000082100 JANE DOE 0001237370000001\n"
"822500000100122105270000000821000000000000001234567890 001237370000001\n"
"9000001000001000000010012210527000000082100000000000000\n"
"9999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999\n"
"9999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999\n"
"9999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999\n"
"9999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999\n"
"9999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999")
def test_parse_lines_simple():
    # Just test that it doesn't throw exceptions, basically
list(parser.parse_lines(simple))
def test_parse_simple():
nacha = parser.parse(simple)
nacha.include_blocking_lines = False
assert isinstance(nacha, NachaFile)
assert len(list(nacha.lines)) == 5
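# NACHA record types, keyed by the first character of each record in the
# fixtures above and below: '1' file header, '5' batch header, '6' entry
# detail, '7' addenda, '8' batch control, '9' file control; trailing lines of
# all 9s are block-filler records padding the file to a multiple of ten lines.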
large = """101 9100001912737206971506161217A094101WELLSFARGO COMANAGELLC
5200COMANAGELLC ACH SETTLEMENT 1234567890CCDPAYMENT 150616 1001237370000001
6271221052785005486880 0000082100 JANE DOE 0001237370000001
6271221052786886896684 0000864107 JANE DOE 0001237370000002
6223221747951228713 0000220000 SOME CLEANERS 0001237370000003
622122100024785323353 0000020125 SOME HVAC COMPANY 0001237370000004
820000000400688485350000009462070000002401251234567890 001237370000001
5220COMANAGELLC ACH SETTLEMENT 1234567890PPDPAYMENT 150616 1001237370000002
6221221052789886521146 0000101832 HANDYMAN 0001237370000001
6221221052789886521146 0000069863 HANDYMAN 0001237370000002
822000000200244210540000000000000000001716951234567890 001237370000002
9000002000002000000060093269589000000946207000000411820"""
def test_parse_lines_large():
    # Just test that it doesn't throw exceptions, basically
list(parser.parse_lines(large))
def test_parse_large():
nacha = parser.parse(large)
nacha.include_blocking_lines = False
assert isinstance(nacha, NachaFile)
assert len(list(nacha.lines)) == 12
with_addenda = """101 091000019 1702161755A094101WELLS FARGO COMANAGE LLC
5200COMANAGE LLC 0123456789 170216 1091000010000001
6220910000100123456789 0000010000123 FRANK 1091000010000001
705 00010000001
820000000200091000010000000000000000000100000123456789 091000010000000
9000001000001000000020009100001000000000000000000010000 """
def test_parse_lines_addenda():
list(parser.parse_lines(with_addenda))
def test_parse_addenda():
nacha = parser.parse(with_addenda)
nacha.include_blocking_lines = False
assert isinstance(nacha, NachaFile)
assert len(list(nacha.lines)) == 6
confirmation = """101 9100001912737206971506161208A094101WELLSFARGO COMANAGELLC
5225COMANAGELLC ACH SETTLEMENT 1234567890CCDPAYMENT 150616 1001237370000001
822500000100122105270000000821000000000000001234567890 001237370000001
9000001000001000000010012210527000000082100000000000000"""
def test_parse_lines_confirmation():
(file_header,
batch_header,
batch_control,
file_control) = confirmation_parser.parse_lines(confirmation)
assert isinstance(file_control, FileControl)
assert file_control.message_codes == []
def test_parse_confirmation():
confirmation_parser.parse(confirmation)
confirmation_message_codes = """101 9100001912737206971506161217A094101WELLSFARGO COMANAGELLC
5200COMANAGELLC ACH SETTLEMENT 1234567890CCDPAYMENT 150616 1001237370000001
820000000400688485350000009462070000002401251234567890 001237370000001
5220COMANAGELLC ACH SETTLEMENT 1234567890PPDPAYMENT 150616 1001237370000002
822000000200244210540000000000000000001716951234567890 001237370000002
9000002000002000000060093269589000000946207000000411820 0102TT"""
def test_confirmation_message_codes():
(file_header,
batch_header_1,
batch_control_1,
batch_header_2,
batch_control_2,
file_control) = confirmation_parser.parse_lines(confirmation_message_codes)
assert isinstance(file_control, FileControl)
assert file_control.message_codes == ['01', '02', 'TT']
entry_reject = """101 9100001912737206971506161217A094101WELLSFARGO COMANAGELLC
5200COMANAGELLC ACH SETTLEMENT 1234567890CCDPAYMENT 150616 1001237370000001
622122100024785323353 0000020125 SOME HVAC COMPANY 0REJ060010000004
820000000400688485350000009462070000002401251234567890 001237370000001
9000002000002000000060093269589000000946207000000411820"""
def test_entry_reject():
(file_header,
batch_header,
rejected_entry,
batch_control,
file_control) = rejection_parser.parse_lines(entry_reject)
assert isinstance(rejected_entry, EntryDetail)
assert rejected_entry.error_code == '6001'
def test_empty_reject_only_zero_items():
# "empty" reject-only
(file_header,
file_control) = rejection_parser.parse_lines(
"101 09100001999999999990609141125A094101WELLS FARGO ABC CORP \n"
"9000000000101000000000000000000000000000000000000000000 \n"
"9999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999\n"
"9999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999\n"
"9999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999\n"
"9999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999\n"
"9999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999\n"
"9999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999\n"
"9999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999\n"
"9999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999"
)
def test_reject_only_file_five_rejected_items():
(file_header,
batch_header,
entry_1,
entry_2,
entry_3,
entry_4,
entry_5,
batch_control,
file_control) = rejection_parser.parse_lines(
"101 09100001999999999990609141125A094101WELLS FARGO ABC CORP \n"
"5200ABC CORP DEPOSIT9999999999PPDPAYABLES 091509090915 1091000012381268\n"
"622771045912999999 00000125000116 C CHANG 0REJ060300000002\n"
"6225072003909999999999 00000233500485 D DAVIDSON 0REJ060300000019\n"
"622541210032199999999999 00000100000989 E EDWARDS 0REJ060300000027\n"
"622580101814499999 00000200001022 F FREEMAN 0REJ060300000030\n"
"622507206213499999999 00000150001177 G GONZALES 0REJ060300000037\n"
"820000000502906764350000000000000000000808509999999999 091000012381356\n"
"9000001000010000000050290676435000000000000000000080850 "
)
def test_item_level_reject_origination():
(file_header,
batch_header,
entry_1,
entry_2,
entry_3,
entry_4,
entry_5,
batch_control,
file_control) = rejection_parser.parse_lines(
# Item-level reject
"101 09100001999999999990609141125A094101WELLS FARGO ABC CORP \n"
"5200ABC CORP 9999999999PPDPAYABLES 091509090915 1091000013841231\n"
"622507003908999999 0000010000 CUSTOMER ONE 0REJ060300000001\n"
"632091000019999999999 0000015000 CUSTOMER TWO 0091000014412012\n"
"6221210002489999999999 0000020000 CUSTOMER THREE 0091000014412013\n"
"6220910000199999999 0000012500 CUSTOMER FOUR 0091000014412014\n"
"62209100001999999 0000017500 CUSTOMER FIVE 0091000014412015\n"
"820000000509010041700000000000000000000650009999999999 009100013841231\n"
"900000100000100000050090100417000000000000000000065000 \n"
"9999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999"
)
def test_batch_level_reject_with_origination():
(file_header,
batch_header_1,
entry_1,
entry_2,
batch_control_1,
batch_header_2,
entry_3,
entry_4,
batch_control_2,
file_control) = rejection_parser.parse_lines(
# Batch-level reject
"101 09100001999999999990609141125A094101WELLS FARGO ABC CORP \n"
"5200ABC CORP 9999999999PPDPAYABLES 091509090915 1REJ055803841231\n"
"622507003908999999 0000010000 CUSTOMER ONE 0REJ055800000001\n"
"632091000019999999999 0000015000 CUSTOMER TWO 0REJ055804412012\n"
"820000002000598003910000000000000000000250009999999999 REJ055803841231\n"
"5200ABC CORP 9999999999PPDPAYROLL 091509090915 1091000013841231\n"
"6230910000199999999 0000012500 EMPLOYEE A 0009100014412014\n"
"62309100001999999 0000017500 EMPLOYEE B 0009100014412015\n"
"820000002000182000020000000000000000000300009999999999 009100013841231\n"
"900000200000100000040000780003930000000000000000055000 "
)
def test_file_level_reject_with_origination():
(file_header,
batch_header_1,
entry_1,
entry_2,
batch_control_1,
batch_header_2,
entry_3,
entry_4,
batch_control_2,
file_control) = rejection_parser.parse_lines(
# File-level reject
"101 09100001999999999990609141125A094101WELLS FARGO ABC CORP REJ00010 \n"
"5200ABC CORP 9999999999PPDPAYABLES 091509090915 1REJ000103841231\n"
"622507003908999999 0000010000 CUSTOMER ONE 0REJ000100000001\n"
"632091000019999999999 0000015000 CUSTOMER TWO 0REJ000104412012\n"
"820000002000598003910000000000000000000250009999999999 REJ000103841231\n"
"5200ABC CORP 9999999999PPDPAYROLL 091509090915 1REJ000103841231\n"
"6230910000199999999 0000012500 EMPLOYEE A 0REJ000104412014\n"
"62309100001999999 0000017500 EMPLOYEE B 0REJ000104412015\n"
"820000002000182000020000000000000000000300009999999999 REJ000103841231\n"
"900000200000100000040000780003930000000000000000055000 "
)
def test_micr_split_item_with_origination():
(file_header,
batch_header,
entry_1,
entry_2,
entry_3,
entry_4,
entry_5,
batch_control,
file_control) = rejection_parser.parse_lines(
# MICR-Split item
"101 09100001999999999990609141125A094101WELLS FARGO ABC CORP \n"
"5200ABC CORP 9999999999PPDPAYABLES 091509090915 1091000013841231\n"
"622507003908999999 0000010000 CUSTOMER ONE 0MICR60300000001\n"
"632091000019999999999 0000015000 CUSTOMER TWO 0091000014412012\n"
"6221210002489999999999 0000020000 CUSTOMER THREE 0091000014412013\n"
"6220910000199999999 0000012500 CUSTOMER FOUR 0091000014412014\n"
"62209100001999999 0000017500 CUSTOMER FIVE 0091000014412015\n"
"820000005009010041700000000000000000000650009999999999 009100013841231\n"
"900000100000100000050090100417000000000000000000065000 \n"
"9999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999"
)
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1
from google.api_core import grpc_helpers_async
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.deploy_v1.types import cloud_deploy
from google.longrunning import operations_pb2 # type: ignore
from .base import CloudDeployTransport, DEFAULT_CLIENT_INFO
from .grpc import CloudDeployGrpcTransport
class CloudDeployGrpcAsyncIOTransport(CloudDeployTransport):
"""gRPC AsyncIO backend transport for CloudDeploy.
CloudDeploy service creates and manages Continuous Delivery
operations on Google Cloud Platform via Skaffold
(https://skaffold.dev).
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(
cls,
host: str = "clouddeploy.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
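    # Note: callers rarely invoke create_channel directly; __init__ below
    # calls it when no explicit ``channel`` argument is supplied.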
def __init__(
self,
*,
host: str = "clouddeploy.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
# use the credentials which are saved
credentials=self._credentials,
# Set ``credentials_file`` to ``None`` here as
# the credentials that we saved earlier should be used.
credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsAsyncClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Quick check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsAsyncClient(
self.grpc_channel
)
# Return the client from cache.
return self._operations_client
@property
def list_delivery_pipelines(
self,
) -> Callable[
[cloud_deploy.ListDeliveryPipelinesRequest],
Awaitable[cloud_deploy.ListDeliveryPipelinesResponse],
]:
r"""Return a callable for the list delivery pipelines method over gRPC.
Lists DeliveryPipelines in a given project and
location.
Returns:
Callable[[~.ListDeliveryPipelinesRequest],
Awaitable[~.ListDeliveryPipelinesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_delivery_pipelines" not in self._stubs:
self._stubs["list_delivery_pipelines"] = self.grpc_channel.unary_unary(
"/google.cloud.deploy.v1.CloudDeploy/ListDeliveryPipelines",
request_serializer=cloud_deploy.ListDeliveryPipelinesRequest.serialize,
response_deserializer=cloud_deploy.ListDeliveryPipelinesResponse.deserialize,
)
return self._stubs["list_delivery_pipelines"]
@property
def get_delivery_pipeline(
self,
) -> Callable[
[cloud_deploy.GetDeliveryPipelineRequest],
Awaitable[cloud_deploy.DeliveryPipeline],
]:
r"""Return a callable for the get delivery pipeline method over gRPC.
Gets details of a single DeliveryPipeline.
Returns:
Callable[[~.GetDeliveryPipelineRequest],
Awaitable[~.DeliveryPipeline]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_delivery_pipeline" not in self._stubs:
self._stubs["get_delivery_pipeline"] = self.grpc_channel.unary_unary(
"/google.cloud.deploy.v1.CloudDeploy/GetDeliveryPipeline",
request_serializer=cloud_deploy.GetDeliveryPipelineRequest.serialize,
response_deserializer=cloud_deploy.DeliveryPipeline.deserialize,
)
return self._stubs["get_delivery_pipeline"]
@property
def create_delivery_pipeline(
self,
) -> Callable[
[cloud_deploy.CreateDeliveryPipelineRequest],
Awaitable[operations_pb2.Operation],
]:
r"""Return a callable for the create delivery pipeline method over gRPC.
Creates a new DeliveryPipeline in a given project and
location.
Returns:
Callable[[~.CreateDeliveryPipelineRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_delivery_pipeline" not in self._stubs:
self._stubs["create_delivery_pipeline"] = self.grpc_channel.unary_unary(
"/google.cloud.deploy.v1.CloudDeploy/CreateDeliveryPipeline",
request_serializer=cloud_deploy.CreateDeliveryPipelineRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_delivery_pipeline"]
@property
def update_delivery_pipeline(
self,
) -> Callable[
[cloud_deploy.UpdateDeliveryPipelineRequest],
Awaitable[operations_pb2.Operation],
]:
r"""Return a callable for the update delivery pipeline method over gRPC.
Updates the parameters of a single DeliveryPipeline.
Returns:
Callable[[~.UpdateDeliveryPipelineRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_delivery_pipeline" not in self._stubs:
self._stubs["update_delivery_pipeline"] = self.grpc_channel.unary_unary(
"/google.cloud.deploy.v1.CloudDeploy/UpdateDeliveryPipeline",
request_serializer=cloud_deploy.UpdateDeliveryPipelineRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["update_delivery_pipeline"]
@property
def delete_delivery_pipeline(
self,
) -> Callable[
[cloud_deploy.DeleteDeliveryPipelineRequest],
Awaitable[operations_pb2.Operation],
]:
r"""Return a callable for the delete delivery pipeline method over gRPC.
Deletes a single DeliveryPipeline.
Returns:
Callable[[~.DeleteDeliveryPipelineRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_delivery_pipeline" not in self._stubs:
self._stubs["delete_delivery_pipeline"] = self.grpc_channel.unary_unary(
"/google.cloud.deploy.v1.CloudDeploy/DeleteDeliveryPipeline",
request_serializer=cloud_deploy.DeleteDeliveryPipelineRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_delivery_pipeline"]
@property
def list_targets(
self,
) -> Callable[
[cloud_deploy.ListTargetsRequest], Awaitable[cloud_deploy.ListTargetsResponse]
]:
r"""Return a callable for the list targets method over gRPC.
Lists Targets in a given project and location.
Returns:
Callable[[~.ListTargetsRequest],
Awaitable[~.ListTargetsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_targets" not in self._stubs:
self._stubs["list_targets"] = self.grpc_channel.unary_unary(
"/google.cloud.deploy.v1.CloudDeploy/ListTargets",
request_serializer=cloud_deploy.ListTargetsRequest.serialize,
response_deserializer=cloud_deploy.ListTargetsResponse.deserialize,
)
return self._stubs["list_targets"]
@property
def get_target(
self,
) -> Callable[[cloud_deploy.GetTargetRequest], Awaitable[cloud_deploy.Target]]:
r"""Return a callable for the get target method over gRPC.
Gets details of a single Target.
Returns:
Callable[[~.GetTargetRequest],
Awaitable[~.Target]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_target" not in self._stubs:
self._stubs["get_target"] = self.grpc_channel.unary_unary(
"/google.cloud.deploy.v1.CloudDeploy/GetTarget",
request_serializer=cloud_deploy.GetTargetRequest.serialize,
response_deserializer=cloud_deploy.Target.deserialize,
)
return self._stubs["get_target"]
@property
def create_target(
self,
) -> Callable[
[cloud_deploy.CreateTargetRequest], Awaitable[operations_pb2.Operation]
]:
r"""Return a callable for the create target method over gRPC.
Creates a new Target in a given project and location.
Returns:
Callable[[~.CreateTargetRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_target" not in self._stubs:
self._stubs["create_target"] = self.grpc_channel.unary_unary(
"/google.cloud.deploy.v1.CloudDeploy/CreateTarget",
request_serializer=cloud_deploy.CreateTargetRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_target"]
@property
def update_target(
self,
) -> Callable[
[cloud_deploy.UpdateTargetRequest], Awaitable[operations_pb2.Operation]
]:
r"""Return a callable for the update target method over gRPC.
Updates the parameters of a single Target.
Returns:
Callable[[~.UpdateTargetRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_target" not in self._stubs:
self._stubs["update_target"] = self.grpc_channel.unary_unary(
"/google.cloud.deploy.v1.CloudDeploy/UpdateTarget",
request_serializer=cloud_deploy.UpdateTargetRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["update_target"]
@property
def delete_target(
self,
) -> Callable[
[cloud_deploy.DeleteTargetRequest], Awaitable[operations_pb2.Operation]
]:
r"""Return a callable for the delete target method over gRPC.
Deletes a single Target.
Returns:
Callable[[~.DeleteTargetRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_target" not in self._stubs:
self._stubs["delete_target"] = self.grpc_channel.unary_unary(
"/google.cloud.deploy.v1.CloudDeploy/DeleteTarget",
request_serializer=cloud_deploy.DeleteTargetRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_target"]
@property
def list_releases(
self,
) -> Callable[
[cloud_deploy.ListReleasesRequest], Awaitable[cloud_deploy.ListReleasesResponse]
]:
r"""Return a callable for the list releases method over gRPC.
Lists Releases in a given project and location.
Returns:
Callable[[~.ListReleasesRequest],
Awaitable[~.ListReleasesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_releases" not in self._stubs:
self._stubs["list_releases"] = self.grpc_channel.unary_unary(
"/google.cloud.deploy.v1.CloudDeploy/ListReleases",
request_serializer=cloud_deploy.ListReleasesRequest.serialize,
response_deserializer=cloud_deploy.ListReleasesResponse.deserialize,
)
return self._stubs["list_releases"]
@property
def get_release(
self,
) -> Callable[[cloud_deploy.GetReleaseRequest], Awaitable[cloud_deploy.Release]]:
r"""Return a callable for the get release method over gRPC.
Gets details of a single Release.
Returns:
Callable[[~.GetReleaseRequest],
Awaitable[~.Release]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_release" not in self._stubs:
self._stubs["get_release"] = self.grpc_channel.unary_unary(
"/google.cloud.deploy.v1.CloudDeploy/GetRelease",
request_serializer=cloud_deploy.GetReleaseRequest.serialize,
response_deserializer=cloud_deploy.Release.deserialize,
)
return self._stubs["get_release"]
@property
def create_release(
self,
) -> Callable[
[cloud_deploy.CreateReleaseRequest], Awaitable[operations_pb2.Operation]
]:
r"""Return a callable for the create release method over gRPC.
Creates a new Release in a given project and
location.
Returns:
Callable[[~.CreateReleaseRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_release" not in self._stubs:
self._stubs["create_release"] = self.grpc_channel.unary_unary(
"/google.cloud.deploy.v1.CloudDeploy/CreateRelease",
request_serializer=cloud_deploy.CreateReleaseRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_release"]
@property
def approve_rollout(
self,
) -> Callable[
[cloud_deploy.ApproveRolloutRequest],
Awaitable[cloud_deploy.ApproveRolloutResponse],
]:
r"""Return a callable for the approve rollout method over gRPC.
Approves a Rollout.
Returns:
Callable[[~.ApproveRolloutRequest],
Awaitable[~.ApproveRolloutResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "approve_rollout" not in self._stubs:
self._stubs["approve_rollout"] = self.grpc_channel.unary_unary(
"/google.cloud.deploy.v1.CloudDeploy/ApproveRollout",
request_serializer=cloud_deploy.ApproveRolloutRequest.serialize,
response_deserializer=cloud_deploy.ApproveRolloutResponse.deserialize,
)
return self._stubs["approve_rollout"]
@property
def list_rollouts(
self,
) -> Callable[
[cloud_deploy.ListRolloutsRequest], Awaitable[cloud_deploy.ListRolloutsResponse]
]:
r"""Return a callable for the list rollouts method over gRPC.
Lists Rollouts in a given project and location.
Returns:
Callable[[~.ListRolloutsRequest],
Awaitable[~.ListRolloutsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_rollouts" not in self._stubs:
self._stubs["list_rollouts"] = self.grpc_channel.unary_unary(
"/google.cloud.deploy.v1.CloudDeploy/ListRollouts",
request_serializer=cloud_deploy.ListRolloutsRequest.serialize,
response_deserializer=cloud_deploy.ListRolloutsResponse.deserialize,
)
return self._stubs["list_rollouts"]
@property
def get_rollout(
self,
) -> Callable[[cloud_deploy.GetRolloutRequest], Awaitable[cloud_deploy.Rollout]]:
r"""Return a callable for the get rollout method over gRPC.
Gets details of a single Rollout.
Returns:
Callable[[~.GetRolloutRequest],
Awaitable[~.Rollout]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_rollout" not in self._stubs:
self._stubs["get_rollout"] = self.grpc_channel.unary_unary(
"/google.cloud.deploy.v1.CloudDeploy/GetRollout",
request_serializer=cloud_deploy.GetRolloutRequest.serialize,
response_deserializer=cloud_deploy.Rollout.deserialize,
)
return self._stubs["get_rollout"]
@property
def create_rollout(
self,
) -> Callable[
[cloud_deploy.CreateRolloutRequest], Awaitable[operations_pb2.Operation]
]:
r"""Return a callable for the create rollout method over gRPC.
Creates a new Rollout in a given project and
location.
Returns:
Callable[[~.CreateRolloutRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_rollout" not in self._stubs:
self._stubs["create_rollout"] = self.grpc_channel.unary_unary(
"/google.cloud.deploy.v1.CloudDeploy/CreateRollout",
request_serializer=cloud_deploy.CreateRolloutRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_rollout"]
@property
def get_config(
self,
) -> Callable[[cloud_deploy.GetConfigRequest], Awaitable[cloud_deploy.Config]]:
r"""Return a callable for the get config method over gRPC.
Gets the configuration for a location.
Returns:
Callable[[~.GetConfigRequest],
Awaitable[~.Config]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_config" not in self._stubs:
self._stubs["get_config"] = self.grpc_channel.unary_unary(
"/google.cloud.deploy.v1.CloudDeploy/GetConfig",
request_serializer=cloud_deploy.GetConfigRequest.serialize,
response_deserializer=cloud_deploy.Config.deserialize,
)
return self._stubs["get_config"]
def close(self):
return self.grpc_channel.close()
__all__ = ("CloudDeployGrpcAsyncIOTransport",)
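# A minimal, generic sketch (an illustrative addition, not part of the generated
# transport above) of the lazy stub-caching pattern each property uses: build the
# callable once on first access, cache it under the method name, and reuse it on
# later accesses. The factory below is an assumed stand-in for
# grpc_channel.unary_unary(...), not the real gRPC API surface.
class _LazyStubCache:
    """Create each stub at most once and hand back the cached callable."""

    def __init__(self, make_stub):
        self._make_stub = make_stub  # factory: method name -> callable
        self._stubs = {}

    def get(self, name):
        # Same shape as the properties above: create on a cache miss, then reuse.
        if name not in self._stubs:
            self._stubs[name] = self._make_stub(name)
        return self._stubs[name]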
|
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file license.txt or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
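# Illustrative helper (an addition, not used by the miner above) showing the
# core proof-of-work test performed inside Miner.work(): double-SHA256 the
# 80-byte block header and compare the resulting 256-bit integer against the
# target. Byte ordering is simplified here; the real loop above additionally
# applies bufreverse()/wordreverse() to match getwork's data layout.
def pow_check(block_header_80, target_int):
    import binascii
    first_hash = hashlib.sha256(block_header_80).digest()
    second_hash = hashlib.sha256(first_hash).digest()
    # Bitcoin interprets the hash as a little-endian 256-bit number, so the
    # digest is reversed before converting it to an integer for comparison.
    hash_int = int(binascii.hexlify(second_hash[::-1]), 16)
    return hash_int < target_int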
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 11235
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
|
import shutil  # high-level file operations such as copying files
import webbrowser
from params import *
import socket
import requests
import re
import time
import sys
import urllib.request
import platform
import random
import datetime
from functools import reduce
import doctest
meta_input = ""
def sample_browser(sample_number=1,randomize=True):
'''
    Choose a sample site and open it in the browser
    :param sample_number: index of the sample site to open (default 1)
    :type sample_number:int
    :param randomize: open a randomly chosen sample site instead of sample_number
    :type randomize:bool
    :return: None
'''
try:
        response = input("Please enter the [S] key if you want to see a sample site of qpage (or any other key to ignore this step)")
        if response.upper() == "S":
            if randomize:
webbrowser.open(SAMPLE_SITE_LIST[random.randint(0,len(SAMPLE_SITE_LIST)-1)])
elif sample_number<len(SAMPLE_SITE_LIST):
webbrowser.open(SAMPLE_SITE_LIST[sample_number])
except Exception as e:
print("Error In Sample Browsing")
error_log(str(e))
def list_randomize(input_list):
'''
    :param input_list: the input list
:type input_list:list
:return: randomized list
>>> random.seed(1)
>>> list_randomize([1,2,3,5,6])
[2, 1, 5, 3, 6]
'''
response=[]
    input_list_copy = list(input_list)  # work on a copy so the caller's list is not emptied
iteration_number=len(input_list_copy)
for i in range(iteration_number):
response.append(input_list_copy.pop(random.randint(0,len(input_list_copy)-1)))
return response
def email_at(text,USE_RE=False,replace_char=" at "):
'''
:param text: input text of pages
:param USE_RE: flag for using regular expression (default False)
:param replace_char: replace char for @
:type text:str
:type USE_RE:bool
:type replace_char:str
:return: replaced string
    >>> email_at("example@yahoo.com")
'example at yahoo.com'
>>>
'''
    if USE_RE:
        return re.sub("@", replace_char, text)
    return text.replace("@", replace_char)
def show_items(enum_list):
"""
show item of enum_list
    :param enum_list: the list that should be shown
:type enum_list : list
"""
for i, item in enumerate(enum_list):
print(str(i + 1) + "-" + item)
def print_logo(external=False):
'''
print qpage logo by sequential characters
:param external: flag for choosing internal or external logo
:type external:bool
:return: None
>>> print_logo()
____ ___
/ __ \ / _ \___ ____ ____
/ /_/ / / ___/ _ `/ _ `/ -_)
\___\_\/_/ \_,_/\_, /\__/
/___/
'''
if external==True:
if "logo.txt" in os.listdir(RESOURCE_DIR):
logo_path = os.path.join(RESOURCE_DIR, 'logo.txt')
with open(logo_path, "r") as logo_file:
for line in logo_file:
print(line.rstrip())
else:
pass
else:
print(LOGO)
def convert_bytes(num):
"""
convert num to idiomatic byte unit
:param num: the input number.
:type num:int
:return: str
>>> convert_bytes(200)
'200.0 bytes'
>>> convert_bytes(6000)
'5.9 KB'
>>> convert_bytes(80000)
'78.1 KB'
"""
for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:
if num < 1024.0:
return "%3.1f %s" % (num, x)
num /= 1024.0
def file_size():
"""
Print the size of output file
:return: None
>>> file_size() # if there is no output directory
Access Error
>>> file_size() # if there is a valid output directory
Used SPACE --> 78.1 KB
"""
try:
list_of_files = os.listdir(OUT_DIR)
response = 0
for file in list_of_files:
file_info = os.stat(os.path.join(OUT_DIR, file))
response += file_info.st_size
print_line(70, "*")
print("Used SPACE --> " + convert_bytes(response))
print_line(70, "*")
    except Exception:
        print("Access Error")
def download_badge(address):
"""
Download badge for website
:param address: the address that should get badge
:type address : str
:return: None
"""
r = requests.get(address, stream=True)
with open(os.path.join(OUT_DIR, "badge.svg"), 'wb') as f:
shutil.copyfileobj(r.raw, f)
def random_badge_color():
"""
return a random color for badge
:return: badge color as string
>>> random.seed(1)
>>> random_badge_color()
'yellowgreen'
"""
random_index = random.randint(0, len(BADGE_COLOR_LIST) - 1)
return BADGE_COLOR_LIST[random_index]
def system_details():
"""
    Show details of the system the code is running on
:return: system details as string (node , processor , platform)
>>> system_details()
'DESKTOP-B16C9BR , Intel64 Family 6 Model 94 Stepping 3, GenuineIntel , Windows-10-10.0.10240-SP0'
"""
return platform.node() + " , " + platform.processor() + " , " + platform.platform()
def generation_time(time_1=None):
"""
Calculate the generation time
:param time_1: time that passed but not counted in generation time
:type time_1:float
:return :the amount of time that passed as float
"""
if time_1 is None:
return time.perf_counter()
else:
return time.perf_counter() - time_1
def find_global_ip():
"""
Find the global IP for using in API
:return: return the IP as string
"""
try:
response = requests.get(IP_FINDER_API)
return response.text[:-1]
except Exception as e:
error_log(e)
return "0.0.0.0"
def create_badge(subject="qpage", status=VERSION, color="blue", random=False):
'''
this function use shields.io template for creating badges
:param subject: badge subject
:param status: badge status ( in our case version)
:param color: badge color
:param random: randomization flag
:type subject:str
:type status:str
:type color:str
:type random:bool
:return: shields.io badge addresses as string
>>> create_badge()
'https://img.shields.io/badge/qpage-1.9-blue.svg'
>>> random.seed(1)
>>> create_badge(random=True)
'https://img.shields.io/badge/qpage-1.9-yellowgreen.svg'
'''
if random:
color = random_badge_color()
else:
if color not in BADGE_COLOR_LIST:
color = "orange"
badge_adr = ADV_BADGE_STATIC + subject + "-" + status + "-" + color + '.svg'
return badge_adr
def is_sample_downloaded():
"""
    Check whether the sample site material has been downloaded or not
    :return : list of indexes of the materials that still need to be downloaded
"""
download_list = []
if "profile.png" not in os.listdir(IMAGE_DIR):
download_list.append(0)
if "font.TTF" not in os.listdir(FONT_DIR):
download_list.append(1)
if "resume.pdf" not in os.listdir(DOC_DIR) and "resume.txt" not in os.listdir(DOC_DIR):
download_list.extend([2, 3])
if "icon.ico" not in os.listdir(IMAGE_DIR):
download_list.append(4)
return download_list
def download_lorem():
"""
Download the lorem file
:return: None
"""
if internet():
lorem_path = os.path.join(RESOURCE_DIR, 'Latin-Lipsum.txt')
urllib.request.urlretrieve("http://www.qpage.ir/sample/Latin-Lipsum.txt", lorem_path)
else:
print("Error In Download Lorem")
def read_lorem(char=100,external=False,randomize=True):
"""
find and read lorem
    :param char: the number of words to return
:param external: flag for using external of internal resource for lorem_ipsum
:param randomize: flag for using randomization
:type char:int
:type external:bool
:type randomize:bool
:return : the lorem string
>>> read_lorem(5)
'Lorem ipsum dolor sit amet,'
"""
try:
if external==True:
if "Latin-Lipsum.txt" not in os.listdir(RESOURCE_DIR):
download_lorem()
lorem_path = os.path.join(RESOURCE_DIR, 'Latin-Lipsum.txt')
lorem_file = open(lorem_path, "r")
lorem_text = lorem_file.read()
lorem_file.close()
if randomize==True:
return " ".join(list_randomize(lorem_text.split(" ")[:char]))
else:
return " ".join(lorem_text.split(" ")[:char])
else:
if randomize==True:
return " ".join(list_randomize(LOREM_IPSUM.split(" ")[:char]))
else:
return " ".join(LOREM_IPSUM.split(" ")[:char])
except Exception as e:
error_log(str(e))
return None
def sample_site_download(item_list):
"""
Download sample material for make a fake site
:param item_list: Download items form item_list
:type item_list:list
"""
try:
if internet():
for i in item_list:
print("Downloading " + SAMPLE_DICT_MESSAGE[i] + " . . . [" + str(i + 1) + "/5]")
print_line(70)
urllib.request.urlretrieve(list(SAMPLE_DICT_ADDR.values())[i],
os.path.join(IMAGE_DIR, list(SAMPLE_DICT_ADDR.keys())[i]))
print("Done! All Material Downloaded")
print_line(70)
else:
print("Error In Internet Connection!")
print_line(70)
except Exception as e:
error_log(e)
print("Error in downloading sample files check your internet conection")
print_line(70)
def logger(status=False, perf_time=None):
"""
Create the build log of the app
:param status: show status of app.
    :param perf_time : the time spent generating the files
:type status:bool
:type perf_time:float
"""
if "log" not in os.listdir():
os.mkdir("log")
file = open(reduce(os.path.join, [os.getcwd(), "log", "build_log.txt"]), "a")
if not status:
file.write("Failed " + str(datetime.datetime.now()) + "\n")
else:
file.write("Success " + str(datetime.datetime.now()) + "\n")
file.write("Generation Time: " + str(perf_time) + "\n")
file.close()
def error_log(msg):
"""
Create the errorlog of the app
:param msg: error message
:type msg:str
"""
if "log" not in os.listdir():
os.mkdir("log")
file = open(reduce(os.path.join, [os.getcwd(), "log", "error_log.txt"]), "a")
file.write(str(datetime.datetime.now()) + " --> " + str(msg) + "\n")
file.close()
def print_line(number, char="-"):
"""
Print a Line
    :param number: the number of chars in the line
    :param char : the char used to draw the line
:type number :int
:type char : str
>>> print_line(4)
----
>>> print_line(5,"%")
%%%%%
"""
line = ""
i = 0
while (i < number):
line += char
i += 1
print(line)
def name_standard(name):
"""
return the Standard VERSION of the input word
:param name: the name that should be standard
:type name:str
:return name: the standard form of word as string
>>> name_standard('test')
'Test'
>>> name_standard('TesT')
'Test'
"""
    response_name = name[0].upper() + name[1:].lower()
    return response_name
def address_print():
"""
Print the working directory
:return:None
"""
print_line(70, "*")
print("Where --> " + SOURCE_DIR)
print_line(70, "*")
def create_folder():
"""
    This function creates the empty project folders at startup
:return:folder status as boolean
"""
folder_flag = 0
list_of_folders = os.listdir(SOURCE_DIR)
for i in ["doc", "image", "output", "font"]:
if i not in list_of_folders:
os.mkdir(i)
folder_flag += 1
if i == "doc":
file = open(os.path.join(DOC_DIR, "index.txt"), "w")
if read_lorem() is None:
file.write("This is For First Page . . .")
else:
file.write(read_lorem())
file.close()
return bool(folder_flag)
def page_name_update():
"""
This Function Update Page Names
:return: None
"""
for i in os.listdir(DOC_DIR):
if i.find(".txt") != -1 and i[:-4].upper() != "INDEX":
ACTUAL_NAME.append(i[:-4])
PAGE_NAME.append(i[:-4])
def menu_maker():
"""
Top Menu Maker In each html page
:return:site menu as string
"""
result = "<center>"
for i, item in enumerate(PAGE_NAME):
if item == "Home":
targets_blank = ""
else:
#targets_blank = 'target="blank"'
targets_blank = ''
# Hyper Link To Each Page In HTML File
result += '\t<a href="' \
+ ACTUAL_NAME[i] + '.html"' + targets_blank + '>' + name_standard(item) + "</a>\n"
result += " \n"
result += "</center>"
result = result + "\t\t" + BREAK_LINE # Add Break line to End Of The Menu
return result # Return All Of The Menu
def menu_writer(): #
"""
Write menu_maker output in html and close file after
:return:None
"""
message = menu_maker()
PAGE_NAME_length = len(PAGE_NAME)
for i in range(PAGE_NAME_length):
file = open(os.path.join(OUT_DIR, ACTUAL_NAME[i] + ".html"), "a")
file.write(message)
file.close()
def print_meta():
"""
Add meta to html files
:return static_meta: The meta that created
"""
global meta_input
meta_input = input("Please Enter Your Name : ")
static_meta = '<meta name="description" content="Welcome to HOMEPAGE of ' + meta_input + '"/>\n'
static_meta += '<meta property="og:title" content="' + meta_input + '"/>\n'
static_meta += '<meta property="og:site_name" content="' + meta_input + '"/>\n'
static_meta += '<meta property="og:image" content="favicon.ico" />\n'
if len(meta_input) < 4:
warnings.append("[Warning] Your input for name is too short!!")
return static_meta
def html_init(name):
"""
Create Initial Form Of each Html Page Like Title And HTML And Body Tag.
:param name: the name of html file.
:type name:str
"""
html_name = os.path.join(OUT_DIR, name + ".html")
file = open(html_name, "w")
file.write("<html>\n")
file.write("\t<head>\n")
if name == "index":
file.write("\t\t<title>Welcome To My HOMEPAGE</title>\n")
else:
file.write("\t\t<title>" + name_standard(name) + "</title>\n")
file.write('<link rel="stylesheet" href="styles.css" type="text/css"/>\n')
css_link = 'https://maxcdn.bootstrapcdn.com/bootstrap/3.3.6/css/bootstrap.min.css'
    file.write('<link rel="stylesheet" href="' + css_link + '" type="text/css"/>\n')
if name == 'index': # Add meta only for index page
file.write(print_meta())
file.write("\t</head>\n")
file.write('\t<body class="body_tag">\n')
file.close()
def html_end(name):
"""
Create End Of The Html and close file
:param name: The name of html file.
:type name:str
"""
html_name = os.path.join(OUT_DIR, name + ".html")
file = open(html_name, "a")
file.write("\n\t</body>\n")
file.write("</html>")
file.close()
def close_files():
"""
Close all the files.
:return:None
"""
for i in files:
if i.closed == False:
i.close()
def LSM_translate(line, center):
# TODO : write a document for this function
"""
Convert size and style of each line in input plaintext
:param line: the input line.
:param center: flag of putting text in center
:type center:bool
:type line:str
    :return : a list containing text, header_end and header_start
"""
    line = line.strip()
text = line
header_start = '<h4 class="color_tag">'
header_end = "</h4>"
if line.find("[L]") != -1:
header_start = '<h2 class="color_tag">'
header_end = "</h2>"
text = line[3:]
elif line.find("[S]") != -1:
header_start = '<h5 class="color_tag">'
header_end = "</h5>"
text = line[3:]
elif line.find("[M]") != -1:
text = line[3:]
    if center:  # Center the text when the flag is set (manual centering)
header_start = "<center>" + header_start
header_end += "</center>"
if text.find("[center]") != -1: # Find Center Tag In Each Line
header_start = "<center>" + header_start
header_end += "</center>"
text = text[:text.find("[center]")]
return [text, header_end, header_start]
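# Illustrative helper (an addition, not part of the original qpage flow) showing
# how LSM_translate maps the [L]/[S]/[M] size markers and the [center] tag onto
# header tags; print_text() then joins the three returned pieces into one line.
def _lsm_translate_example():
    text, header_end, header_start = LSM_translate("[L]About Me[center]", False)
    # text == 'About Me'
    # header_start == '<center><h2 class="color_tag">'
    # header_end == '</h2></center>'
    return header_start + text + header_end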
def print_text(text_file, file, center=False, close=False): # Write Text Part Of Each Page
"""
Write the text part of each page
:param text_file: Text that should be written.
:param file : The file that text will be written inside.
:param center: flag of putting text in center
:param close : flag of closing file after editing
:type close : bool
:type center: bool
:type file:_io.TextIOWrapper
:type text_file:str
:return:None
"""
text_code = ""
for line in text_file:
if len(line) == 1:
text_code = SPACE
else:
text_header = LSM_translate(line, center)
text = email_at(text=text_header[0])
header_end = text_header[1]
header_start = text_header[2]
text_code = header_start + text + header_end + "\n"
file.write(text_code)
if close:
file.close()
def print_image(file, image_format="jpg", close=False):
"""
Write Image Part OF The Page.
:param file: The file that images will be added.
:param close : flag of closing file after editing
:param image_format: the format of image
:type close : bool
:type image_format:str
:type file:_io.TextIOWrapper
:return:None
"""
for i, item in enumerate(SIZE_BOX):
print(i, "-", item)
image_size = int(input("Please Enter Profile Image Size : ")) # Choose Profile Image Size
image_size_string = SIZE_BOX[2] # Getting Html String From SIZE_BOX list default mode (Medium)
if 0 <= image_size < len(SIZE_BOX):
image_size_string = SIZE_BOX[image_size]
image_code = '<center><img src="image.' + image_format + '"' + ', width=' + image_size_string + ' alt="profile image"></img></center>\n'
file.write(image_code)
if close:
file.close()
def print_download(file, name, link, center=False, close=False):
"""
Create Download Link in page
:param file: The file that contain html of page.
:param name: The name of the link
:param link: The place that name is Linked
:param center: put the text in center
:param close : close file after done editing
:type center: bool
:type close : bool
:type link:str
:type name:str
:type file:_io.TextIOWrapper
:return:None
"""
link_code = "<a href=" + '"' + link + '"' + TARGET_BLANK + '>' + name + "</a>"
if center:
link_code = "<center>" + link_code + "</center>"
file.write(link_code + "\n")
file.write(BREAK_LINE)
if close:
file.close()
def print_adv(file, close=True):
"""
Print the advertisement (qpage footer)
    :param file : the file that the footer is appended to.
:param close : Close file after add
:type file:_io.TextIOWrapper
:type close:bool
:return: None
"""
file.write(BREAK_LINE)
file.write(
'<center>' + "<p>" + "Generated " + today_time + " By" + "</p>" + '<a href=' + '"' + HOMEPAGE + '"' + TARGET_BLANK + '>' + '<img src="' + create_badge(
            random=True) + '" alt="Qpage">' + '</a> </center>')
if close:
file.close()
def build_index(file):
"""
Find and build index page
:param file: The index file.
:type file:_io.TextIOWrapper
:return:None
"""
image_name = ""
img_format = "jpg"
file_of_images = os.listdir(IMAGE_DIR)
for i in file_of_images:
for form in IMFORMAT_BOX:
if i.find("." + form) != -1:
image_name = os.path.join(IMAGE_DIR, i)
img_format = form
global IMAGE_COUNTER
IMAGE_COUNTER = 1
break
shutil.copyfile(image_name, os.path.join(OUT_DIR, "image." + img_format))
print_image(file, img_format)
def build_resume(file):
"""
Find and build resume page.
:param file: The resume file.
:type file:_io.TextIOWrapper
:return:None
"""
resume_name = ""
file_of_docs = os.listdir(DOC_DIR)
for i in file_of_docs:
if i.find(".pdf") != -1:
resume_name = os.path.join(DOC_DIR, i)
global PDF_COUNTER
PDF_COUNTER = 1
break
shutil.copyfile(resume_name, os.path.join(OUT_DIR, "Resume.pdf"))
print_download(file, "Download Full Version", "Resume.pdf", center=True)
def contain(name):
"""
Main function that open each page HTML file and call other function to write data in it
:param name: the name of the file that should be written
:type name:str
:return:None
"""
#
file = open(os.path.join(OUT_DIR, name + ".html"), "a")
text_file = open(os.path.join(DOC_DIR, name + ".txt"), "r")
files.append(file)
files.append(text_file)
if name.upper() == "INDEX":
build_index(file)
elif name.upper() == "RESUME":
build_resume(file)
print_text(text_file, file)
if name.upper() == "INDEX":
print_adv(file)
def clear_folder(path):
"""
    This function gets the path of a folder and deletes its contents
    :param path: the path whose contents will be deleted.
:type path:str
:return: None
"""
if os.path.exists(path):
list_of_files = os.listdir(path)
for file in list_of_files:
os.remove(os.path.join(path, file))
else:
os.mkdir(path)
def print_warning():
"""
Print warnings!
:return:None
"""
print(str(len(warnings)) + " Warning , 0 Error")
show_items(warnings)
def get_color_code():
"""
Ask for selecting color of text and background
:return list: background and text color
>>> get_color_code()
0 - White
1 - Black
2 - Purple
3 - Yellow
4 - Orange
5 - Green
6 - Blue
Please enter your background color : 1
Please enter your text color : 2
[1, 2]
"""
for i, item in enumerate(COLOR_BOX):
print(i, "-", item)
back_color_code = int(input("Please enter your background color : "))
if back_color_code not in range(7):
back_color_code = 0
text_color_code = int(input("Please enter your text color : "))
if text_color_code not in range(7):
text_color_code = 1
return [back_color_code, text_color_code]
def color_code_map():
"""
Check and insert colors that is chosen.
:return list: background and text color
"""
[back_color_code, text_color_code] = get_color_code()
if text_color_code == back_color_code:
warnings.append(WARNING_DICT["color_warning"] + " Your text color and background color are same!!")
background_color = COLOR_BOX[back_color_code] # convert code to color string in COLOR_BOX
text_color = COLOR_BOX[text_color_code] # convert code to color string in COLOR_BOX
return [background_color, text_color]
def css_font(font_folder):
"""
    Search the font folder for font files.
    :param font_folder: list of file names in the font directory.
    :type font_folder:list
:return list : font_flag and the current format
"""
font_flag = 0 # 0 If there is no font file in font_folder
current_FONT_FORMAT = None
for i in font_folder:
for j in FONT_FORMAT: # search for other font format in font box
if i.lower().find(j) != -1: # If there is a font in font folder
shutil.copyfile(os.path.join(FONT_DIR, i),
os.path.join(OUT_DIR, "qpage" + j)) # copy font file to output folder
font_flag = 1 # Turn Flag On
current_FONT_FORMAT = j # font format of current selected font for css editing
return [font_flag, current_FONT_FORMAT]
def font_creator(css_file, font_section):
"""
Ask and Select font.
:param css_file: the file that font css will be added to.
:param font_section: the font section of css file
:type css_file:_io.TextIOWrapper
:type font_section:str
:return font_section: the font section of css after edit as string
"""
font_folder = os.listdir(FONT_DIR)
details = css_font(font_folder)
current_FONT_FORMAT = details[1]
font_flag = details[0]
if font_flag == 1: # check flag if it is 1
css_file.write(
"@font-face{\nfont-family:qpagefont;\nsrc:url(qpage"
+ current_FONT_FORMAT
+ ");\n}\n") # Write font-face in html
font_section = "font-family:qpagefont;\n" # Update Font Section For Body Tag
for i, item in enumerate(FONTSTYLE_BOX):
print(i, "-", item)
font_style = int(input(" Please choose your font style "))
if font_style < len(FONTSTYLE_BOX):
font_style = FONTSTYLE_BOX[font_style]
else:
font_style = "normal"
font_section = font_section + "font-style:" + font_style + ";\n"
else:
warnings.append(WARNING_DICT["font_warning"] + " There is no specific font set for this website!!")
return font_section
def css_creator():
"""
    Ask for the background and text colors and build the base CSS file
:return:None
"""
font_section = 'font-family : Georgia , serif;\n'
colors = color_code_map()
background_color = colors[0]
text_color = colors[1]
css_file = open(os.path.join(OUT_DIR, "styles.css"), "w") # open css file
font_section = font_creator(css_file, font_section)
css_file.write(
".body_tag{\n"
+ "background-color:"
+ background_color
+ ";\n"
+ font_section
+ CSS_MARGIN
+ CSS_ANIMATION_1
+ "}\n") # write body tag
css_file.write(".color_tag{\n" + "color:" + text_color + ";\n}") # write color_tag in css
css_file.write(CSS_ANIMATION_2)
css_file.close() # close css file
def preview():
"""
Preview website in browser
:return:None
"""
# TODO: not working on unix
webbrowser.open(os.path.join(OUT_DIR, "index.html"))
def error_finder():
"""
    Check for errors and collect pass/fail messages
    :return : error and pass vectors as a list
"""
error_vector = []
pass_vector = []
# image_list = os.listdir(IMAGE_DIR)
doc_list = os.listdir(DOC_DIR)
if IMAGE_COUNTER == 1:
pass_vector.append("[Pass] Your profile image in OK!!")
else:
error_vector.append(ERROR_DICT["image_error"] + " Your profile image is not in correct format")
if len(doc_list) == 0:
error_vector.append(ERROR_DICT["empty_error"] + "There is no file in doc folder ( index.txt and .pdf file in "
"necessary)")
else:
if "index.txt" in doc_list:
pass_vector.append("[Pass] index.txt file OK!")
else:
error_vector.append(ERROR_DICT["firstpage_error"] + " index.txt is not in doc folder!")
if PDF_COUNTER == 0:
error_vector.append(ERROR_DICT["resume_error"] + "[Error] Where Is Your Resume File? It should be in doc "
"folder")
else:
pass_vector.append("[Pass] Your Resume File is OK!!")
return [error_vector, pass_vector]
def icon_creator():
"""
Find .ico file and use it as favicon of website.
:return:None
"""
icon_flag = 0
for file in os.listdir(IMAGE_DIR):
if file.endswith('ico'):
shutil.copy(os.path.join(IMAGE_DIR, file), OUT_DIR)
os.rename(os.path.join(OUT_DIR, file), os.path.join(OUT_DIR, 'favicon.ico'))
icon_flag = 1
break
if icon_flag == 0:
if "favicon.ico" in os.listdir(SOURCE_DIR):
shutil.copy(os.path.join(SOURCE_DIR, "favicon.ico"), OUT_DIR)
warnings.append(WARNING_DICT["icon_warning"] + " There is no icon for this website")
def robot_maker():
"""
Create Robots.txt for pages
:return:None
"""
robots = open(os.path.join(OUT_DIR, "robots.txt"), "w")
robots.write("User-agent: *\n")
robots.write("Disallow: ")
robots.close()
def internet(host="8.8.8.8", port=53, timeout=3):
"""
Check Internet Connections.
:param host: the host that check connection to
:param port: port that check connection with
    :param timeout: connection timeout in seconds
:type host:str
:type port:int
:type timeout:int
:return bool: True if Connection is Stable
>>> internet() # if there is stable internet connection
True
>>> internet() # if there is no stable internet connection
False
"""
try:
socket.setdefaulttimeout(timeout)
socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect((host, port))
return True
except Exception as ex:
error_log(str(ex))
return False
def server():
"""
Get Server response.
:return:None
>>> server()
Installed Saved!
"""
# global meta_input
headers = {'content-type': 'application/json', "NAME": meta_input, "VERSION": VERSION, "SYSTEM": system_details(),
"IP": find_global_ip()}
try:
response = requests.get(SERVER_API, headers=headers)
if response.status_code == 200:
print("Installed Saved!")
except Exception as e:
error_log(str(e))
def version_control():
"""
Check and update version status
:return:None
"""
try:
# print("Check for new VERSION . . .")
# print_line(70)
VERSION_pattern = r"last_VERSION:(.+)"
if internet():
response = requests.get("http://www.qpage.ir/releases.html")
body = response.text
last_VERSION = float(re.findall(VERSION_pattern, body)[0][:-3])
if last_VERSION > float(VERSION):
print_line(70)
print("**New VERSION Of Qpage Is Available Now (VERSION " + str(last_VERSION) + ")**")
print("Download Link -->" + "https://github.com/sepandhaghighi/qpage/archive/v" + str(
last_VERSION) + ".zip")
print_line(70)
else:
# TODO : fix VERSION control else
pass
# print("Already Updated!!!")
# print_line(70)
except Exception as e:
error_log(str(e))
pass
def enter_to_exit():
"""
Quit Project by pressing a key.
:return:None
"""
print_line(70, "*")
response = input("Enter [R] for restart Qpage and any other key to exit : ")
if response.upper() != "R":
sys.exit()
def wait_func(iteration=2):
"""
Wait for-in range Iteration.
:param iteration: the amount of wait.
:type iteration:int
:return:None
>>> wait_func(4)
.
.
.
.
>>> wait_func()
.
.
"""
for _ in range(iteration):
time.sleep(1)
print(".")
if __name__=="__main__":
doctest.testmod()
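# A rough, illustrative sketch (an addition, not the original qpage entry point)
# of how the helpers above could be chained to build a site: prepare the folders,
# collect page names from the doc folder, write the stylesheet, emit one HTML
# page per text file, append the shared menu, then add the favicon and
# robots.txt. The exact ordering in the real qpage main script may differ.
def _site_build_sketch():
    create_folder()
    page_name_update()
    css_creator()
    for page in ACTUAL_NAME:
        html_init(page)
        contain(page)
    menu_writer()
    for page in ACTUAL_NAME:
        html_end(page)
    icon_creator()
    robot_maker()
    close_files()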
|
|
import pytest
import numpy as np
import scipy.sparse as sp
from sklearn.base import clone
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import ignore_warnings
from sklearn.utils.stats import _weighted_percentile
from sklearn.dummy import DummyClassifier, DummyRegressor
from sklearn.exceptions import NotFittedError
@ignore_warnings
def _check_predict_proba(clf, X, y):
proba = clf.predict_proba(X)
# We know that we can have division by zero
log_proba = clf.predict_log_proba(X)
y = np.atleast_1d(y)
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
n_outputs = y.shape[1]
n_samples = len(X)
if n_outputs == 1:
proba = [proba]
log_proba = [log_proba]
for k in range(n_outputs):
assert proba[k].shape[0] == n_samples
assert proba[k].shape[1] == len(np.unique(y[:, k]))
assert_array_almost_equal(proba[k].sum(axis=1), np.ones(len(X)))
# We know that we can have division by zero
assert_array_almost_equal(np.log(proba[k]), log_proba[k])
def _check_behavior_2d(clf):
# 1d case
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([1, 2, 1, 1])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert y.shape == y_pred.shape
# 2d case
y = np.array([[1, 0], [2, 0], [1, 0], [1, 3]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert y.shape == y_pred.shape
def _check_behavior_2d_for_constant(clf):
# 2d case only
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([[1, 0, 5, 4, 3], [2, 0, 1, 2, 5], [1, 0, 4, 5, 2], [1, 3, 3, 2, 0]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert y.shape == y_pred.shape
def _check_equality_regressor(statistic, y_learn, y_pred_learn, y_test, y_pred_test):
assert_array_almost_equal(np.tile(statistic, (y_learn.shape[0], 1)), y_pred_learn)
assert_array_almost_equal(np.tile(statistic, (y_test.shape[0], 1)), y_pred_test)
def test_most_frequent_and_prior_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [1, 2, 1, 1]
for strategy in ("most_frequent", "prior"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
if strategy == "prior":
assert_array_almost_equal(
clf.predict_proba([X[0]]), clf.class_prior_.reshape((1, -1))
)
else:
assert_array_almost_equal(
clf.predict_proba([X[0]]), clf.class_prior_.reshape((1, -1)) > 0.5
)
def test_most_frequent_and_prior_strategy_with_2d_column_y():
# non-regression test added in
# https://github.com/scikit-learn/scikit-learn/pull/13545
X = [[0], [0], [0], [0]]
y_1d = [1, 2, 1, 1]
y_2d = [[1], [2], [1], [1]]
for strategy in ("most_frequent", "prior"):
clf_1d = DummyClassifier(strategy=strategy, random_state=0)
clf_2d = DummyClassifier(strategy=strategy, random_state=0)
clf_1d.fit(X, y_1d)
clf_2d.fit(X, y_2d)
assert_array_equal(clf_1d.predict(X), clf_2d.predict(X))
def test_most_frequent_and_prior_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[1, 0], [2, 0], [1, 0], [1, 3]])
n_samples = len(X)
for strategy in ("prior", "most_frequent"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
assert_array_equal(
clf.predict(X),
np.hstack([np.ones((n_samples, 1)), np.zeros((n_samples, 1))]),
)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_stratified_strategy():
X = [[0]] * 5 # ignored
y = [1, 2, 1, 1, 2]
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 3.0 / 5, decimal=1)
assert_almost_equal(p[2], 2.0 / 5, decimal=1)
_check_predict_proba(clf, X, y)
def test_stratified_strategy_multioutput():
X = [[0]] * 5 # ignored
y = np.array([[2, 1], [2, 2], [1, 1], [1, 2], [1, 1]])
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3.0 / 5, decimal=1)
assert_almost_equal(p[2], 2.0 / 5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_uniform_strategy():
X = [[0]] * 4 # ignored
y = [1, 2, 1, 1]
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
def test_uniform_strategy_multioutput():
X = [[0]] * 4 # ignored
y = np.array([[2, 1], [2, 2], [1, 2], [1, 1]])
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_string_labels():
X = [[0]] * 5
y = ["paris", "paris", "tokyo", "amsterdam", "berlin"]
clf = DummyClassifier(strategy="most_frequent")
clf.fit(X, y)
assert_array_equal(clf.predict(X), ["paris"] * 5)
@pytest.mark.parametrize(
"y,y_test",
[
([2, 1, 1, 1], [2, 2, 1, 1]),
(
np.array([[2, 2], [1, 1], [1, 1], [1, 1]]),
np.array([[2, 2], [2, 2], [1, 1], [1, 1]]),
),
],
)
def test_classifier_score_with_None(y, y_test):
clf = DummyClassifier(strategy="most_frequent")
clf.fit(None, y)
assert clf.score(None, y_test) == 0.5
@pytest.mark.parametrize(
"strategy", ["stratified", "most_frequent", "prior", "uniform", "constant"]
)
def test_classifier_prediction_independent_of_X(strategy):
y = [0, 2, 1, 1]
X1 = [[0]] * 4
clf1 = DummyClassifier(strategy=strategy, random_state=0, constant=0)
clf1.fit(X1, y)
predictions1 = clf1.predict(X1)
X2 = [[1]] * 4
clf2 = DummyClassifier(strategy=strategy, random_state=0, constant=0)
clf2.fit(X2, y)
predictions2 = clf2.predict(X2)
assert_array_equal(predictions1, predictions2)
def test_classifier_exceptions():
clf = DummyClassifier(strategy="unknown")
with pytest.raises(ValueError):
clf.fit([], [])
with pytest.raises(NotFittedError):
clf.predict([])
with pytest.raises(NotFittedError):
clf.predict_proba([])
def test_mean_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 4 # ignored
y = random_state.randn(4)
reg = DummyRegressor()
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.mean(y)] * len(X))
def test_mean_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
mean = np.mean(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor()
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(mean, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_regressor_exceptions():
reg = DummyRegressor()
with pytest.raises(NotFittedError):
reg.predict([])
def test_median_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="median")
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
def test_median_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="median")
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_quantile_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="quantile", quantile=0.5)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.min(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=1)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.max(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0.3)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.percentile(y, q=30)] * len(X))
def test_quantile_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
quantile_values = np.percentile(y_learn, axis=0, q=80).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="quantile", quantile=0.5)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
# Correctness oracle
est = DummyRegressor(strategy="quantile", quantile=0.8)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
quantile_values, y_learn, y_pred_learn, y_test, y_pred_test
)
_check_behavior_2d(est)
def test_quantile_invalid():
X = [[0]] * 5 # ignored
y = [0] * 5 # ignored
est = DummyRegressor(strategy="quantile")
with pytest.raises(ValueError):
est.fit(X, y)
est = DummyRegressor(strategy="quantile", quantile=None)
with pytest.raises(ValueError):
est.fit(X, y)
est = DummyRegressor(strategy="quantile", quantile=[0])
with pytest.raises(ValueError):
est.fit(X, y)
est = DummyRegressor(strategy="quantile", quantile=-0.1)
with pytest.raises(ValueError):
est.fit(X, y)
est = DummyRegressor(strategy="quantile", quantile=1.1)
with pytest.raises(ValueError):
est.fit(X, y)
est = DummyRegressor(strategy="quantile", quantile="abc")
with pytest.raises(TypeError):
est.fit(X, y)
def test_quantile_strategy_empty_train():
est = DummyRegressor(strategy="quantile", quantile=0.4)
with pytest.raises(ValueError):
est.fit([], [])
def test_constant_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="constant", constant=[43])
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
reg = DummyRegressor(strategy="constant", constant=43)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
def test_constant_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
# test with 2d array
constants = random_state.randn(5)
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="constant", constant=constants)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(constants, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d_for_constant(est)
def test_y_mean_attribute_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
# when strategy = 'mean'
est = DummyRegressor(strategy="mean")
est.fit(X, y)
assert est.constant_ == np.mean(y)
def test_unknown_strategy_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy="gona")
with pytest.raises(ValueError):
est.fit(X, y)
def test_constants_not_specified_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy="constant")
with pytest.raises(TypeError):
est.fit(X, y)
def test_constant_size_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X = random_state.randn(10, 10)
y = random_state.randn(10, 5)
est = DummyRegressor(strategy="constant", constant=[1, 2, 3, 4])
with pytest.raises(ValueError):
est.fit(X, y)
def test_constant_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0, constant=1)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
X = [[0], [0], [0], [0]] # ignored
y = ["two", "one", "two", "two"]
clf = DummyClassifier(strategy="constant", random_state=0, constant="one")
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.array(["one"] * 4))
_check_predict_proba(clf, X, y)
def test_constant_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[2, 3], [1, 3], [2, 3], [2, 0]])
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0, constant=[1, 0])
clf.fit(X, y)
assert_array_equal(
clf.predict(X), np.hstack([np.ones((n_samples, 1)), np.zeros((n_samples, 1))])
)
_check_predict_proba(clf, X, y)
@pytest.mark.parametrize(
"y, params, err_msg",
[
([2, 1, 2, 2], {"random_state": 0}, "Constant.*has to be specified"),
([2, 1, 2, 2], {"constant": [2, 0]}, "Constant.*should have shape"),
(
np.transpose([[2, 1, 2, 2], [2, 1, 2, 2]]),
{"constant": 2},
"Constant.*should have shape",
),
(
[2, 1, 2, 2],
{"constant": "my-constant"},
"constant=my-constant.*Possible values.*\\[1, 2]",
),
(
np.transpose([[2, 1, 2, 2], [2, 1, 2, 2]]),
{"constant": [2, "unknown"]},
"constant=\\[2, 'unknown'].*Possible values.*\\[1, 2]",
),
],
ids=[
"no-constant",
"too-many-constant",
"not-enough-output",
"single-output",
"multi-output",
],
)
def test_constant_strategy_exceptions(y, params, err_msg):
X = [[0], [0], [0], [0]]
clf = DummyClassifier(strategy="constant", **params)
with pytest.raises(ValueError, match=err_msg):
clf.fit(X, y)
def test_classification_sample_weight():
X = [[0], [0], [1]]
y = [0, 1, 0]
sample_weight = [0.1, 1.0, 0.1]
clf = DummyClassifier(strategy="stratified").fit(X, y, sample_weight)
assert_array_almost_equal(clf.class_prior_, [0.2 / 1.2, 1.0 / 1.2])
def test_constant_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[0, 1], [4, 0], [1, 1], [1, 4], [1, 1]]))
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0, constant=[1, 0])
clf.fit(X, y)
y_pred = clf.predict(X)
assert sp.issparse(y_pred)
assert_array_equal(
y_pred.toarray(), np.hstack([np.ones((n_samples, 1)), np.zeros((n_samples, 1))])
)
def test_uniform_strategy_sparse_target_warning():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[2, 1], [2, 2], [1, 4], [4, 2], [1, 1]]))
clf = DummyClassifier(strategy="uniform", random_state=0)
with pytest.warns(UserWarning, match="the uniform strategy would not save memory"):
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 1 / 3, decimal=1)
assert_almost_equal(p[2], 1 / 3, decimal=1)
assert_almost_equal(p[4], 1 / 3, decimal=1)
def test_stratified_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[4, 1], [0, 0], [1, 1], [1, 4], [1, 1]]))
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
assert sp.issparse(y_pred)
y_pred = y_pred.toarray()
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3.0 / 5, decimal=1)
assert_almost_equal(p[0], 1.0 / 5, decimal=1)
assert_almost_equal(p[4], 1.0 / 5, decimal=1)
def test_most_frequent_and_prior_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[1, 0], [1, 3], [4, 0], [0, 1], [1, 0]]))
n_samples = len(X)
y_expected = np.hstack([np.ones((n_samples, 1)), np.zeros((n_samples, 1))])
for strategy in ("most_frequent", "prior"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
y_pred = clf.predict(X)
assert sp.issparse(y_pred)
assert_array_equal(y_pred.toarray(), y_expected)
def test_dummy_regressor_sample_weight(n_samples=10):
random_state = np.random.RandomState(seed=1)
X = [[0]] * n_samples
y = random_state.rand(n_samples)
sample_weight = random_state.rand(n_samples)
est = DummyRegressor(strategy="mean").fit(X, y, sample_weight)
assert est.constant_ == np.average(y, weights=sample_weight)
est = DummyRegressor(strategy="median").fit(X, y, sample_weight)
assert est.constant_ == _weighted_percentile(y, sample_weight, 50.0)
est = DummyRegressor(strategy="quantile", quantile=0.95).fit(X, y, sample_weight)
assert est.constant_ == _weighted_percentile(y, sample_weight, 95.0)
def test_dummy_regressor_on_3D_array():
X = np.array([[["foo"]], [["bar"]], [["baz"]]])
y = np.array([2, 2, 2])
y_expected = np.array([2, 2, 2])
cls = DummyRegressor()
cls.fit(X, y)
y_pred = cls.predict(X)
assert_array_equal(y_pred, y_expected)
def test_dummy_classifier_on_3D_array():
X = np.array([[["foo"]], [["bar"]], [["baz"]]])
y = [2, 2, 2]
y_expected = [2, 2, 2]
y_proba_expected = [[1], [1], [1]]
cls = DummyClassifier(strategy="stratified")
cls.fit(X, y)
y_pred = cls.predict(X)
y_pred_proba = cls.predict_proba(X)
assert_array_equal(y_pred, y_expected)
assert_array_equal(y_pred_proba, y_proba_expected)
def test_dummy_regressor_return_std():
X = [[0]] * 3 # ignored
y = np.array([2, 2, 2])
y_std_expected = np.array([0, 0, 0])
cls = DummyRegressor()
cls.fit(X, y)
y_pred_list = cls.predict(X, return_std=True)
# there should be two elements when return_std is True
assert len(y_pred_list) == 2
# the second element should be all zeros
assert_array_equal(y_pred_list[1], y_std_expected)
@pytest.mark.parametrize(
"y,y_test",
[
([1, 1, 1, 2], [1.25] * 4),
(np.array([[2, 2], [1, 1], [1, 1], [1, 1]]), [[1.25, 1.25]] * 4),
],
)
def test_regressor_score_with_None(y, y_test):
reg = DummyRegressor()
reg.fit(None, y)
assert reg.score(None, y_test) == 1.0
@pytest.mark.parametrize("strategy", ["mean", "median", "quantile", "constant"])
def test_regressor_prediction_independent_of_X(strategy):
y = [0, 2, 1, 1]
X1 = [[0]] * 4
reg1 = DummyRegressor(strategy=strategy, constant=0, quantile=0.7)
reg1.fit(X1, y)
predictions1 = reg1.predict(X1)
X2 = [[1]] * 4
reg2 = DummyRegressor(strategy=strategy, constant=0, quantile=0.7)
reg2.fit(X2, y)
predictions2 = reg2.predict(X2)
assert_array_equal(predictions1, predictions2)
@pytest.mark.parametrize(
"strategy", ["stratified", "most_frequent", "prior", "uniform", "constant"]
)
def test_dtype_of_classifier_probas(strategy):
y = [0, 2, 1, 1]
X = np.zeros(4)
model = DummyClassifier(strategy=strategy, random_state=0, constant=0)
probas = model.fit(X, y).predict_proba(X)
assert probas.dtype == np.float64
# TODO: remove in 1.2
@pytest.mark.filterwarnings("ignore:`n_features_in_` is deprecated")
@pytest.mark.parametrize("Dummy", (DummyRegressor, DummyClassifier))
def test_n_features_in_(Dummy):
X = [[1, 2]]
y = [0]
d = Dummy()
assert not hasattr(d, "n_features_in_")
d.fit(X, y)
with pytest.warns(FutureWarning, match="`n_features_in_` is deprecated"):
n_features_in = d.n_features_in_
assert n_features_in is None
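# Illustrative sketch (not part of the original test-suite): a minimal, hedged
# example of the baseline behaviour these tests exercise. It only uses names
# already imported at the top of this module (DummyRegressor, DummyClassifier);
# the helper name `_example_dummy_baselines` is ours and is never called here.
def _example_dummy_baselines():
    """Show the constant predictions produced by the dummy estimators."""
    X = [[0]] * 4            # features are ignored by dummy estimators
    y = [1.0, 2.0, 2.0, 3.0]
    reg = DummyRegressor(strategy="mean").fit(X, y)
    # every regression prediction equals the training mean, here 2.0
    mean_preds = reg.predict(X)
    clf = DummyClassifier(strategy="most_frequent").fit(X, [0, 1, 1, 1])
    # every classification prediction equals the most frequent label, here 1
    mode_preds = clf.predict(X)
    return mean_preds, mode_preds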
|
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import logging
import sys
import torch
from dataclasses import dataclass, field
from typing import List, Optional, Tuple
from fairseq.data import Dictionary
from fairseq.data.codedataset import ExpressiveCodeDataConfig, CodeDataset
from fairseq.dataclass.configs import FairseqDataclass
from fairseq.tasks import register_task
from fairseq.tasks.fairseq_task import FairseqTask
from omegaconf import MISSING, DictConfig
logger = logging.getLogger(__name__)
class UnitDictionary(Dictionary):
"""
A fixed-size Dictionary that operates on integer-valued tokens
with a trivial (identity) token <-> id mapping.
Special symbols (bos, eos, ...) have ids above n_units.
"""
def __init__(
self,
*, # begin keyword-only arguments
n_units,
bos="<s>",
pad="<pad>",
eos="</s>",
unk="<unk>",
extra_special_symbols=None,
clip=False,
):
self.n_units = n_units
self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
self.clip = clip
self.symbols = []
self.count = []
self.indices = {}
for i in range(n_units):
self.add_symbol(str(i))
self.bos_index = self.add_symbol(bos)
self.pad_index = self.add_symbol(pad)
self.eos_index = self.add_symbol(eos)
self.unk_index = self.add_symbol(unk)
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(s)
self.nspecial = len(self.symbols)
def encode_line(self, line, append_eos=True, prepend_bos=False) -> torch.IntTensor:
words = [int(x) for x in line.split()]
if self.clip:
words = [min(self.n_units - 1, word) for word in words]
if prepend_bos:
words = [self.bos_index] + words
if append_eos:
words.append(self.eos_index)
ids = torch.IntTensor(words)
return ids
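# Illustrative sketch (added for clarity, not part of the original module):
# demonstrates the identity token <-> id mapping described in the
# UnitDictionary docstring. The helper name is ours; the task never calls it.
def _unit_dictionary_example():
    d = UnitDictionary(n_units=3)
    # integer tokens map to themselves; special symbols sit above n_units,
    # so d.eos_index == 3 + 2 here (bos and pad are added first).
    ids = d.encode_line("0 1 2", append_eos=True)
    assert ids.tolist() == [0, 1, 2, d.eos_index]
    return ids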
@dataclass
class SpeechUnitModelingConfig(FairseqDataclass):
data: str = field(default=MISSING, metadata={"help": "Path to data config.json"})
max_token_duration: int = field(
default=20, metadata={"help": "all token durations are capped to this value"}
)
tokens_per_sample: int = field(
default=1024, metadata={"help": "tokens in a sample"}
)
max_target_positions: int = field(
default=1024, metadata={"help": "max target positions"}
)
# duration modeling
ignore_duration_input: bool = field(
default=False, metadata={"help": "whether token durations should be zeroed out"}
)
discrete_duration: bool = field(
default=False, metadata={"help": "treat duration as discrete variable"}
)
# F0 modeling
ignore_f0_input: bool = field(
default=False, metadata={"help": "whether F0 should be zeroed out"}
)
discrete_f0: bool = field(
default=False, metadata={"help": "load quantized f0. get bin from config"}
)
log_f0: bool = field(
default=False, metadata={"help": "whether f0 should be modeled in log space"}
)
normalize_f0_mean: bool = field(
default=False, metadata={"help": "whether normalize f0 by speaker mean"}
)
normalize_f0_std: bool = field(
default=False, metadata={"help": "whether normalize f0 by speaker stddev"}
)
interpolate_f0: bool = field(
default=False,
metadata={"help": "whether interpolate f0 for non-voiced segments"},
)
# input/output streams
stream_shifts: str = field(
default="0,0",
metadata={
"help": (
"comma-separated integer list denoting right-shift for "
"duration and pitch streams"
)
},
)
@register_task("speech_unit_modeling", dataclass=SpeechUnitModelingConfig)
class SpeechUnitLanguageModelingTask(FairseqTask):
def __init__(self, cfg: SpeechUnitModelingConfig) -> None:
super().__init__(cfg)
assert not self.cfg.normalize_f0_std or self.cfg.normalize_f0_mean
self.data_config = ExpressiveCodeDataConfig(cfg.data)
self._source_dictionary = self._target_dictionary = UnitDictionary(
n_units=self.data_config.n_units
)
self._source_duration_dictionary = self._target_duration_dictionary = (
UnitDictionary(n_units=self.cfg.max_token_duration + 1, clip=True)
if self.cfg.discrete_duration
else None
)
self._source_f0_dictionary = self._target_f0_dictionary = (
UnitDictionary(n_units=self.data_config.f0_vq_n_units)
if self.cfg.discrete_f0
else None
)
self._channel_names = ["token", "duration", "f0"]
self._channel_sizes = [
len(self.target_dictionary),
len(self.target_duration_dictionary) if self.cfg.discrete_duration else 1,
len(self.target_f0_dictionary) if self.cfg.discrete_f0 else 1,
]
@property
def source_dictionary(self) -> Optional[Dictionary]:
return self._source_dictionary
@property
def source_duration_dictionary(self) -> Optional[Dictionary]:
return self._source_duration_dictionary
@property
def source_f0_dictionary(self) -> Optional[Dictionary]:
return self._source_f0_dictionary
@property
def channel_names(self) -> List[str]:
return self._channel_names
@property
def channel_sizes(self) -> List[int]:
return self._channel_sizes
@property
def dictionary(self) -> Optional[Dictionary]:
return self._source_dictionary
@property
def target_dictionary(self) -> Optional[Dictionary]:
return self._target_dictionary
@property
def target_duration_dictionary(self) -> Optional[Dictionary]:
return self._target_duration_dictionary
@property
def target_f0_dictionary(self) -> Optional[Dictionary]:
return self._target_f0_dictionary
@property
def dictionaries(self) -> List[Dictionary]:
return [self._dictionaries[l] for l in self.cfg.labels]
@classmethod
def setup_task(
cls, cfg: SpeechUnitModelingConfig, **kwargs
) -> "SpeechUnitLanguageModelingTask":
return cls(cfg)
def load_dataset(self, split: str, **kwargs) -> None:
self.datasets[split] = CodeDataset(
manifest=self.data_config.manifests[split],
dictionary=self.source_dictionary,
dur_dictionary=self.source_duration_dictionary,
f0_dictionary=self.source_f0_dictionary,
config=self.data_config,
discrete_dur=self.cfg.discrete_duration,
discrete_f0=self.cfg.discrete_f0,
log_f0=self.cfg.log_f0,
normalize_f0_mean=self.cfg.normalize_f0_mean,
normalize_f0_std=self.cfg.normalize_f0_std,
interpolate_f0=self.cfg.interpolate_f0,
shifts=self.cfg.stream_shifts,
)
def max_positions(self) -> Tuple[int, int]:
return (sys.maxsize, sys.maxsize)
def build_criterion(self, cfg: DictConfig):
import fairseq.criterions
return fairseq.criterions.build_criterion(cfg, self)
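# Illustrative sketch (not part of the original task): how the task above would
# typically be instantiated, assuming `cfg` is a populated
# SpeechUnitModelingConfig whose `data` field points at a valid config.json
# with a "train" manifest. The helper name is ours.
def _example_setup_task(cfg: SpeechUnitModelingConfig):
    task = SpeechUnitLanguageModelingTask.setup_task(cfg)
    task.load_dataset("train")  # requires the manifest listed in config.json
    return task.datasets["train"], task.channel_names, task.channel_sizes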
|
|
"""
Utilities for handling RAR and ZIP archives
Provides wrapper archive and exception classes to simplify
archive extraction
"""
import os
import shutil
import zipfile
from loguru import logger
try:
import rarfile
except ImportError:
rarfile = None
logger = logger.bind(name='archive')
class ArchiveError(Exception):
"""Base exception for archive"""
pass
class NeedRarFile(ArchiveError):
"""Exception to be raised when rarfile module is missing"""
pass
class BadArchive(ArchiveError):
"""Wrapper exception for BadZipFile and BadRarFile"""
pass
class NeedFirstVolume(ArchiveError):
"""Wrapper exception for rarfile.NeedFirstVolume"""
pass
class PathError(ArchiveError):
"""Exception to be raised when an archive file doesn't exist"""
pass
class FSError(ArchiveError):
"""Exception to be raised on OS/IO exceptions"""
pass
class FileAlreadyExists(ArchiveError):
"""Exception to be raised when destination file already exists"""
pass
def rarfile_set_tool_path(config):
"""
Manually set the path of unrar executable if it can't be resolved from the
PATH environment variable
"""
unrar_tool = config['unrar_tool']
if unrar_tool:
if not rarfile:
logger.error('unrar_tool specified with no rarfile module installed.')
else:
rarfile.UNRAR_TOOL = unrar_tool
logger.debug('Set RarFile.unrar_tool to: {}', unrar_tool)
def rarfile_set_path_sep(separator):
"""
Set the path separator on rarfile module
"""
if rarfile:
rarfile.PATH_SEP = separator
def makepath(path):
"""Make directories as needed"""
if not os.path.exists(path):
logger.debug('Creating path: {}', path)
os.makedirs(path)
class Archive:
"""
Base archive class. Assumes an interface similar to
zipfile.ZipFile or rarfile.RarFile
"""
def __init__(self, archive_object, path):
self.path = path
self.archive = archive_object(self.path)
def close(self):
"""Release open resources."""
self.archive.close()
def delete(self):
"""Delete the volumes that make up this archive"""
volumes = self.volumes()
self.close()
try:
for volume in volumes:
os.remove(volume)
logger.verbose('Deleted archive: {}', volume)
except (IOError, os.error) as error:
raise FSError(error)
def volumes(self):
"""Returns the list of volumes that comprise this archive"""
return [self.path]
def infolist(self):
"""Returns a list of info objects describing the contents of this archive"""
infolist = []
for info in self.archive.infolist():
try:
archive_info = ArchiveInfo(info)
infolist.append(archive_info)
except ValueError as e:
logger.debug(e)
return infolist
def open(self, member):
"""Returns file-like object from where the data of a member file can be read."""
return self.archive.open(member)
def extract_file(self, member, destination):
"""Extract a member file to the specified destination"""
try:
with self.open(member) as source:
with open(destination, 'wb') as target:
shutil.copyfileobj(source, target)
except (IOError, os.error) as error:
raise FSError(error)
logger.verbose('Extracted: {}', member)
class RarArchive(Archive):
"""
Wrapper class for rarfile.RarFile
"""
def __init__(self, path):
if not rarfile:
raise NeedRarFile('Python module rarfile needed to handle RAR archives')
try:
super().__init__(rarfile.RarFile, path)
except rarfile.BadRarFile as error:
raise BadArchive(error)
except rarfile.NeedFirstVolume as error:
raise NeedFirstVolume(error)
except rarfile.Error as error:
raise ArchiveError(error)
def volumes(self):
"""Returns the list of volumes that comprise this archive"""
return self.archive.volumelist()
def open(self, member):
"""Returns file-like object from where the data of a member file can be read."""
try:
return super().open(member)
except rarfile.Error as error:
raise ArchiveError(error)
class ZipArchive(Archive):
"""
Wrapper class for zipfile.ZipFile
"""
def __init__(self, path):
try:
super().__init__(zipfile.ZipFile, path)
except zipfile.BadZipfile as error:
raise BadArchive(error)
def open(self, member):
"""Returns file-like object from where the data of a member file can be read."""
try:
return super().open(member)
except zipfile.BadZipfile as error:
raise ArchiveError(error)
class ArchiveInfo:
"""Wrapper class for archive info objects"""
def __init__(self, info):
self.info = info
self.path = info.filename
self.filename = os.path.basename(self.path)
if self._is_dir():
raise ValueError('Appears to be a directory: %s' % self.path)
def _is_dir(self):
"""Indicates if info object looks to be a directory"""
if hasattr(self.info, 'isdir'):
return self.info.isdir()
else:
return not self.filename
def extract(self, archive, destination):
"""Extract ArchiveInfo object to the specified destination"""
dest_dir = os.path.dirname(destination)
if os.path.exists(destination):
raise FileAlreadyExists('File already exists: %s' % destination)
logger.debug('Creating path: {}', dest_dir)
makepath(dest_dir)
try:
archive.extract_file(self.info, destination)
except Exception as error:
if os.path.exists(destination):
logger.debug('Cleaning up partially extracted file: {}', destination)
os.remove(destination)
raise error
def open_archive(archive_path):
"""
Returns the appropriate archive object
"""
archive = None
if not os.path.exists(archive_path):
raise PathError('Path doesn\'t exist')
if zipfile.is_zipfile(archive_path):
archive = ZipArchive(archive_path)
logger.debug('Successfully opened ZIP: {}', archive_path)
elif rarfile and rarfile.is_rarfile(archive_path):
archive = RarArchive(archive_path)
logger.debug('Successfully opened RAR: {}', archive_path)
else:
if not rarfile:
logger.warning('Rarfile module not installed; unable to handle RAR archives.')
return archive
def is_archive(path):
"""
Attempts to open an entry as an archive; returns True on success, False on failure.
"""
archive = None
try:
archive = open_archive(path)
if archive:
archive.close()
return True
except (IOError, ArchiveError) as error:
logger.debug('Failed to open file as archive: {} ({})', path, error)
return False
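# Illustrative sketch (not part of the original module): one way the helpers
# above can be combined to extract every member of an archive into a
# directory. The function name and the destination layout are ours.
def _example_extract_all(archive_path, dest_dir):
    """Extract all members of the archive at `archive_path` into `dest_dir`."""
    archive = open_archive(archive_path)
    if archive is None:
        raise ArchiveError('Not a supported archive: %s' % archive_path)
    try:
        for info in archive.infolist():
            destination = os.path.join(dest_dir, info.path)
            # ArchiveInfo.extract creates intermediate directories as needed
            info.extract(archive, destination)
    finally:
        archive.close()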
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys
from winsys._compat import unittest
import uuid
import winerror
import win32api
import win32con
import win32security
import pywintypes
from winsys.tests import utils as testutils
from winsys import registry, utils
GUID = str(uuid.uuid1())
TEST_KEY = r"HKEY_CURRENT_USER\Software\winsys"
TEST_KEY1 = r"HKEY_CURRENT_USER\Software\winsys1"
TEST_KEY2 = r"HKEY_CURRENT_USER\Software\winsys1\winsys2"
#
# Utility functions
#
def remove_key(root, key):
hkey = win32api.RegOpenKeyEx(root, key, 0, win32con.KEY_ALL_ACCESS)
for name, reserved, klass, last_written in win32api.RegEnumKeyEx(hkey):
remove_key(hkey, name)
win32api.RegDeleteKey(root, key)
def remove_access(path=r"software\winsys"):
hKey = win32api.RegOpenKeyEx(
win32con.HKEY_CURRENT_USER, path, 0,
win32con.READ_CONTROL|win32con.WRITE_DAC
)
dacl = win32security.ACL()
win32security.SetSecurityInfo(
hKey, win32security.SE_REGISTRY_KEY,
win32security.DACL_SECURITY_INFORMATION | win32security.PROTECTED_DACL_SECURITY_INFORMATION,
None, None, dacl, None
)
def restore_access(path=r"software\winsys"):
hKey = win32api.RegOpenKeyEx(
win32con.HKEY_CURRENT_USER, path,
0,
win32con.READ_CONTROL|win32con.WRITE_DAC
)
win32security.SetSecurityInfo(
hKey, win32security.SE_REGISTRY_KEY,
win32security.DACL_SECURITY_INFORMATION | win32security.UNPROTECTED_DACL_SECURITY_INFORMATION,
None, None, None, None
)
def keys_are_equal(key0, key1):
return \
list((utils.relative_to(key.moniker, key0), list(values)) for key, subkeys, values in registry.walk(key0)) == \
list((utils.relative_to(key.moniker, key1), list(values)) for key, subkeys, values in registry.walk(key1))
def key0_subset_of_key1(key0, key1):
s0 = set((utils.relative_to(key.moniker, key0), frozenset(values)) for key, subkeys, values in registry.walk(key0))
s1 = set((utils.relative_to(key.moniker, key1), frozenset(values)) for key, subkeys, values in registry.walk(key1))
return s0 < s1
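# Illustrative sketch (not part of the original test-suite): how the helpers
# above are meant to be used -- copy a key and verify the copy with
# keys_are_equal, mirroring test_copy_does_not_exist below. Run it only on a
# throw-away key; the function name and defaults are ours.
def _example_copy_roundtrip(src=TEST_KEY, dst=TEST_KEY1):
    registry.copy(src, dst)
    try:
        return keys_are_equal(src, dst)
    finally:
        registry.delete(dst)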
@unittest.skipUnless(testutils.i_am_admin(), "These tests must be run as Administrator")
class TestRegistry(unittest.TestCase):
#
# Fixtures
#
def setUp(self):
hwinsys = win32api.RegCreateKey(win32con.HKEY_CURRENT_USER, r"Software\winsys")
hKey = win32api.RegOpenKeyEx(win32con.HKEY_CURRENT_USER, r"Software\winsys", 0, win32con.KEY_WRITE)
win32api.RegSetValueEx(hKey, "winsys1", None, win32con.REG_SZ, GUID)
win32api.RegSetValueEx(hKey, "winsys1", "value", win32con.REG_SZ, GUID)
win32api.RegSetValueEx(hKey, "winsys2", None, win32con.REG_SZ, GUID)
hSubkey = win32api.RegCreateKey(hKey, "winsys2")
win32api.RegSetValueEx(hSubkey, "winsys2", None, win32con.REG_SZ, GUID)
hKey = win32api.RegOpenKeyEx(win32con.HKEY_CURRENT_USER, r"Software\winsys", 0, win32con.KEY_WRITE)
hSubkey = win32api.RegCreateKey(hKey, "win:sys3")
win32api.RegSetValueEx(hSubkey, "winsys3", None, win32con.REG_SZ, GUID)
self.setup_set_value()
def tearDown(self):
hKey = win32api.RegOpenKeyEx(win32con.HKEY_CURRENT_USER, r"Software\winsys", 0, win32con.READ_CONTROL|win32con.WRITE_DAC)
dacl = win32security.ACL()
sid, _, _ = win32security.LookupAccountName(None, win32api.GetUserName())
dacl.AddAccessAllowedAce(win32security.ACL_REVISION_DS, win32con.KEY_ALL_ACCESS, sid)
win32security.SetSecurityInfo(
hKey, win32security.SE_REGISTRY_KEY,
win32security.DACL_SECURITY_INFORMATION | win32security.UNPROTECTED_DACL_SECURITY_INFORMATION,
None, None, dacl, None
)
remove_key(win32con.HKEY_CURRENT_USER, r"Software\winsys")
#
# Fixtures
#
#~ def setup_key_with_colon():
#~ hKey = win32api.RegOpenKeyEx(win32con.HKEY_CURRENT_USER, r"Software\winsys", 0, win32con.KEY_WRITE)
#~ hSubkey = win32api.RegCreateKey(hKey, "win:sys3")
#~ win32api.RegSetValueEx(hSubkey, "winsys3", None, win32con.REG_SZ, GUID)
#~ def teardown_key_with_colon():
#~ hKey = win32api.RegOpenKeyEx(win32con.HKEY_CURRENT_USER, r"Software\winsys", 0, win32con.KEY_WRITE)
#~ win32api.RegDeleteKey(hKey, "win:sys3")
#
# TESTS
#
#
# test disabled until I can figure out a way to make it fail!
#
#~ def test_moniker_ill_formed():
#~ assert_raises(registry.x_moniker_ill_formed, registry._parse_moniker, r"IN\VA:LID\MONI\KER")
def test_moniker_computer_only(self):
with self.assertRaises(registry.x_moniker_no_root):
registry._parse_moniker(r"\\computer")
def test_moniker_invalid_root(self):
with self.assertRaises(registry.x_moniker_no_root):
registry._parse_moniker(r"<nonsense>")
def test_moniker_slash_and_root(self):
self.assertEqual(registry._parse_moniker(r"\HKLM"),(None, win32con.HKEY_LOCAL_MACHINE, "", None))
def test_moniker_root_only(self):
self.assertEqual(registry._parse_moniker("HKLM"),(None, win32con.HKEY_LOCAL_MACHINE, "", None))
def test_moniker_computer_and_root(self):
self.assertEqual(registry._parse_moniker(r"\\COMPUTER\HKLM"),("COMPUTER", win32con.HKEY_LOCAL_MACHINE, "", None))
def test_moniker_root_and_body(self):
self.assertEqual(registry._parse_moniker(r"HKLM\Software\Microsoft"),(None, win32con.HKEY_LOCAL_MACHINE, r"Software\Microsoft", None))
def test_moniker_computer_root_and_body(self):
self.assertEqual(registry._parse_moniker(r"\\COMPUTER\HKLM\Software\Microsoft"),("COMPUTER", win32con.HKEY_LOCAL_MACHINE, r"Software\Microsoft", None))
def test_moniker_body_only(self):
with self.assertRaises(registry.x_moniker_no_root):
registry._parse_moniker(r"Software\Microsoft")
def test_moniker_default_value(self):
self.assertEqual(registry._parse_moniker(r"HKLM\Software\Microsoft:"),(None, win32con.HKEY_LOCAL_MACHINE, r"Software\Microsoft", ""))
def test_moniker_value(self):
self.assertEqual(registry._parse_moniker(r"HKLM\Software\Microsoft:value"),(None, win32con.HKEY_LOCAL_MACHINE, r"Software\Microsoft", "value"))
def test_moniker_create(self):
parts = "COMPUTER", win32con.HKEY_LOCAL_MACHINE, "PATH", "VALUE"
self.assertEqual(registry._parse_moniker(registry.create_moniker(*parts)), parts)
def test_moniker_create_named_root(self):
parts = "COMPUTER", "HKLM", "PATH", "VALUE"
result = "COMPUTER", win32con.HKEY_LOCAL_MACHINE, "PATH", "VALUE"
self.assertEqual(registry._parse_moniker(registry.create_moniker(*parts)), result)
def test_moniker_create_none_value(self):
parts = "COMPUTER", win32con.HKEY_LOCAL_MACHINE, "PATH", None
self.assertEqual(registry._parse_moniker(registry.create_moniker(*parts)), parts)
def test_registry_None(self):
self.assertIs(registry.registry(None), None)
def test_registry_Key(self):
key = registry.registry("HKLM")
self.assertIs(registry.registry(key), key)
def test_registry_key_no_value(self):
self.assertEqual(registry.registry(TEST_KEY + r"\win:sys3", accept_value=False).winsys3, GUID)
def test_registry_value(self):
self.assertEqual(registry.registry(TEST_KEY + r":winsys1"), GUID)
def test_registry_string(self):
self.assertEqual(registry.registry(TEST_KEY).winsys1, GUID)
def test_registry_other(self):
with self.assertRaises(registry.x_registry):
hKey = win32api.RegOpenKey(win32con.HKEY_LOCAL_MACHINE, "Software")
registry.registry(hKey)
def test_values(self):
values = registry.values(TEST_KEY)
self.assertEqual(next(values),('winsys1', GUID))
self.assertEqual(next(values),('winsys2', GUID))
def test_values_access_denied(self):
with self.assertRaises(registry.exc.x_access_denied):
key = registry.registry(TEST_KEY, win32con.KEY_ENUMERATE_SUB_KEYS)
next(registry.values(key))
def test_values_ignore_access_denied(self):
key = registry.registry(TEST_KEY, win32con.KEY_ENUMERATE_SUB_KEYS)
values = registry.values(key, ignore_access_errors=True)
self.assertEqual(list(values), [])
def test_keys(self):
keys = registry.keys(TEST_KEY)
self.assertEqual(next(keys), registry.registry(TEST_KEY) + r"win:sys3")
def test_keys_access_denied(self):
with self.assertRaises(registry.exc.x_access_denied):
key = registry.registry(TEST_KEY, win32con.KEY_NOTIFY)
keys = registry.keys(key, ignore_access_errors=False)
next(keys)
def test_keys_ignore_access_denied(self):
key = registry.registry(TEST_KEY, win32con.KEY_NOTIFY)
keys = registry.keys(key, ignore_access_errors=True)
self.assertEqual(list(keys), [])
def test_copy_does_not_exist(self):
key0 = TEST_KEY
key1 = TEST_KEY1
registry.copy(key0, key1)
try:
self.assertTrue(keys_are_equal(key0, key1))
finally:
registry.delete(key1)
def test_copy_exists_empty(self):
key0 = registry.registry(TEST_KEY)
key1 = registry.registry(TEST_KEY1)
self.assertFalse(key1)
key1.create()
self.assertTrue(key1)
registry.copy(key0, key1)
try:
self.assertTrue(keys_are_equal(key0, key1))
finally:
key1.delete()
def test_copy_exists_not_empty_keys(self):
key0 = registry.registry(TEST_KEY)
key1 = registry.registry(TEST_KEY1)
self.assertFalse(key1)
key1.create()
self.assertTrue(key1)
try:
key1.create("winsys4")
registry.copy(key0, key1)
self.assertTrue(key0_subset_of_key1(key0, key1))
finally:
key1.delete()
def test_copy_exists_not_empty_values(self):
key0 = registry.registry(TEST_KEY)
key1 = registry.registry(TEST_KEY1, access="F")
self.assertFalse(key1)
key1.create()
self.assertTrue(key1)
try:
key1.winsys4 = GUID
registry.copy(key0, key1)
self.assertEqual(set(set(key1.flat()) - set(key0.flat())),
set([("winsys4", GUID), key1, key1 + "win:sys3", key1 + "winsys2"])
)
finally:
key1.delete()
def test_create_does_not_exist(self):
key1 = registry.registry(TEST_KEY1)
self.assertFalse(key1)
registry.create(key1)
try:
self.assertTrue(key1)
finally:
key1.delete()
def test_create_does_not_exist_deep(self):
key1 = registry.registry(TEST_KEY1)
key2 = registry.registry(TEST_KEY2)
self.assertFalse(key1)
self.assertFalse(key2)
registry.create(key2)
try:
self.assertTrue(key1)
self.assertTrue(key2)
finally:
key1.delete()
def test_create_does_exist(self):
key = registry.registry(TEST_KEY)
self.assertTrue(key)
registry.create(key)
self.assertTrue(key)
def test_walk(self):
walker = registry.walk(TEST_KEY)
key, subkeys, values = next(walker)
self.assertEqual(key, registry.registry(TEST_KEY))
self.assertEqual(list(values), [("winsys1", GUID),("winsys2", GUID)])
key, subkeys, values = next(walker)
self.assertEqual(key, registry.registry(TEST_KEY) + "win:sys3")
key, subkeys, values = next(walker)
self.assertEqual(key, registry.registry(TEST_KEY) + "winsys2")
self.assertEqual(list(values), [("winsys2", GUID)])
def test_walk_access_denied(self):
with self.assertRaises(registry.exc.x_access_denied):
key = registry.registry(TEST_KEY, access=registry.REGISTRY_ACCESS.KEY_NOTIFY)
walker = registry.walk(key)
key, keys, values = next(walker)
list(keys)
def test_walk_ignore_access_denied(self):
key = registry.registry(TEST_KEY, access=registry.REGISTRY_ACCESS.KEY_NOTIFY)
walker = registry.walk(key, ignore_access_errors=True)
key, keys, values = next(walker)
list(keys) == [key + "winsys2"]
def test_flat(self):
key = registry.registry(TEST_KEY)
self.assertEqual(
list(registry.flat(key)),
[
key,
("winsys1", GUID),
("winsys2", GUID),
key + "win:sys3",
("winsys3", GUID),
key + "winsys2",
("winsys2", GUID)
]
)
def test_flat_access_denied(self):
with self.assertRaises(registry.exc.x_access_denied):
key = registry.registry(TEST_KEY, access=registry.REGISTRY_ACCESS.KEY_NOTIFY)
list(registry.flat(key))
def test_flat_ignore_access_denied(self):
remove_access(r"software\winsys\winsys2")
try:
key = registry.registry(TEST_KEY)
self.assertEqual(
list(registry.flat(key, ignore_access_errors=True)),
[
key,
("winsys1", GUID),
("winsys2", GUID),
key + "win:sys3",
("winsys3", GUID),
])
finally:
restore_access(r"software\winsys\winsys2")
def test_parent(self):
self.assertEqual(registry.parent(TEST_KEY + r"\winsys2"), registry.registry(TEST_KEY))
def test_identical_functions(self):
functions = "values keys delete create walk flat copy parent".split()
for function in functions:
self.assertIs(getattr(registry, function).__code__, getattr(registry.Registry, function).__code__)
def test_Registry_init(self):
key = registry.Registry(TEST_KEY, access=win32con.KEY_ALL_ACCESS)
self.assertEqual(key.moniker, TEST_KEY)
self.assertEqual(key.name, "winsys")
self.assertEqual(key.access, win32con.KEY_ALL_ACCESS)
self.assertEqual(key.id, registry._parse_moniker(TEST_KEY.lower()))
def test_Registry_init_access(self):
for k, v in registry.Registry.ACCESS.items():
self.assertEqual(registry.registry(TEST_KEY, k).access, v)
def test_Registry_access(self):
access = registry.Registry._access
self.assertIs(access(None), None)
self.assertEqual(access(1), 1)
for k, v in registry.Registry.ACCESS.items():
self.assertEqual(registry.registry(TEST_KEY, k).access, v)
def test_Registry_eq(self):
self.assertEqual(registry.registry(TEST_KEY.upper(), access="R"), registry.registry(TEST_KEY.lower(), access="R"))
def test_Registry_neq(self):
self.assertNotEqual(
registry.registry(TEST_KEY.upper(), access="R"),
registry.registry(TEST_KEY.lower(), access="W")
)
def test_Registry_add(self):
self.assertEqual(registry.registry(TEST_KEY) + "test", registry.registry(TEST_KEY + registry.sep + "test"))
def test_Registry_pyobject(self):
self.assertIsInstance(registry.registry(TEST_KEY).pyobject(), pywintypes.HANDLEType)
def test_Registry_pyobject_not_exists(self):
with self.assertRaises(registry.exc.x_not_found):
self.assertFalse(registry.registry(TEST_KEY + "xxx"))
registry.registry(TEST_KEY + "xxx").pyobject()
def test_Registry_as_string(self):
key = registry.registry(TEST_KEY)
self.assertEqual(key.as_string(), key.moniker)
def test_Registry_security(self):
security_information = win32security.OWNER_SECURITY_INFORMATION | win32security.DACL_SECURITY_INFORMATION
key = registry.registry(TEST_KEY)
security = key.security(security_information)
sd = win32security.GetSecurityInfo(
win32api.RegOpenKey(win32con.HKEY_CURRENT_USER, r"software\winsys"),
win32security.SE_REGISTRY_KEY,
security_information
)
self.assertEqual(
security.as_string(),
win32security.ConvertSecurityDescriptorToStringSecurityDescriptor(
sd,
win32security.SDDL_REVISION_1,
security_information
)
)
def test_Registry_nonzero_exists(self):
win32api.RegCreateKey(win32con.HKEY_CURRENT_USER, r"Software\winsys1")
try:
self.assertTrue(registry.registry(TEST_KEY1))
finally:
remove_key(win32con.HKEY_CURRENT_USER, r"Software\winsys1")
def test_Registry_nonzero_not_exists(self):
try:
win32api.RegOpenKey(win32con.HKEY_CURRENT_USER, r"Software\winsys1")
except win32api.error as error:
errno, errctx, errmsg = error.args
if errno != winerror.ERROR_FILE_NOT_FOUND:
raise
else:
raise RuntimeError("Key exists but should not")
self.assertFalse(registry.registry(TEST_KEY1))
def test_Registry_dumped(self):
#
# Just test it doesn't fall over
#
dump = registry.registry("HKLM").dumped()
def test_Registry_get_value(self):
self.assertEqual(
registry.registry(TEST_KEY).get_value("winsys1"),
win32api.RegQueryValueEx(win32api.RegOpenKey(win32con.HKEY_CURRENT_USER, r"software\winsys"), "winsys1")[0]
)
def test_Registry_get_key(self):
self.assertEqual(
registry.registry(TEST_KEY).get_key("winsys1"),
registry.registry(TEST_KEY + r"\winsys1")
)
def test_Registry_getattr_value(self):
value, type = win32api.RegQueryValueEx(win32api.RegOpenKey(win32con.HKEY_CURRENT_USER, r"software\winsys"), "winsys1")
self.assertEqual(registry.registry(TEST_KEY).winsys1, value)
def test_Registry_getattr_value_shadows_key(self):
value, type = win32api.RegQueryValueEx(win32api.RegOpenKey(win32con.HKEY_CURRENT_USER, r"software\winsys"), "winsys2")
self.assertEqual(registry.registry(TEST_KEY).winsys2, value)
def test_Registry_getattr_key(self):
win32api.RegCreateKey(win32con.HKEY_CURRENT_USER, r"software\winsys\winsys3")
try:
self.assertEqual(registry.registry(TEST_KEY).winsys3, registry.registry(TEST_KEY).get_key("winsys3"))
finally:
win32api.RegDeleteKey(win32con.HKEY_CURRENT_USER, r"Software\winsys\winsys3")
def setup_set_value(self):
try:
win32api.RegDeleteValue(
win32api.RegOpenKeyEx(
win32con.HKEY_CURRENT_USER,
r"Software\winsys",
0,
win32con.KEY_ALL_ACCESS
),
"winsys4"
)
except win32api.error as error:
errno, errctx, errmsg = error.args
if errno == 2:
pass
else:
raise
#~ @with_setup(setup_set_value)
def test_Registry_set_value_type(self):
registry.registry(TEST_KEY, access="F").set_value("winsys4", b"abc", win32con.REG_BINARY)
self.assertEqual(
win32api.RegQueryValueEx(win32api.RegOpenKey(win32con.HKEY_CURRENT_USER, r"software\winsys"), "winsys4"),
(b"abc", win32con.REG_BINARY)
)
#~ @with_setup(setup_set_value)
def test_Registry_set_value_int(self):
registry.registry(TEST_KEY, access="F").set_value("winsys4", 1)
self.assertEqual(
win32api.RegQueryValueEx(win32api.RegOpenKey(win32con.HKEY_CURRENT_USER, r"software\winsys"), "winsys4"),
(1, win32con.REG_DWORD)
)
#~ @with_setup(setup_set_value)
def test_Registry_set_value_multi(self):
registry.registry(TEST_KEY, access="F").set_value("winsys4", ['a', 'b', 'c'])
self.assertEqual(
win32api.RegQueryValueEx(win32api.RegOpenKey(win32con.HKEY_CURRENT_USER, r"software\winsys"), "winsys4"),
(['a', 'b', 'c'], win32con.REG_MULTI_SZ)
)
#~ @with_setup(setup_set_value)
def test_Registry_set_value_expand_even_percent(self):
registry.registry(TEST_KEY, access="F").set_value("winsys4", "%TEMP%")
self.assertEqual(
win32api.RegQueryValueEx(win32api.RegOpenKey(win32con.HKEY_CURRENT_USER, r"software\winsys"), "winsys4"),
("%TEMP%", win32con.REG_EXPAND_SZ)
)
#~ @with_setup(setup_set_value)
def test_Registry_set_value_expand_odd_percent(self):
registry.registry(TEST_KEY, access="F").set_value("winsys4", "50%")
self.assertEqual(
win32api.RegQueryValueEx(win32api.RegOpenKey(win32con.HKEY_CURRENT_USER, r"software\winsys"), "winsys4"),
("50%", win32con.REG_SZ)
)
#~ @with_setup(setup_set_value)
def test_Registry_set_value_empty_string(self):
registry.registry(TEST_KEY, "F").set_value("winsys4", "")
self.assertEqual(
win32api.RegQueryValueEx(win32api.RegOpenKey(win32con.HKEY_CURRENT_USER, r"software\winsys"), "winsys4"),
("", win32con.REG_SZ)
)
#~ @with_setup(setup_set_value)
def test_Registry_set_value_non_empty_string(self):
registry.registry(TEST_KEY, access="F").set_value("winsys4", "winsys")
self.assertEqual(
win32api.RegQueryValueEx(win32api.RegOpenKey(win32con.HKEY_CURRENT_USER, r"software\winsys"), "winsys4"),
("winsys", win32con.REG_SZ)
)
#~ @with_setup(setup_set_value)
def test_Registry_set_value_none(self):
registry.registry(TEST_KEY, access="F").set_value("winsys4", None)
self.assertEqual(
win32api.RegQueryValueEx(win32api.RegOpenKey(win32con.HKEY_CURRENT_USER, r"software\winsys"), "winsys4"),
("", win32con.REG_SZ)
)
#~ @with_setup(setup_set_value)
def test_Registry_set_value_default(self):
registry.registry(TEST_KEY, access="F").set_value("", "test")
self.assertEqual(
win32api.RegQueryValueEx(win32api.RegOpenKey(win32con.HKEY_CURRENT_USER, r"software\winsys"), None),
("test", win32con.REG_SZ)
)
def test_Registry_create_subkey(self):
key0 = registry.registry(TEST_KEY, access="F")
new_key = key0.create("winsys1")
self.assertEqual(new_key, key0 + "winsys1")
def test_Registry_from_string(self):
key = registry.Registry.from_string(TEST_KEY)
self.assertEqual(key.moniker, TEST_KEY)
self.assertEqual(key.access, registry.Registry._access(registry.Registry.DEFAULT_ACCESS))
self.assertEqual(key.id, registry._parse_moniker(TEST_KEY.lower()))
def test_Registry_from_string_value(self):
self.assertEqual(
registry.Registry.from_string(TEST_KEY + ":winsys1"),
registry.Registry.from_string(TEST_KEY).get_value("winsys1")
)
if __name__ == "__main__":
unittest.main()
if sys.stdout.isatty(): raw_input("Press enter...")
|
|
#!/usr/bin/env python2
import binascii
import json
import os
import ssl
import sys
try:
import libnacl
import requests
__import__('pyasn1') # not using module itself
except ImportError:
sys.stderr.write(
'Please install all dependencies (pip install -r requirements.txt):\n')
raise
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.poolmanager import PoolManager
from pyasn1.type import univ, namedtype
from pyasn1.codec.der import decoder as der_decoder
class SaneTLS(HTTPAdapter):
"""By default, requests doesn't set sane defaults for TLS.
We'll at least make sure that Python is using TLSv1. Your Python stack may
or may not include support for the TLSv1 protocol.
"""
def __init__(self, ca_certs):
"""Construct a new SantTLS instance."""
super(SaneTLS, self).__init__()
def init_poolmanager(self, connections, maxsize, block=False):
"""Initialize a new PoolManager instance to satisfy urllib3."""
self.poolmanager = PoolManager(
num_pools=connections,
maxsize=maxsize,
block=block,
ssl_version=ssl.PROTOCOL_TLSv1,
)
# Our sane requests handler
sane = requests.Session()
# Python's json encoder doesn't understand bytes (which should default to a
# base64 encoded string).
json_bytes = lambda b: binascii.b2a_base64(b).rstrip().decode('utf-8')
class Key(univ.Sequence):
"""ASN.1 sequence for a serialized key."""
componentType = namedtype.NamedTypes(
namedtype.NamedType('id', univ.ObjectIdentifier()),
namedtype.NamedType('publicKey', univ.OctetString()),
namedtype.OptionalNamedType('privateKey', univ.OctetString()),
)
class Client(object):
"""Secrets client."""
OIDPrivateKey = univ.ObjectIdentifier('1.3.6.1.4.1.27266.11.17.2')
OIDPublicKey = univ.ObjectIdentifier('1.3.6.1.4.1.27266.11.17.1')
PEMPrivateKey = "SECRETS PRIVATE KEY"
PEMPublicKey = "SECRETS PUBLIC KEY"
def __init__(self, keyfile, baseurl, verify=None):
"""Initialize a new secrets client.
Supplied are a default private key and base URL.
"""
self.keyfile = keyfile
self.baseurl = baseurl
self.verify = verify
self.key = Client.loadKeyFile(self.PEMPrivateKey, self.keyfile)
if self.key.getComponentByName('id') != self.OIDPrivateKey:
raise TypeError(
'Key file {0} does not contain a SECRETS private key'.format(
keyfile))
self.publicKey = self.key.getComponentByName('publicKey').asOctets()
self.publicKeyBase64 = binascii.b2a_base64(self.publicKey).rstrip()
self.privateKey = self.key.getComponentByName('privateKey').asOctets()
if self.baseurl.endswith('/'):
self.baseurl = self.baseurl.rstrip('/')
@classmethod
def loadKeyFile(cls, type, keyFile):
"""Load a typed key from disk."""
with open(keyFile, "r") as fd:
keyData = fd.read()
derData = Client.parsePEM(type, keyData)
return der_decoder.decode(derData, asn1Spec=Key())[0]
@classmethod
def parsePEM(cls, type, data):
"""Parse a PEM block and return the base64 decoded substrate."""
header = '-----BEGIN {0}-----'.format(type)
footer = '-----END {0}-----'.format(type)
parsed = []
keep = False
for line in data.splitlines():
line = line.strip()
if keep:
if line == footer:
if not parsed:
raise ValueError(
'Could not decode {0} PEM block'.format(type))
return ''.join(parsed).decode('base64')
else:
parsed.append(line)
elif line == header:
keep = True
raise ValueError('Could not find {0} PEM block'.format(type))
def decrypt(self, s):
"""Decrypt a secrets structure ``s`` with our private key."""
key = None
nonce = binascii.a2b_base64(s['nonce'])
sender = binascii.a2b_base64(s['sender'])
secret = s['keys'].get(self.publicKeyBase64)
if secret is None:
print repr(self.publicKeyBase64), 'not in', s['keys'].keys()
raise ValueError('This node is not in the list of recipients')
box = binascii.a2b_base64(secret)
key = libnacl.crypto_box_open(box, nonce, sender, self.privateKey)
box = binascii.a2b_base64(s['secret'])
return libnacl.crypto_secretbox_open(box, nonce, key)
def encrypt_to(self, message, recipients):
"""Encrypt a secret message to ``recipients`` using our private key."""
nonce = os.urandom(24)
key = os.urandom(32)
secret = dict(
sender=json_bytes(self.publicKey),
nonce=json_bytes(nonce),
secret=json_bytes(libnacl.crypto_secretbox(message, nonce, key)),
keys={},
)
print recipients
for pub in recipients:
box = libnacl.crypto_box(key, nonce, pub, self.privateKey)
secret['keys'][json_bytes(pub)] = json_bytes(box)
return secret
def _get_json(self, url):
result = sane.get(
''.join([self.baseurl, url]),
verify=self.verify,
)
return result.json()
def _put_json(self, url, data):
sane.put(
''.join([self.baseurl, url]),
verify=self.verify,
data=json.dumps(data)
)
def command_cat(self, group, filename):
"""Command line ``cat`` command."""
data = self._get_json('/group/{0}/data/{1}/'.format(group, filename))
print self.decrypt(data)
def command_ls(self, group=None):
"""Command line ``ls`` command."""
if group is None:
for name in self._get_json('/group/'):
print name
else:
url = '/group/{0}/data/'.format(group)
for key in self._get_json(url).get('keys', {}):
print key
def command_put(self, group, name, filename=None):
"""Command line ``put`` command."""
recipients = self._get_json('/group/{0}/'.format(group))
if filename is None:
data = sys.stdin.read()
else:
data = open(filename, 'rb').read()
pubs = map(binascii.a2b_base64, recipients.values())
secret = self.encrypt_to(data, pubs)
self._put_json('/group/{0}/data/{1}/'.format(group, name), secret)
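# Illustrative sketch (not part of the original client): the libnacl
# construction used by encrypt_to()/decrypt() above, written out end-to-end
# for a single recipient. Key and nonce sizes follow the module's own choices
# (24-byte nonce, 32-byte symmetric key); the function name is ours.
def _example_box_roundtrip(message):
    recipient_pk, recipient_sk = libnacl.crypto_box_keypair()
    sender_pk, sender_sk = libnacl.crypto_box_keypair()
    nonce = os.urandom(24)
    key = os.urandom(32)
    # the payload is sealed with the symmetric key ...
    sealed = libnacl.crypto_secretbox(message, nonce, key)
    # ... and the symmetric key is sealed to the recipient with crypto_box
    wrapped_key = libnacl.crypto_box(key, nonce, recipient_pk, sender_sk)
    # recipient side: unwrap the key, then open the payload
    key2 = libnacl.crypto_box_open(wrapped_key, nonce, sender_pk, recipient_sk)
    return libnacl.crypto_secretbox_open(sealed, nonce, key2)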
def run():
"""Command line handler."""
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
'-c', '--cafile', default='../testdata/secrets.pem',
help='CA certificates file')
parser.add_argument(
'-k', '--keyfile', default='testdata/client.box',
help='private key file')
parser.add_argument(
'-u', '--url', default='https://localhost:6443',
help='server URL')
parser.add_argument('command', nargs=1)
parser.add_argument('args', nargs='*')
args = parser.parse_args()
# Set TLS options
sane.mount('https://', SaneTLS(args.cafile))
client = Client(args.keyfile, args.url, args.cafile)
command = getattr(client, 'command_{0}'.format(args.command[0]), None)
if command is None:
return parser.print_usage()
return command(*args.args)
if __name__ == '__main__':
sys.exit(run())
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_get_request(
location: str,
edge_zone: str,
publisher_name: str,
offer: str,
skus: str,
version: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-03-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/edgeZones/{edgeZone}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus/{skus}/versions/{version}')
path_format_arguments = {
"location": _SERIALIZER.url("location", location, 'str'),
"edgeZone": _SERIALIZER.url("edge_zone", edge_zone, 'str'),
"publisherName": _SERIALIZER.url("publisher_name", publisher_name, 'str'),
"offer": _SERIALIZER.url("offer", offer, 'str'),
"skus": _SERIALIZER.url("skus", skus, 'str'),
"version": _SERIALIZER.url("version", version, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_request(
location: str,
edge_zone: str,
publisher_name: str,
offer: str,
skus: str,
subscription_id: str,
*,
expand: Optional[str] = None,
top: Optional[int] = None,
orderby: Optional[str] = None,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-03-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/edgeZones/{edgeZone}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus/{skus}/versions')
path_format_arguments = {
"location": _SERIALIZER.url("location", location, 'str'),
"edgeZone": _SERIALIZER.url("edge_zone", edge_zone, 'str'),
"publisherName": _SERIALIZER.url("publisher_name", publisher_name, 'str'),
"offer": _SERIALIZER.url("offer", offer, 'str'),
"skus": _SERIALIZER.url("skus", skus, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if expand is not None:
query_parameters['$expand'] = _SERIALIZER.query("expand", expand, 'str')
if top is not None:
query_parameters['$top'] = _SERIALIZER.query("top", top, 'int')
if orderby is not None:
query_parameters['$orderby'] = _SERIALIZER.query("orderby", orderby, 'str')
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_offers_request(
location: str,
edge_zone: str,
publisher_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-03-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/edgeZones/{edgeZone}/publishers/{publisherName}/artifacttypes/vmimage/offers')
path_format_arguments = {
"location": _SERIALIZER.url("location", location, 'str'),
"edgeZone": _SERIALIZER.url("edge_zone", edge_zone, 'str'),
"publisherName": _SERIALIZER.url("publisher_name", publisher_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_publishers_request(
location: str,
edge_zone: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-03-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/edgeZones/{edgeZone}/publishers')
path_format_arguments = {
"location": _SERIALIZER.url("location", location, 'str'),
"edgeZone": _SERIALIZER.url("edge_zone", edge_zone, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_skus_request(
location: str,
edge_zone: str,
publisher_name: str,
offer: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-03-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/edgeZones/{edgeZone}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus')
path_format_arguments = {
"location": _SERIALIZER.url("location", location, 'str'),
"edgeZone": _SERIALIZER.url("edge_zone", edge_zone, 'str'),
"publisherName": _SERIALIZER.url("publisher_name", publisher_name, 'str'),
"offer": _SERIALIZER.url("offer", offer, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
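# Illustrative sketch (not part of the generated code): the request builders
# above are plain functions, so they can be exercised directly. The parameter
# values below are placeholders chosen for the example.
def _example_build_get_request() -> HttpRequest:
    request = build_get_request(
        location="westus",
        edge_zone="example-edge-zone",
        publisher_name="example-publisher",
        offer="example-offer",
        skus="example-sku",
        version="1.0.0",
        subscription_id="00000000-0000-0000-0000-000000000000",
    )
    # At this point the URL template has been expanded and the api-version
    # query parameter and Accept header have been attached.
    return request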
class VirtualMachineImagesEdgeZoneOperations(object):
"""VirtualMachineImagesEdgeZoneOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2021_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def get(
self,
location: str,
edge_zone: str,
publisher_name: str,
offer: str,
skus: str,
version: str,
**kwargs: Any
) -> "_models.VirtualMachineImage":
"""Gets a virtual machine image in an edge zone.
:param location: The name of a supported Azure region.
:type location: str
:param edge_zone: The name of the edge zone.
:type edge_zone: str
:param publisher_name: A valid image publisher.
:type publisher_name: str
:param offer: A valid image publisher offer.
:type offer: str
:param skus: A valid image SKU.
:type skus: str
:param version: A valid image SKU version.
:type version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualMachineImage, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2021_03_01.models.VirtualMachineImage
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineImage"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
location=location,
edge_zone=edge_zone,
publisher_name=publisher_name,
offer=offer,
skus=skus,
version=version,
subscription_id=self._config.subscription_id,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.CloudErrorAutoGenerated, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualMachineImage', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/edgeZones/{edgeZone}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus/{skus}/versions/{version}'} # type: ignore
@distributed_trace
def list(
self,
location: str,
edge_zone: str,
publisher_name: str,
offer: str,
skus: str,
expand: Optional[str] = None,
top: Optional[int] = None,
orderby: Optional[str] = None,
**kwargs: Any
) -> List["_models.VirtualMachineImageResource"]:
"""Gets a list of all virtual machine image versions for the specified location, edge zone,
publisher, offer, and SKU.
:param location: The name of a supported Azure region.
:type location: str
:param edge_zone: The name of the edge zone.
:type edge_zone: str
:param publisher_name: A valid image publisher.
:type publisher_name: str
:param offer: A valid image publisher offer.
:type offer: str
:param skus: A valid image SKU.
:type skus: str
:param expand: The expand expression to apply on the operation.
:type expand: str
:param top: An integer value specifying the number of images to return that matches supplied
values.
:type top: int
:param orderby: Specifies the order of the results returned. Formatted as an OData query.
:type orderby: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of VirtualMachineImageResource, or the result of cls(response)
:rtype: list[~azure.mgmt.compute.v2021_03_01.models.VirtualMachineImageResource]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.VirtualMachineImageResource"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_list_request(
location=location,
edge_zone=edge_zone,
publisher_name=publisher_name,
offer=offer,
skus=skus,
subscription_id=self._config.subscription_id,
expand=expand,
top=top,
orderby=orderby,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.CloudErrorAutoGenerated, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('[VirtualMachineImageResource]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/edgeZones/{edgeZone}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus/{skus}/versions'} # type: ignore
@distributed_trace
def list_offers(
self,
location: str,
edge_zone: str,
publisher_name: str,
**kwargs: Any
) -> List["_models.VirtualMachineImageResource"]:
"""Gets a list of virtual machine image offers for the specified location, edge zone and
publisher.
:param location: The name of a supported Azure region.
:type location: str
:param edge_zone: The name of the edge zone.
:type edge_zone: str
:param publisher_name: A valid image publisher.
:type publisher_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of VirtualMachineImageResource, or the result of cls(response)
:rtype: list[~azure.mgmt.compute.v2021_03_01.models.VirtualMachineImageResource]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.VirtualMachineImageResource"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_list_offers_request(
location=location,
edge_zone=edge_zone,
publisher_name=publisher_name,
subscription_id=self._config.subscription_id,
template_url=self.list_offers.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.CloudErrorAutoGenerated, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('[VirtualMachineImageResource]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_offers.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/edgeZones/{edgeZone}/publishers/{publisherName}/artifacttypes/vmimage/offers'} # type: ignore
@distributed_trace
def list_publishers(
self,
location: str,
edge_zone: str,
**kwargs: Any
) -> List["_models.VirtualMachineImageResource"]:
"""Gets a list of virtual machine image publishers for the specified Azure location and edge zone.
:param location: The name of a supported Azure region.
:type location: str
:param edge_zone: The name of the edge zone.
:type edge_zone: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of VirtualMachineImageResource, or the result of cls(response)
:rtype: list[~azure.mgmt.compute.v2021_03_01.models.VirtualMachineImageResource]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.VirtualMachineImageResource"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_list_publishers_request(
location=location,
edge_zone=edge_zone,
subscription_id=self._config.subscription_id,
template_url=self.list_publishers.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.CloudErrorAutoGenerated, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('[VirtualMachineImageResource]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_publishers.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/edgeZones/{edgeZone}/publishers'} # type: ignore
@distributed_trace
def list_skus(
self,
location: str,
edge_zone: str,
publisher_name: str,
offer: str,
**kwargs: Any
) -> List["_models.VirtualMachineImageResource"]:
"""Gets a list of virtual machine image SKUs for the specified location, edge zone, publisher, and
offer.
:param location: The name of a supported Azure region.
:type location: str
:param edge_zone: The name of the edge zone.
:type edge_zone: str
:param publisher_name: A valid image publisher.
:type publisher_name: str
:param offer: A valid image publisher offer.
:type offer: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of VirtualMachineImageResource, or the result of cls(response)
:rtype: list[~azure.mgmt.compute.v2021_03_01.models.VirtualMachineImageResource]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.VirtualMachineImageResource"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_list_skus_request(
location=location,
edge_zone=edge_zone,
publisher_name=publisher_name,
offer=offer,
subscription_id=self._config.subscription_id,
template_url=self.list_skus.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.CloudErrorAutoGenerated, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('[VirtualMachineImageResource]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_skus.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/edgeZones/{edgeZone}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus'} # type: ignore
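# Illustrative usage sketch (not part of the generated code above; assumes an
# authenticated ComputeManagementClient named `compute_client` that exposes
# these operations as `virtual_machine_images_edge_zone` -- the names are
# assumptions):
#
#   versions = compute_client.virtual_machine_images_edge_zone.list(
#       location="westus",
#       edge_zone="microsoftlosangeles1",
#       publisher_name="Canonical",
#       offer="UbuntuServer",
#       skus="18.04-LTS",
#   )
#   for image in versions:
#       print(image.name)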
|
|
#!/usr/bin/env python3
import glooey
import pyglet
import autoprop
# Import everything from glooey into this namespace. We'll override the
# widgets we want to customize, and everything else will be available directly.
from glooey import *
# Create a resource loader that knows where the assets for this theme are
# stored.
from glooey.themes import ResourceLoader
assets = ResourceLoader('kenney')
assets.add_font('font/kenvector_future.ttf')
assets.add_font('font/kenvector_future_thin.ttf')
glooey.drawing.colors = {
'light blue': glooey.Color.from_hex('#35baf3'),
'blue': glooey.Color.from_hex('#1ea7e1'),
'dark blue': glooey.Color.from_hex('#166e93'),
'light red': glooey.Color.from_hex('#fa8132'),
'red': glooey.Color.from_hex('#e86a17'),
'dark red': glooey.Color.from_hex('#aa4e11'),
'light green': glooey.Color.from_hex('#88e060'),
'green': glooey.Color.from_hex('#73cd4b'),
'dark green': glooey.Color.from_hex('#47832c'),
'light yellow': glooey.Color.from_hex('#ffd948'),
'yellow': glooey.Color.from_hex('#ffcc00'),
'dark yellow': glooey.Color.from_hex('#a88600'),
'white': glooey.Color.from_hex('#ffffff'),
'light grey': glooey.Color.from_hex('#eeeeee'),
'dark grey': glooey.Color.from_hex('#aaaaaa'),
'black': glooey.Color.from_hex('#000000'),
}
@autoprop
class BigLabel(glooey.Label):
custom_color = 'dark grey'
custom_font_name = 'KenVector Future'
custom_font_size = 12
@autoprop
class Label(glooey.Label):
custom_color = 'dark grey'
custom_font_name = 'KenVector Future Thin'
custom_font_size = 10
@autoprop
class Form(glooey.Form):
class Label(glooey.EditableLabel):
custom_padding = 14
custom_top_padding = 12
custom_bottom_padding = 10
custom_color = 'dark grey'
custom_selection_color = 'white'
custom_selection_background_color = 'blue'
custom_font_name = 'KenVector Future Thin'
custom_font_size = 10
class Base(glooey.Background):
custom_center = assets.texture('form/center.png')
custom_top = assets.texture('form/top.png')
custom_left = assets.texture('form/left.png')
custom_right = assets.texture('form/right.png')
custom_bottom = assets.texture('form/bottom.png')
custom_top_left = assets.image('form/top_left.png')
custom_top_right = assets.image('form/top_right.png')
custom_bottom_left = assets.image('form/bottom_left.png')
custom_bottom_right = assets.image('form/bottom_right.png')
@autoprop
class Frame(glooey.Frame):
custom_color = 'grey'
class Box(glooey.Bin):
custom_padding = 18
def __init__(self):
super().__init__()
self.color = self.custom_color
def get_color(self):
return self._color
def set_color(self, new_color):
self._color = new_color
style = f'frames/{self._color}'
self.decoration.set_appearance(
center=assets.texture(f'{style}/center.png'),
top=assets.texture(f'{style}/top.png'),
left=assets.texture(f'{style}/left.png'),
bottom=assets.texture(f'{style}/bottom.png'),
right=assets.texture(f'{style}/right.png'),
top_left=assets.image(f'{style}/top_left.png'),
top_right=assets.image(f'{style}/top_right.png'),
bottom_left=assets.image(f'{style}/bottom_left.png'),
bottom_right=assets.image(f'{style}/bottom_right.png'),
)
@autoprop
class BlueFrame(Frame):
custom_color = 'blue'
@autoprop
class RedFrame(Frame):
custom_color = 'red'
@autoprop
class GreenFrame(Frame):
custom_color = 'green'
@autoprop
class YellowFrame(Frame):
custom_color = 'yellow'
@autoprop
class GreyFrame(Frame):
custom_color = 'grey'
@autoprop
class Menu(glooey.Widget):
custom_color = 'blue'
custom_text = None
custom_alignment = 'center'
class Title(Label):
custom_alignment = 'center'
custom_color = 'white'
custom_top_padding = 12
custom_bottom_padding = 8
class Header(glooey.Frame):
custom_alignment = 'fill horz'
class Body(Frame):
custom_alignment = 'fill'
def __init__(self, title=None):
super().__init__()
self._vbox = glooey.VBox()
self._title = self.Title(title or self.custom_text)
self._header = self.Header()
self._body = self.Body()
self._header.add(self._title)
self._vbox.add(self._header, 0)
self._vbox.add(self._body)
self._attach_child(self._vbox)
self.color = self.custom_color
def add(self, widget):
self._body.add(widget)
def clear(self):
self._body.clear()
def get_color(self):
return self._color
def set_color(self, new_color):
self._color = new_color
header = f'frames/{self._color}'
        body = 'frames/grey'
self._header.decoration.set_appearance(
center=assets.texture(f'{header}/center.png'),
top=assets.texture(f'{header}/top.png'),
left=assets.texture(f'{header}/left.png'),
right=assets.texture(f'{header}/right.png'),
top_left=assets.image(f'{header}/top_left.png'),
top_right=assets.image(f'{header}/top_right.png'),
)
self._body.decoration.set_appearance(
center=assets.texture(f'{body}/center.png'),
bottom=assets.texture(f'{body}/bottom.png'),
left=assets.texture(f'{body}/left.png'),
right=assets.texture(f'{body}/right.png'),
bottom_left=assets.image(f'{body}/bottom_left.png'),
bottom_right=assets.image(f'{body}/bottom_right.png'),
)
@autoprop
class BlueMenu(Menu):
custom_color = 'blue'
@autoprop
class RedMenu(Menu):
custom_color = 'red'
@autoprop
class GreenMenu(Menu):
custom_color = 'green'
@autoprop
class YellowMenu(Menu):
custom_color = 'yellow'
class Title(Menu.Title):
custom_color = 'dark yellow'
class HRule(glooey.Background):
custom_center = assets.texture('dividers/horz.png')
custom_htile = True
custom_vtile = False
custom_vert_padding = 8
class VRule(glooey.Background):
custom_center = assets.texture('dividers/vert.png')
custom_htile = False
custom_vtile = True
custom_horz_padding = 18
@autoprop
class Button(glooey.Button):
custom_color = 'blue' # 'red', 'green', 'yellow', 'grey'
custom_gloss = 'high' # 'low', 'matte'
custom_font_color = 'white'
class Foreground(Label):
custom_alignment = 'center'
custom_font_weight = 'bold'
custom_horz_padding = 30
def __init__(self, text=None):
super().__init__(text)
self._color = self.custom_color
self._gloss = self.custom_gloss
self._update_background()
self.foreground.color = self.custom_font_color
def on_rollover(self, widget, new_state, old_state):
if new_state == 'down':
self.foreground.top_padding = 2 * 4
if old_state == 'down':
self.foreground.top_padding = 0
def get_color(self):
return self._color
def set_color(self, new_color):
self._color = new_color
self._update_background()
def get_gloss(self):
return self._gloss
def set_gloss(self, new_gloss):
self._gloss = new_gloss
self._update_background()
def _update_background(self):
gloss = {
'high': 'high_gloss',
'low': 'low_gloss',
'matte': 'matte',
}
style = f'buttons/{self._color}/{gloss[self._gloss]}'
self.set_background(
base_left=assets.image(f'{style}/base_left.png'),
base_center=assets.texture(f'{style}/base_center.png'),
base_right=assets.image(f'{style}/base_right.png'),
down_left=assets.image(f'{style}/down_left.png'),
down_center=assets.texture(f'{style}/down_center.png'),
down_right=assets.image(f'{style}/down_right.png'),
)
@autoprop
class BlueButton(Button):
custom_color = 'blue'
@autoprop
class RedButton(Button):
custom_color = 'red'
@autoprop
class GreenButton(Button):
custom_color = 'green'
@autoprop
class YellowButton(Button):
custom_color = 'yellow'
custom_font_color = 'dark yellow'
@autoprop
class GreyButton(Button):
custom_color = 'grey'
custom_font_color = 'dark grey'
@autoprop
class RoundButton(glooey.Button):
custom_color = 'red'
custom_icon = 'cross'
class Foreground(BigLabel):
custom_color = 'white'
custom_alignment = 'center'
custom_font_size = 16
def __init__(self):
super().__init__()
self.color = self.custom_color
self.icon = self.custom_icon
def get_color(self):
return self._color
def set_color(self, new_color):
self._color = new_color
self.set_background(
base_image=assets.image(f'buttons/{self._color}/round.png'),
)
def get_icon(self):
return self._icon
def set_icon(self, new_icon):
self._icon = new_icon
icon_color = 'grey' if self._color == 'grey' else 'white'
self.image = assets.image(f'icons/{icon_color}/{self._icon}.png')
@autoprop
class BlueRoundButton(RoundButton):
custom_color = 'blue'
@autoprop
class RedRoundButton(RoundButton):
custom_color = 'red'
@autoprop
class GreenRoundButton(RoundButton):
custom_color = 'green'
@autoprop
class YellowRoundButton(RoundButton):
custom_color = 'yellow'
custom_font_color = 'dark yellow'
@autoprop
class GreyRoundButton(RoundButton):
custom_color = 'grey'
custom_font_color = 'dark grey'
@autoprop
class Checkbox(glooey.Checkbox):
custom_color = 'blue'
custom_icon = 'checkmark' # 'cross'
def __init__(self):
super().__init__()
self._color = self.custom_color
self._icon = self.custom_icon
self._update_style()
def get_color(self):
return self._color
def set_color(self, new_color):
self._color = new_color
self._update_style()
def get_icon(self):
return self._icon
def set_icon(self, new_icon):
self._icon = new_icon
self._update_style()
def _update_style(self):
style = f'buttons/{self._color}/checkbox'
self.set_images(
checked_base=assets.image(f'{style}/{self._icon}.png'),
unchecked_base=assets.image(f'{style}/box.png'),
)
@autoprop
class BlueCheckbox(Checkbox):
custom_color = 'blue'
@autoprop
class RedCheckbox(Checkbox):
custom_color = 'red'
@autoprop
class GreenCheckbox(Checkbox):
custom_color = 'green'
@autoprop
class YellowCheckbox(Checkbox):
custom_color = 'yellow'
@autoprop
class GreyCheckbox(Checkbox):
custom_color = 'grey'
@autoprop
class RadioButton(glooey.RadioButton):
custom_color = 'blue'
def __init__(self):
super().__init__()
self.color = self.custom_color
def get_color(self):
return self._color
def set_color(self, new_color):
self._color = new_color
style = f'buttons/{new_color}/radio'
self.set_images(
checked_base=assets.image(f'{style}/tick.png'),
unchecked_base=assets.image(f'{style}/box.png'),
)
@autoprop
class BlueRadioButton(RadioButton):
custom_color = 'blue'
@autoprop
class RedRadioButton(RadioButton):
custom_color = 'red'
@autoprop
class GreenRadioButton(RadioButton):
custom_color = 'green'
@autoprop
class YellowRadioButton(RadioButton):
custom_color = 'yellow'
@autoprop
class GreyRadioButton(RadioButton):
custom_color = 'grey'
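# Illustrative usage sketch (assumptions: this module is importable as
# glooey.themes.kenney and the standard glooey Gui/pyglet setup is used):
#
#   from glooey.themes import kenney
#   window = pyglet.window.Window()
#   gui = kenney.Gui(window)              # Gui comes in via the wildcard import
#   gui.add(kenney.GreenButton('Play'))
#   pyglet.app.run()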
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tensor utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util.deprecation import deprecated
__all__ = [
'assert_same_float_dtype',
'assert_scalar',
'assert_scalar_int',
'convert_to_tensor_or_sparse_tensor',
'is_tensor',
'reduce_sum_n',
'remove_squeezable_dimensions',
'with_shape',
'with_same_shape']
# Temporary for backwards compatibility
is_tensor = tensor_util.is_tensor
assert_same_float_dtype = check_ops.assert_same_float_dtype
assert_scalar = check_ops.assert_scalar
convert_to_tensor_or_sparse_tensor = (
sparse_tensor.convert_to_tensor_or_sparse_tensor)
def reduce_sum_n(tensors, name=None):
"""Reduce tensors to a scalar sum.
This reduces each tensor in `tensors` to a scalar via `tf.reduce_sum`, then
adds them via `tf.add_n`.
Args:
tensors: List of tensors, all of the same numeric type.
name: Tensor name, and scope for all other ops.
Returns:
    A scalar tensor holding the sum of all the input tensors.
  Raises:
    ValueError: if `tensors` is missing or empty.
"""
if not tensors:
raise ValueError('No tensors provided.')
with ops.name_scope(name, 'reduce_sum_n', tensors) as name_scope:
tensors = [
math_ops.reduce_sum(t, name='%s/sum' % t.op.name) for t in tensors]
if len(tensors) == 1:
return tensors[0]
return math_ops.add_n(tensors, name=name_scope)
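# Example (illustrative): reduce_sum_n collapses each tensor to a scalar and
# then adds the scalars, so [1., 2.] and [[3.], [4.]] sum to 1 + 2 + 3 + 4 = 10.
#   total = reduce_sum_n([ops.convert_to_tensor([1., 2.]),
#                         ops.convert_to_tensor([[3.], [4.]])])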
@deprecated(None,
"Please switch to tf.confusion_matrix.remove_squeezable_dimensions. Note "
"that order of the inputs and ouputs of labels and predictions have also "
"been switched.")
def remove_squeezable_dimensions(predictions, labels, name=None):
"""Squeeze last dim if ranks of `predictions` and `labels` differ by 1.
This will use static shape if available. Otherwise, it will add graph
operations, which could result in a performance hit.
Args:
predictions: Predicted values, a `Tensor` of arbitrary dimensions.
labels: Label values, a `Tensor` whose dimensions match `predictions`.
name: Name of the op.
Returns:
Tuple of `predictions` and `labels`, possibly with last dim squeezed.
"""
with ops.name_scope(name, 'remove_squeezable_dimensions',
[predictions, labels]):
predictions = ops.convert_to_tensor(predictions)
labels = ops.convert_to_tensor(labels)
predictions_shape = predictions.get_shape()
predictions_rank = predictions_shape.ndims
labels_shape = labels.get_shape()
labels_rank = labels_shape.ndims
if (labels_rank is not None) and (predictions_rank is not None):
# Use static rank.
rank_diff = predictions_rank - labels_rank
if rank_diff == -1:
labels = array_ops.squeeze(labels, [-1])
elif rank_diff == 1:
predictions = array_ops.squeeze(predictions, [-1])
return predictions, labels
# Use dynamic rank.
rank_diff = array_ops.rank(predictions) - array_ops.rank(labels)
if (predictions_rank is None) or (
predictions_shape.dims[-1].is_compatible_with(1)):
predictions = control_flow_ops.cond(
math_ops.equal(1, rank_diff),
lambda: array_ops.squeeze(predictions, [-1]),
lambda: predictions)
if (labels_rank is None) or (
labels_shape.dims[-1].is_compatible_with(1)):
labels = control_flow_ops.cond(
math_ops.equal(-1, rank_diff),
lambda: array_ops.squeeze(labels, [-1]),
lambda: labels)
return predictions, labels
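# Example (illustrative): if `predictions` has shape [batch, 1] and `labels`
# has shape [batch], the trailing dimension of `predictions` is squeezed so
# both come back with shape [batch].
#   preds = array_ops.ones([4, 1])
#   labels = array_ops.ones([4])
#   preds, labels = remove_squeezable_dimensions(preds, labels)  # both rank 1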
def _all_equal(tensor0, tensor1):
with ops.name_scope('all_equal', values=[tensor0, tensor1]) as scope:
return math_ops.reduce_all(
math_ops.equal(tensor0, tensor1, name='equal'), name=scope)
def _is_rank(expected_rank, actual_tensor):
"""Returns whether actual_tensor's rank is expected_rank.
Args:
expected_rank: Integer defining the expected rank, or tensor of same.
actual_tensor: Tensor to test.
Returns:
New tensor.
"""
with ops.name_scope('is_rank', values=[actual_tensor]) as scope:
expected = ops.convert_to_tensor(expected_rank, name='expected')
actual = array_ops.rank(actual_tensor, name='actual')
return math_ops.equal(expected, actual, name=scope)
def _is_shape(expected_shape, actual_tensor, actual_shape=None):
"""Returns whether actual_tensor's shape is expected_shape.
Args:
expected_shape: Integer list defining the expected shape, or tensor of same.
actual_tensor: Tensor to test.
actual_shape: Shape of actual_tensor, if we already have it.
Returns:
New tensor.
"""
with ops.name_scope('is_shape', values=[actual_tensor]) as scope:
is_rank = _is_rank(array_ops.size(expected_shape), actual_tensor)
if actual_shape is None:
actual_shape = array_ops.shape(actual_tensor, name='actual')
shape_equal = _all_equal(
ops.convert_to_tensor(expected_shape, name='expected'),
actual_shape)
return math_ops.logical_and(is_rank, shape_equal, name=scope)
def _assert_shape_op(expected_shape, actual_tensor):
"""Asserts actual_tensor's shape is expected_shape.
Args:
expected_shape: List of integers defining the expected shape, or tensor of
same.
actual_tensor: Tensor to test.
Returns:
New assert tensor.
"""
with ops.name_scope('assert_shape', values=[actual_tensor]) as scope:
actual_shape = array_ops.shape(actual_tensor, name='actual')
is_shape = _is_shape(expected_shape, actual_tensor, actual_shape)
return control_flow_ops.Assert(
is_shape, [
'Wrong shape for %s [expected] [actual].' % actual_tensor.name,
expected_shape,
actual_shape
], name=scope)
def with_same_shape(expected_tensor, tensor):
"""Assert tensors are the same shape, from the same graph.
Args:
expected_tensor: Tensor with expected shape.
tensor: Tensor of actual values.
Returns:
The original tensor argument, possibly with assert ops added.
"""
with ops.name_scope('%s/' % tensor.op.name, values=[expected_tensor, tensor]):
tensor_shape = expected_tensor.get_shape()
expected_shape = (
tensor_shape.as_list() if tensor_shape.is_fully_defined()
else array_ops.shape(expected_tensor, name='expected_shape'))
return with_shape(expected_shape, tensor)
def with_shape(expected_shape, tensor):
"""Asserts tensor has expected shape.
  If tensor shape and expected_shape are fully defined, assert they match.
Otherwise, add assert op that will validate the shape when tensor is
evaluated, and set shape on tensor.
Args:
expected_shape: Expected shape to assert, as a 1D array of ints, or tensor
of same.
tensor: Tensor whose shape we're validating.
Returns:
tensor, perhaps with a dependent assert operation.
Raises:
ValueError: if tensor has an invalid shape.
"""
if isinstance(tensor, sparse_tensor.SparseTensor):
raise ValueError('SparseTensor not supported.')
# Shape type must be 1D int32.
if tensor_util.is_tensor(expected_shape):
if expected_shape.dtype.base_dtype != dtypes.int32:
raise ValueError(
'Invalid dtype %s for shape %s expected of tensor %s.' % (
expected_shape.dtype, expected_shape, tensor.name))
if isinstance(expected_shape, (list, tuple)):
if not expected_shape:
expected_shape = np.asarray([], dtype=np.int32)
else:
np_expected_shape = np.asarray(expected_shape)
expected_shape = (
np.asarray(expected_shape, dtype=np.int32)
if np_expected_shape.dtype == np.int64 else np_expected_shape)
if isinstance(expected_shape, np.ndarray):
if expected_shape.ndim > 1:
raise ValueError(
'Invalid rank %s for shape %s expected of tensor %s.' % (
expected_shape.ndim, expected_shape, tensor.name))
if expected_shape.dtype != np.int32:
raise ValueError(
'Invalid dtype %s for shape %s expected of tensor %s.' % (
expected_shape.dtype, expected_shape, tensor.name))
actual_shape = tensor.get_shape()
if (not actual_shape.is_fully_defined()
or tensor_util.is_tensor(expected_shape)):
with ops.name_scope('%s/' % tensor.op.name, values=[tensor]):
if (not tensor_util.is_tensor(expected_shape)
and (len(expected_shape) < 1)):
# TODO(irving): Remove scalar special case
return array_ops.reshape(tensor, [])
with ops.control_dependencies([_assert_shape_op(expected_shape, tensor)]):
result = array_ops.identity(tensor)
if not tensor_util.is_tensor(expected_shape):
result.set_shape(expected_shape)
return result
if (not tensor_util.is_tensor(expected_shape) and
not actual_shape.is_compatible_with(expected_shape)):
if (len(expected_shape) < 1) and actual_shape.is_compatible_with([1]):
# TODO(irving): Remove scalar special case.
with ops.name_scope('%s/' % tensor.op.name, values=[tensor]):
return array_ops.reshape(tensor, [])
raise ValueError('Invalid shape for tensor %s, expected %s, got %s.' % (
tensor.name, expected_shape, actual_shape))
return tensor
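# Illustrative sketch (assumes graph mode, as in the rest of this module):
# when the static shape is unknown, with_shape attaches a runtime assert and
# sets the static shape on the returned tensor.
#   x = array_ops.placeholder(dtypes.float32, shape=None)
#   checked = with_shape([2, 3], x)   # checked.get_shape() is now (2, 3)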
def assert_scalar_int(tensor, name=None):
"""Assert `tensor` is 0-D, of type `tf.int32` or `tf.int64`.
Args:
tensor: `Tensor` to test.
name: Name of the op and of the new `Tensor` if one is created.
Returns:
`tensor`, for chaining.
Raises:
ValueError: if `tensor` is not 0-D, of integer type.
"""
with ops.name_scope(name, 'assert_scalar_int', [tensor]) as name_scope:
tensor = ops.convert_to_tensor(tensor)
data_type = tensor.dtype
if not data_type.base_dtype.is_integer:
raise ValueError('Expected integer type for %s, received type: %s.'
% (tensor.name, data_type))
return check_ops.assert_scalar(tensor, name=name_scope)
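# Example (illustrative): assert_scalar_int accepts 0-D integer tensors and
# rejects floating-point input.
#   assert_scalar_int(ops.convert_to_tensor(3))    # passes
#   assert_scalar_int(ops.convert_to_tensor(3.0))  # raises ValueError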
|
|
from __future__ import unicode_literals
from six.moves.urllib.request import urlopen
from six.moves.urllib.error import HTTPError
import boto
from boto.exception import S3ResponseError
from boto.s3.key import Key
from boto.s3.connection import OrdinaryCallingFormat
from freezegun import freeze_time
import requests
import sure # noqa
from moto import mock_s3bucket_path
def create_connection(key=None, secret=None):
return boto.connect_s3(key, secret, calling_format=OrdinaryCallingFormat())
class MyModel(object):
def __init__(self, name, value):
self.name = name
self.value = value
def save(self):
conn = create_connection('the_key', 'the_secret')
bucket = conn.get_bucket('mybucket')
k = Key(bucket)
k.key = self.name
k.set_contents_from_string(self.value)
@mock_s3bucket_path
def test_my_model_save():
# Create Bucket so that test can run
conn = create_connection('the_key', 'the_secret')
conn.create_bucket('mybucket')
####################################
model_instance = MyModel('steve', 'is awesome')
model_instance.save()
conn.get_bucket('mybucket').get_key('steve').get_contents_as_string().should.equal(b'is awesome')
@mock_s3bucket_path
def test_missing_key():
conn = create_connection('the_key', 'the_secret')
bucket = conn.create_bucket("foobar")
bucket.get_key("the-key").should.equal(None)
@mock_s3bucket_path
def test_missing_key_urllib2():
conn = create_connection('the_key', 'the_secret')
conn.create_bucket("foobar")
urlopen.when.called_with("http://s3.amazonaws.com/foobar/the-key").should.throw(HTTPError)
@mock_s3bucket_path
def test_empty_key():
conn = create_connection('the_key', 'the_secret')
bucket = conn.create_bucket("foobar")
key = Key(bucket)
key.key = "the-key"
key.set_contents_from_string("")
bucket.get_key("the-key").get_contents_as_string().should.equal(b'')
@mock_s3bucket_path
def test_empty_key_set_on_existing_key():
conn = create_connection('the_key', 'the_secret')
bucket = conn.create_bucket("foobar")
key = Key(bucket)
key.key = "the-key"
key.set_contents_from_string("foobar")
bucket.get_key("the-key").get_contents_as_string().should.equal(b'foobar')
key.set_contents_from_string("")
bucket.get_key("the-key").get_contents_as_string().should.equal(b'')
@mock_s3bucket_path
def test_large_key_save():
conn = create_connection('the_key', 'the_secret')
bucket = conn.create_bucket("foobar")
key = Key(bucket)
key.key = "the-key"
key.set_contents_from_string("foobar" * 100000)
bucket.get_key("the-key").get_contents_as_string().should.equal(b'foobar' * 100000)
@mock_s3bucket_path
def test_copy_key():
conn = create_connection('the_key', 'the_secret')
bucket = conn.create_bucket("foobar")
key = Key(bucket)
key.key = "the-key"
key.set_contents_from_string("some value")
bucket.copy_key('new-key', 'foobar', 'the-key')
bucket.get_key("the-key").get_contents_as_string().should.equal(b"some value")
bucket.get_key("new-key").get_contents_as_string().should.equal(b"some value")
@mock_s3bucket_path
def test_set_metadata():
conn = create_connection('the_key', 'the_secret')
bucket = conn.create_bucket("foobar")
key = Key(bucket)
key.key = 'the-key'
key.set_metadata('md', 'Metadatastring')
key.set_contents_from_string("Testval")
bucket.get_key('the-key').get_metadata('md').should.equal('Metadatastring')
@freeze_time("2012-01-01 12:00:00")
@mock_s3bucket_path
def test_last_modified():
# See https://github.com/boto/boto/issues/466
conn = create_connection()
bucket = conn.create_bucket("foobar")
key = Key(bucket)
key.key = "the-key"
key.set_contents_from_string("some value")
rs = bucket.get_all_keys()
rs[0].last_modified.should.equal('2012-01-01T12:00:00Z')
bucket.get_key("the-key").last_modified.should.equal('Sun, 01 Jan 2012 12:00:00 GMT')
@mock_s3bucket_path
def test_missing_bucket():
conn = create_connection('the_key', 'the_secret')
conn.get_bucket.when.called_with('mybucket').should.throw(S3ResponseError)
@mock_s3bucket_path
def test_bucket_with_dash():
conn = create_connection('the_key', 'the_secret')
conn.get_bucket.when.called_with('mybucket-test').should.throw(S3ResponseError)
@mock_s3bucket_path
def test_bucket_deletion():
conn = create_connection('the_key', 'the_secret')
bucket = conn.create_bucket("foobar")
key = Key(bucket)
key.key = "the-key"
key.set_contents_from_string("some value")
# Try to delete a bucket that still has keys
conn.delete_bucket.when.called_with("foobar").should.throw(S3ResponseError)
bucket.delete_key("the-key")
conn.delete_bucket("foobar")
# Get non-existing bucket
conn.get_bucket.when.called_with("foobar").should.throw(S3ResponseError)
    # Delete a non-existent bucket
conn.delete_bucket.when.called_with("foobar").should.throw(S3ResponseError)
@mock_s3bucket_path
def test_get_all_buckets():
conn = create_connection('the_key', 'the_secret')
conn.create_bucket("foobar")
conn.create_bucket("foobar2")
buckets = conn.get_all_buckets()
buckets.should.have.length_of(2)
@mock_s3bucket_path
def test_post_to_bucket():
conn = create_connection('the_key', 'the_secret')
bucket = conn.create_bucket("foobar")
requests.post("https://s3.amazonaws.com/foobar", {
'key': 'the-key',
'file': 'nothing'
})
bucket.get_key('the-key').get_contents_as_string().should.equal(b'nothing')
@mock_s3bucket_path
def test_post_with_metadata_to_bucket():
conn = create_connection('the_key', 'the_secret')
bucket = conn.create_bucket("foobar")
requests.post("https://s3.amazonaws.com/foobar", {
'key': 'the-key',
'file': 'nothing',
'x-amz-meta-test': 'metadata'
})
bucket.get_key('the-key').get_metadata('test').should.equal('metadata')
@mock_s3bucket_path
def test_bucket_method_not_implemented():
requests.patch.when.called_with("https://s3.amazonaws.com/foobar").should.throw(NotImplementedError)
@mock_s3bucket_path
def test_key_method_not_implemented():
requests.post.when.called_with("https://s3.amazonaws.com/foobar/foo").should.throw(NotImplementedError)
@mock_s3bucket_path
def test_bucket_name_with_dot():
conn = create_connection()
bucket = conn.create_bucket('firstname.lastname')
k = Key(bucket, 'somekey')
k.set_contents_from_string('somedata')
@mock_s3bucket_path
def test_key_with_special_characters():
conn = create_connection()
bucket = conn.create_bucket('test_bucket_name')
key = Key(bucket, 'test_list_keys_2/*x+?^@~!y')
key.set_contents_from_string('value1')
key_list = bucket.list('test_list_keys_2/', '/')
keys = [x for x in key_list]
keys[0].name.should.equal("test_list_keys_2/*x+?^@~!y")
@mock_s3bucket_path
def test_bucket_key_listing_order():
conn = create_connection()
bucket = conn.create_bucket('test_bucket')
prefix = 'toplevel/'
def store(name):
k = Key(bucket, prefix + name)
k.set_contents_from_string('somedata')
names = ['x/key', 'y.key1', 'y.key2', 'y.key3', 'x/y/key', 'x/y/z/key']
for name in names:
store(name)
delimiter = None
keys = [x.name for x in bucket.list(prefix, delimiter)]
keys.should.equal([
'toplevel/x/key', 'toplevel/x/y/key', 'toplevel/x/y/z/key',
'toplevel/y.key1', 'toplevel/y.key2', 'toplevel/y.key3'
])
delimiter = '/'
keys = [x.name for x in bucket.list(prefix, delimiter)]
keys.should.equal([
'toplevel/y.key1', 'toplevel/y.key2', 'toplevel/y.key3', 'toplevel/x/'
])
# Test delimiter with no prefix
delimiter = '/'
keys = [x.name for x in bucket.list(prefix=None, delimiter=delimiter)]
keys.should.equal(['toplevel'])
delimiter = None
keys = [x.name for x in bucket.list(prefix + 'x', delimiter)]
keys.should.equal(['toplevel/x/key', 'toplevel/x/y/key', 'toplevel/x/y/z/key'])
delimiter = '/'
keys = [x.name for x in bucket.list(prefix + 'x', delimiter)]
keys.should.equal(['toplevel/x/'])
|
|
# This file is part of Indico.
# Copyright (C) 2002 - 2022 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import posixpath
from flask import current_app, render_template, request
from sqlalchemy.orm import load_only
from werkzeug.utils import cached_property
from indico.modules.admin.views import WPAdmin
from indico.modules.core.settings import social_settings
from indico.modules.events import Event
from indico.modules.events.layout import layout_settings, theme_settings
from indico.modules.events.layout.util import (build_menu_entry_name, get_css_url, get_menu_entry_by_name,
menu_entries_for_event)
from indico.modules.events.management.settings import privacy_settings
from indico.modules.events.models.events import EventType
from indico.modules.events.util import serialize_event_for_json_ld
from indico.util.date_time import format_date
from indico.util.mathjax import MathjaxMixin
from indico.util.string import strip_tags, truncate
from indico.web.flask.util import url_for
from indico.web.views import WPDecorated, WPJinjaMixin
def _get_print_url(event, theme=None, theme_override=False):
view = theme if theme_override else None
if event.type_ == EventType.conference:
return url_for('timetable.timetable', event, print='1', view=view)
elif event.type_ == EventType.meeting:
show_date = request.args.get('showDate')
show_session = request.args.get('showSession')
detail_level = request.args.get('detailLevel')
if show_date == 'all':
show_date = None
if show_session == 'all':
show_session = None
if detail_level in ('all', 'contribution'):
detail_level = None
return url_for('events.display', event, showDate=show_date, showSession=show_session, detailLevel=detail_level,
print='1', view=view)
elif event.type_ == EventType.lecture:
return url_for('events.display', event, print='1', view=view)
def render_event_header(event, conference_layout=False, theme=None, theme_override=False):
print_url = _get_print_url(event, theme, theme_override) if not conference_layout else None
show_nav_bar = event.type_ != EventType.conference or layout_settings.get(event, 'show_nav_bar')
themes = {tid: {'name': data['title'], 'user_visible': data.get('user_visible')}
for tid, data in theme_settings.get_themes_for(event.type_.name).items()}
return render_template('events/header.html',
event=event, print_url=print_url, show_nav_bar=show_nav_bar, themes=themes, theme=theme)
def render_event_footer(event, dark=False):
location = event.venue_name
if event.room_name:
location = f'{event.room_name} ({location})'
description = f'{truncate(event.description, 1000)}\n\n{event.short_external_url}'.strip()
google_calendar_params = {
'action': 'TEMPLATE',
'text': event.title,
'dates': '{}/{}'.format(event.start_dt.strftime('%Y%m%dT%H%M%SZ'),
event.end_dt.strftime('%Y%m%dT%H%M%SZ')),
'details': description,
'location': location,
'trp': False,
'sprop': [event.external_url, 'name:indico']
}
social_settings_data = social_settings.get_all()
show_social = social_settings_data['enabled'] and layout_settings.get(event, 'show_social_badges')
privacy_text = privacy_settings.get(event, 'privacy_policy')
privacy_urls = privacy_settings.get(event, 'privacy_policy_urls')
return render_template('events/footer.html',
event=event,
dark=dark,
social_settings=social_settings_data,
show_social=show_social,
google_calendar_params=google_calendar_params,
privacy_text=privacy_text,
privacy_urls=privacy_urls)
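# Example (illustrative): for an event running 09:00-10:00 UTC on 2024-01-01,
# the Google Calendar 'dates' value built above becomes
# '20240101T090000Z/20240101T100000Z'.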
class WPEventAdmin(WPAdmin):
template_prefix = 'events/'
class WPEventBase(WPDecorated):
ALLOW_JSON = False
bundles = ('module_events.display.js', 'module_events.contributions.js', 'module_events.header.js',
'module_events.search.js')
@property
def page_metadata(self):
metadata = super().page_metadata
return {
'og': {
**metadata['og'],
'title': self.event.title,
'type': 'event',
'image': (self.event.logo_url if self.event.has_logo else
url_for('assets.image', filename='indico_square.png', _external=True)),
'description': self.event.description
},
'json_ld': serialize_event_for_json_ld(self.event, full=True),
'keywords': self.event.keywords
}
def __init__(self, rh, event_, **kwargs):
assert event_ == kwargs.setdefault('event', event_)
self.event = event_
WPDecorated.__init__(self, rh, **kwargs)
start_dt_local = event_.start_dt_display.astimezone(event_.display_tzinfo)
end_dt_local = event_.end_dt_display.astimezone(event_.display_tzinfo)
dates = ' ({})'.format(format_date(start_dt_local, format='long'))
if start_dt_local.date() != end_dt_local.date():
if start_dt_local.year == end_dt_local.year and start_dt_local.month == end_dt_local.month:
dates = ' ({}-{})'.format(start_dt_local.day, format_date(end_dt_local, format='long'))
else:
dates = ' ({} - {})'.format(format_date(start_dt_local, format='long'),
format_date(end_dt_local, format='long'))
self.title = f'{strip_tags(self.event.title)} {dates}'
page_title = kwargs.get('page_title')
if page_title:
self.title += f': {strip_tags(page_title)}'
def _get_header(self):
raise NotImplementedError # must be overridden by meeting/lecture and conference WPs
class WPSimpleEventDisplayBase(MathjaxMixin, WPEventBase):
"""Base class for displaying something on a lecture/meeting page."""
def __init__(self, rh, event_, **kwargs):
self.event = event_
WPEventBase.__init__(self, rh, event_, **kwargs)
def _get_header(self):
return render_event_header(self.event)
def _get_footer(self):
return render_event_footer(self.event)
class WPSimpleEventDisplay(WPSimpleEventDisplayBase):
bundles = (
'module_vc.js',
'module_vc.css',
'module_events.cloning.js',
'module_events.importing.js',
)
def __init__(self, rh, conf, theme_id, theme_override=False):
WPSimpleEventDisplayBase.__init__(self, rh, conf)
self.theme_id = theme_id
self.theme_file_name = theme_id.replace('-', '_')
self.theme = theme_settings.themes[theme_id]
self.theme_override = theme_override
@property
def additional_bundles(self):
plugin = self.theme.get('plugin')
print_stylesheet = self.theme.get('print_stylesheet')
if plugin:
manifest = plugin.manifest
if manifest is None:
raise RuntimeError(f'Assets for plugin {plugin.name} have not been built')
else:
manifest = current_app.manifest
return {
'screen': (manifest[f'themes_{self.theme_file_name}.css'],),
'print': ((manifest[f'themes_{self.theme_file_name}.print.css'],)
if print_stylesheet else ())
}
def _get_head_content(self):
return MathjaxMixin._get_head_content(self) + WPEventBase._get_head_content(self)
def get_extra_css_files(self):
custom_url = get_css_url(self.event)
return [custom_url] if custom_url else []
def _apply_decoration(self, body):
if request.args.get('frame') == 'no' or request.args.get('fr') == 'no' or request.args.get('print') == '1':
return render_template('events/display/print.html', content=body)
else:
return WPEventBase._apply_decoration(self, body)
def _get_header(self):
return render_event_header(self.event, theme=self.theme_id, theme_override=self.theme_override)
def _get_footer(self):
dark = self.theme.get('settings', {}).get('page_is_dark', True)
return render_event_footer(self.event, dark=dark)
def _get_body(self, params):
attached_items = self.event.attached_items
folders = [folder for folder in attached_items.get('folders', []) if folder.title != 'Internal Page Files']
files = attached_items.get('files', [])
lectures = []
if self.event.series is not None and self.event.series.show_links:
lectures = (Event.query.with_parent(self.event.series)
.filter(Event.id != self.event.id)
.options(load_only('series_pos', 'id'))
.order_by(Event.series_pos)
.all())
plugin = self.theme.get('plugin')
tpl_name = self.theme['template']
tpl = ((plugin.name + tpl_name)
if (plugin and tpl_name[0] == ':')
else posixpath.join('events/display', tpl_name))
return render_template(tpl,
event=self.event,
category=(self.event.category.title if self.event.category else None),
timezone=self.event.display_tzinfo,
theme_settings=self.theme.get('settings', {}),
theme_user_settings=layout_settings.get(self.event, 'timetable_theme_settings'),
files=files,
folders=folders,
lectures=lectures)
class WPConferenceDisplayBase(WPJinjaMixin, MathjaxMixin, WPEventBase):
menu_entry_plugin = None
menu_entry_name = None
bundles = ('conferences.css',)
def __init__(self, rh, event_, **kwargs):
assert event_ == kwargs.setdefault('event', event_)
self.event = event_
kwargs['conf_layout_params'] = self._get_layout_params()
kwargs.setdefault('page_title', self.sidemenu_title)
WPEventBase.__init__(self, rh, event_, **kwargs)
def _get_layout_params(self):
bg_color = layout_settings.get(self.event, 'header_background_color').replace('#', '').lower()
text_color = layout_settings.get(self.event, 'header_text_color').replace('#', '').lower()
announcement = ''
if layout_settings.get(self.event, 'show_announcement'):
announcement = layout_settings.get(self.event, 'announcement')
return {
'menu': menu_entries_for_event(self.event),
'active_menu_item': self.sidemenu_option,
'bg_color_css': 'background: #{0}; border-color: #{0};'.format(bg_color) if bg_color else '',
'text_color_css': f'color: #{text_color};' if text_color else '',
'announcement': announcement,
}
def get_extra_css_files(self):
theme_url = self._kwargs.get('css_url_override', get_css_url(self.event))
return [theme_url] if theme_url else []
def _get_header(self):
return render_event_header(self.event, conference_layout=True)
@cached_property
def sidemenu_entry(self):
if not self.menu_entry_name:
return None
name = build_menu_entry_name(self.menu_entry_name, self.menu_entry_plugin)
return get_menu_entry_by_name(name, self.event)
@cached_property
def sidemenu_option(self):
entry = self.sidemenu_entry
return entry.id if entry else None
@cached_property
def sidemenu_title(self):
entry = self.sidemenu_entry
return entry.localized_title if entry else ''
def _get_head_content(self):
return '\n'.join([
MathjaxMixin._get_head_content(self),
WPEventBase._get_head_content(self)
])
def _get_body(self, params):
return WPJinjaMixin._get_page_content(self, params)
def _get_footer(self):
return render_event_footer(self.event)
def _apply_decoration(self, body):
self.logo_url = self.event.logo_url if self.event.has_logo else None
css_override_form = self._kwargs.get('css_override_form')
if css_override_form:
override_html = render_template('events/layout/css_preview_header.html',
event=self.event, form=css_override_form,
download_url=self._kwargs['css_url_override'])
body = override_html + body
return WPEventBase._apply_decoration(self, body)
class WPConferenceDisplay(WPConferenceDisplayBase):
menu_entry_name = 'overview'
def _get_body(self, params):
return render_template('events/display/conference.html', **self._kwargs)
class WPAccessKey(WPJinjaMixin, WPDecorated):
template_prefix = 'events/'
def _get_body(self, params):
return self._get_page_content(params)
|
|
# Copyright (c) 2012-2015 Tycho Andersen
# Copyright (c) 2013 xarvh
# Copyright (c) 2013 horsik
# Copyright (c) 2013-2014 roger
# Copyright (c) 2013 Tao Sauvage
# Copyright (c) 2014 ramnes
# Copyright (c) 2014 Sean Vig
# Copyright (c) 2014 Adi Sieker
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from . import command
from . import hook
from . import utils
from . import xcbq
from six import MAXSIZE
class Key(object):
"""
Defines a keybinding.
"""
def __init__(self, modifiers, key, *commands):
"""
- modifiers: A list of modifier specifications. Modifier
specifications are one of: "shift", "lock", "control", "mod1",
"mod2", "mod3", "mod4", "mod5".
- key: A key specification, e.g. "a", "Tab", "Return", "space".
- *commands: A list of lazy command objects generated with the
command.lazy helper. If multiple Call objects are specified, they
are run in sequence.
"""
self.modifiers = modifiers
self.key = key
self.commands = commands
if key not in xcbq.keysyms:
raise utils.QtileError("Unknown key: %s" % key)
self.keysym = xcbq.keysyms[key]
try:
self.modmask = utils.translateMasks(self.modifiers)
except KeyError as v:
raise utils.QtileError(v)
def __repr__(self):
return "Key(%s, %s)" % (self.modifiers, self.key)
class Drag(object):
"""
    Defines the binding of a mouse button to a dragging action.
    On each motion event, the command is executed with two extra parameters
    added: the x and y offsets from the previous position.
    It focuses the clicked window by default. To prevent this, pass
    focus=None as an argument.
"""
def __init__(self, modifiers, button, *commands, **kwargs):
self.start = kwargs.get("start", None)
self.focus = kwargs.get("focus", "before")
self.modifiers = modifiers
self.button = button
self.commands = commands
try:
self.button_code = int(self.button.replace('Button', ''))
self.modmask = utils.translateMasks(self.modifiers)
except KeyError as v:
raise utils.QtileError(v)
def __repr__(self):
return "Drag(%s, %s)" % (self.modifiers, self.button)
class Click(object):
"""
    Defines the binding of a mouse click.
    It focuses the clicked window by default. To prevent this, pass
    focus=None as an argument.
"""
def __init__(self, modifiers, button, *commands, **kwargs):
self.focus = kwargs.get("focus", "before")
self.modifiers = modifiers
self.button = button
self.commands = commands
try:
self.button_code = int(self.button.replace('Button', ''))
self.modmask = utils.translateMasks(self.modifiers)
except KeyError as v:
raise utils.QtileError(v)
def __repr__(self):
return "Click(%s, %s)" % (self.modifiers, self.button)
class EzConfig(object):
'''
Helper class for defining key and button bindings in an emacs-like format.
Inspired by Xmonad's XMonad.Util.EZConfig.
'''
modifier_keys = {
'M': 'mod4',
'A': 'mod1',
'S': 'shift',
'C': 'control',
}
def parse(self, spec):
'''
Splits an emacs keydef into modifiers and keys. For example:
"M-S-a" -> ['mod4', 'shift'], 'a'
"A-<minus>" -> ['mod1'], 'minus'
"C-<Tab>" -> ['control'], 'Tab'
'''
mods = []
keys = []
for key in spec.split('-'):
if not key:
break
if key in self.modifier_keys:
if keys:
msg = 'Modifiers must always come before key/btn: %s'
raise utils.QtileError(msg % spec)
mods.append(self.modifier_keys[key])
continue
if len(key) == 1:
keys.append(key)
continue
if len(key) > 3 and key[0] == '<' and key[-1] == '>':
keys.append(key[1:-1])
continue
if not keys:
msg = 'Invalid key/btn specifier: %s'
raise utils.QtileError(msg % spec)
if len(keys) > 1:
msg = 'Key chains are not supported: %s' % spec
raise utils.QtileError(msg)
return mods, keys[0]
class EzKey(EzConfig, Key):
def __init__(self, keydef, *commands):
modkeys, key = self.parse(keydef)
super(EzKey, self).__init__(modkeys, key, *commands)
class EzClick(EzConfig, Click):
def __init__(self, btndef, *commands, **kwargs):
modkeys, button = self.parse(btndef)
button = 'Button%s' % button
super(EzClick, self).__init__(modkeys, button, *commands, **kwargs)
class EzDrag(EzConfig, Drag):
def __init__(self, btndef, *commands, **kwargs):
modkeys, button = self.parse(btndef)
button = 'Button%s' % button
super(EzDrag, self).__init__(modkeys, button, *commands, **kwargs)
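# Illustrative binding sketch (hypothetical values; assumes the usual `lazy`
# helper exposed by the command module):
#   keys = [
#       Key(["mod4"], "Return", command.lazy.spawn("xterm")),
#       EzKey("M-S-q", command.lazy.shutdown()),
#   ]
#   mouse = [
#       EzDrag("M-1", command.lazy.window.set_position_floating(),
#              start=command.lazy.window.get_position()),
#   ]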
class ScreenRect(object):
def __init__(self, x, y, width, height):
self.x = x
self.y = y
self.width = width
self.height = height
def __repr__(self):
return '<%s %d,%d %d,%d>' % (
self.__class__.__name__,
self.x, self.y,
self.width, self.height
)
def hsplit(self, columnwidth):
assert columnwidth > 0
assert columnwidth < self.width
return (
self.__class__(self.x, self.y, columnwidth, self.height),
self.__class__(
self.x + columnwidth, self.y,
self.width - columnwidth, self.height
)
)
def vsplit(self, rowheight):
assert rowheight > 0
assert rowheight < self.height
return (
self.__class__(self.x, self.y, self.width, rowheight),
self.__class__(
self.x, self.y + rowheight,
self.width, self.height - rowheight
)
)
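    # Worked example (illustrative): splitting a 100x50 rect 30 px from the
    # left edge yields a 30-wide and a 70-wide rect:
    #   ScreenRect(0, 0, 100, 50).hsplit(30)
    #   -> (<ScreenRect 0,0 30,50>, <ScreenRect 30,0 70,50>)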
class Screen(command.CommandObject):
"""
A physical screen, and its associated paraphernalia.
"""
def __init__(self, top=None, bottom=None, left=None, right=None,
x=None, y=None, width=None, height=None):
"""
- top, bottom, left, right: Instances of Gap/Bar objects, or None.
Note that bar.Bar objects can only be placed at the top or the
bottom of the screen (bar.Gap objects can be placed anywhere).
            x, y, width and height are usually not specified unless you are
            using 'fake screens'.
"""
self.group = None
self.previous_group = None
self.top = top
self.bottom = bottom
self.left = left
self.right = right
self.qtile = None
self.index = None
# x position of upper left corner can be > 0
# if one screen is "right" of the other
self.x = x
self.y = y
self.width = width
self.height = height
def _configure(self, qtile, index, x, y, width, height, group):
self.qtile = qtile
self.index = index
self.x = x
self.y = y
self.width = width
self.height = height
self.setGroup(group)
for i in self.gaps:
i._configure(qtile, self)
@property
def gaps(self):
return (i for i in [self.top, self.bottom, self.left, self.right] if i)
@property
def dx(self):
return self.x + self.left.size if self.left else self.x
@property
def dy(self):
return self.y + self.top.size if self.top else self.y
@property
def dwidth(self):
val = self.width
if self.left:
val -= self.left.size
if self.right:
val -= self.right.size
return val
@property
def dheight(self):
val = self.height
if self.top:
val -= self.top.size
if self.bottom:
val -= self.bottom.size
return val
def get_rect(self):
return ScreenRect(self.dx, self.dy, self.dwidth, self.dheight)
def setGroup(self, new_group, save_prev=True):
"""
Put group on this screen
"""
        if new_group is None:
            return
        if new_group.screen == self:
            return
        if save_prev:
            self.previous_group = self.group
if new_group.screen:
# g1 <-> s1 (self)
# g2 (new_group) <-> s2 to
# g1 <-> s2
# g2 <-> s1
g1 = self.group
s1 = self
g2 = new_group
s2 = new_group.screen
s2.group = g1
g1._setScreen(s2)
s1.group = g2
g2._setScreen(s1)
else:
old_group = self.group
self.group = new_group
            # Display the clients of the new group and then hide those of the
            # old group, to avoid screen flickering.
new_group._setScreen(self)
if old_group is not None:
old_group._setScreen(None)
hook.fire("setgroup")
hook.fire("focus_change")
hook.fire(
"layout_change",
self.group.layouts[self.group.currentLayout],
self.group
)
def _items(self, name):
if name == "layout":
return (True, list(range(len(self.group.layouts))))
elif name == "window":
return (True, [i.window.wid for i in self.group.windows])
elif name == "bar":
return (False, [x.position for x in self.gaps])
def _select(self, name, sel):
if name == "layout":
if sel is None:
return self.group.layout
else:
return utils.lget(self.group.layouts, sel)
elif name == "window":
if sel is None:
return self.group.currentWindow
else:
for i in self.group.windows:
if i.window.wid == sel:
return i
elif name == "bar":
return getattr(self, sel)
def resize(self, x=None, y=None, w=None, h=None):
x = x or self.x
y = y or self.y
w = w or self.width
h = h or self.height
self._configure(self.qtile, self.index, x, y, w, h, self.group)
for bar in [self.top, self.bottom, self.left, self.right]:
if bar:
bar.draw()
        # Pass the callable; call_soon invokes it on the next loop iteration.
        self.qtile._eventloop.call_soon(self.group.layoutAll)
def cmd_info(self):
"""
Returns a dictionary of info for this screen.
"""
return dict(
index=self.index,
width=self.width,
height=self.height,
x=self.x,
y=self.y
)
def cmd_resize(self, x=None, y=None, w=None, h=None):
"""
Resize the screen.
"""
self.resize(x, y, w, h)
def cmd_next_group(self, skip_empty=False, skip_managed=False):
"""
Switch to the next group.
"""
n = self.group.nextGroup(skip_empty, skip_managed)
self.setGroup(n)
return n.name
def cmd_prev_group(self, skip_empty=False, skip_managed=False):
"""
Switch to the previous group.
"""
n = self.group.prevGroup(skip_empty, skip_managed)
self.setGroup(n)
return n.name
def cmd_togglegroup(self, groupName=None):
"""
Switch to the selected group or to the previously active one.
"""
group = self.qtile.groupMap.get(groupName)
if group in (self.group, None):
group = self.previous_group
self.setGroup(group)
class Group(object):
"""
Represents a "dynamic" group. These groups can spawn apps, only allow
certain Matched windows to be on them, hide when they're not in use, etc.
"""
def __init__(self, name, matches=None, exclusive=False,
spawn=None, layout=None, layouts=None, persist=True, init=True,
layout_opts=None, screen_affinity=None, position=MAXSIZE):
"""
:param name: the name of this group
:type name: string
:param matches: list of ``Match`` objects whose windows will be assigned to this group
:type matches: default ``None``
:param exclusive: when other apps are started in this group, should we allow them here or not?
:type exclusive: boolean
:param spawn: this will be ``exec()`` d when the group is created
:type spawn: string
:param layout: the default layout for this group (e.g. 'max' or 'stack')
:type layout: string
:param layouts: the group layouts list overriding global layouts
:type layouts: list
:param persist: should this group stay alive with no member windows?
:type persist: boolean
:param init: is this group alive when qtile starts?
:type init: boolean
:param position: group position
:type position: int
"""
self.name = name
self.exclusive = exclusive
self.spawn = spawn
self.layout = layout
self.layouts = layouts or []
self.persist = persist
self.init = init
self.matches = matches or []
self.layout_opts = layout_opts or {}
self.screen_affinity = screen_affinity
self.position = position
class Match(object):
"""
    Match for dynamic groups.
    It can match by title, class, role, type, instance class, or pid.
"""
def __init__(self, title=None, wm_class=None, role=None, wm_type=None,
wm_instance_class=None, net_wm_pid=None):
"""
        ``Match`` supports both regular expression objects (i.e. the result of
        ``re.compile()``) and strings (matched as an "include"/substring
        match). If a window matches any of the things in any of the lists, it
        is considered a match.
:param title: things to match against the title (WM_NAME)
:param wm_class: things to match against the second string in
WM_CLASS atom
:param role: things to match against the WM_ROLE atom
:param wm_type: things to match against the WM_TYPE atom
:param wm_instance_class: things to match against the first string in
WM_CLASS atom
:param net_wm_pid: things to match against the _NET_WM_PID atom
(only int allowed in this rule)
"""
if not title:
title = []
if not wm_class:
wm_class = []
if not role:
role = []
if not wm_type:
wm_type = []
if not wm_instance_class:
wm_instance_class = []
if not net_wm_pid:
net_wm_pid = []
try:
net_wm_pid = list(map(int, net_wm_pid))
except ValueError:
error = 'Invalid rule for net_wm_pid: "%s" '\
'only ints allowed' % str(net_wm_pid)
raise utils.QtileError(error)
self._rules = [('title', t) for t in title]
self._rules += [('wm_class', w) for w in wm_class]
self._rules += [('role', r) for r in role]
self._rules += [('wm_type', r) for r in wm_type]
self._rules += [('wm_instance_class', w) for w in wm_instance_class]
self._rules += [('net_wm_pid', w) for w in net_wm_pid]
def compare(self, client):
for _type, rule in self._rules:
if _type == "net_wm_pid":
def match_func(value):
return rule == value
else:
match_func = getattr(rule, 'match', None) or \
getattr(rule, 'count')
if _type == 'title':
value = client.name
elif _type == 'wm_class':
value = None
_value = client.window.get_wm_class()
if _value and len(_value) > 1:
value = _value[1]
elif _type == 'wm_instance_class':
value = client.window.get_wm_class()
if value:
value = value[0]
elif _type == 'wm_type':
value = client.window.get_wm_type()
elif _type == 'net_wm_pid':
value = client.window.get_net_wm_pid()
else:
value = client.window.get_wm_window_role()
if value and match_func(value):
return True
return False
def map(self, callback, clients):
""" Apply callback to each client that matches this Match """
for c in clients:
if self.compare(c):
callback(c)
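# Illustrative sketch (not part of the original module): ``Match`` accepts both
# compiled regular expressions and plain strings, and a window matching any
# entry in any list is a match; the patterns below are hypothetical.
def _example_match():
    import re
    return Match(title=[re.compile(r"^Downloads"), "Preferences"],
                 wm_class=["Firefox"])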
class Rule(object):
"""
A Rule contains a Match object, and a specification about what to do
when that object is matched.
"""
def __init__(self, match, group=None, float=False, intrusive=False,
break_on_match=True):
"""
        :param match: ``Match`` object associated with this ``Rule``
        :param group: name of the group to place the matched window on
        :param float: auto float this window?
:param intrusive: override the group's exclusive setting?
:param break_on_match: Should we stop applying rules if this rule is
matched?
"""
self.match = match
self.group = group
self.float = float
self.intrusive = intrusive
self.break_on_match = break_on_match
def matches(self, w):
return self.match.compare(w)
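# Illustrative sketch (not part of the original module): a Rule that floats
# dialog windows without stopping rule processing, and one that sends browser
# windows to a hypothetical "web" group.
def _example_rules():
    return [
        Rule(Match(wm_type=["dialog"]), float=True, break_on_match=False),
        Rule(Match(wm_class=["Firefox", "Chromium"]), group="web"),
    ]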
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Convolutional-recurrent layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.keras import activations
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import constraints
from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.engine.input_spec import InputSpec
from tensorflow.python.keras.layers.recurrent import DropoutRNNCellMixin
from tensorflow.python.keras.layers.recurrent import RNN
from tensorflow.python.keras.utils import conv_utils
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.util.tf_export import keras_export
class ConvRNN2D(RNN):
"""Base class for convolutional-recurrent layers.
Arguments:
cell: A RNN cell instance. A RNN cell is a class that has:
- a `call(input_at_t, states_at_t)` method, returning
`(output_at_t, states_at_t_plus_1)`. The call method of the
cell can also take the optional argument `constants`, see
section "Note on passing external constants" below.
- a `state_size` attribute. This can be a single integer
(single state) in which case it is
the number of channels of the recurrent state
(which should be the same as the number of channels of the cell
output). This can also be a list/tuple of integers
(one size per state). In this case, the first entry
(`state_size[0]`) should be the same as
the size of the cell output.
    return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence.
return_state: Boolean. Whether to return the last state
in addition to the output.
go_backwards: Boolean (default False).
If True, process the input sequence backwards and return the
reversed sequence.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
input_shape: Use this argument to specify the shape of the
input when this layer is the first one in a model.
Call arguments:
inputs: A 5D tensor.
mask: Binary tensor of shape `(samples, timesteps)` indicating whether
a given timestep should be masked.
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. This argument is passed to the cell
when calling it. This is for use with cells that use dropout.
initial_state: List of initial state tensors to be passed to the first
call of the cell.
constants: List of constant tensors to be passed to the cell at each
timestep.
Input shape:
5D tensor with shape:
`(samples, timesteps, channels, rows, cols)`
if data_format='channels_first' or 5D tensor with shape:
`(samples, timesteps, rows, cols, channels)`
if data_format='channels_last'.
Output shape:
- If `return_state`: a list of tensors. The first tensor is
the output. The remaining tensors are the last states,
each 4D tensor with shape:
`(samples, filters, new_rows, new_cols)`
if data_format='channels_first'
or 4D tensor with shape:
`(samples, new_rows, new_cols, filters)`
if data_format='channels_last'.
`rows` and `cols` values might have changed due to padding.
- If `return_sequences`: 5D tensor with shape:
`(samples, timesteps, filters, new_rows, new_cols)`
if data_format='channels_first'
or 5D tensor with shape:
`(samples, timesteps, new_rows, new_cols, filters)`
if data_format='channels_last'.
- Else, 4D tensor with shape:
`(samples, filters, new_rows, new_cols)`
if data_format='channels_first'
or 4D tensor with shape:
`(samples, new_rows, new_cols, filters)`
if data_format='channels_last'.
Masking:
This layer supports masking for input data with a variable number
of timesteps.
Note on using statefulness in RNNs:
You can set RNN layers to be 'stateful', which means that the states
computed for the samples in one batch will be reused as initial states
for the samples in the next batch. This assumes a one-to-one mapping
between samples in different successive batches.
To enable statefulness:
- Specify `stateful=True` in the layer constructor.
- Specify a fixed batch size for your model, by passing
- If sequential model:
`batch_input_shape=(...)` to the first layer in your model.
- If functional model with 1 or more Input layers:
`batch_shape=(...)` to all the first layers in your model.
This is the expected shape of your inputs
*including the batch size*.
It should be a tuple of integers,
e.g. `(32, 10, 100, 100, 32)`.
Note that the number of rows and columns should be specified
too.
- Specify `shuffle=False` when calling fit().
To reset the states of your model, call `.reset_states()` on either
a specific layer, or on your entire model.
Note on specifying the initial state of RNNs:
You can specify the initial state of RNN layers symbolically by
calling them with the keyword argument `initial_state`. The value of
`initial_state` should be a tensor or list of tensors representing
the initial state of the RNN layer.
You can specify the initial state of RNN layers numerically by
calling `reset_states` with the keyword argument `states`. The value of
`states` should be a numpy array or list of numpy arrays representing
the initial state of the RNN layer.
Note on passing external constants to RNNs:
You can pass "external" constants to the cell using the `constants`
keyword argument of `RNN.__call__` (as well as `RNN.call`) method. This
requires that the `cell.call` method accepts the same keyword argument
`constants`. Such constants can be used to condition the cell
transformation on additional static inputs (not changing over time),
a.k.a. an attention mechanism.
"""
def __init__(self,
cell,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
unroll=False,
**kwargs):
if unroll:
raise TypeError('Unrolling isn\'t possible with '
'convolutional RNNs.')
if isinstance(cell, (list, tuple)):
# The StackedConvRNN2DCells isn't implemented yet.
      raise TypeError('It is not possible at the moment to '
                      'stack convolutional cells.')
super(ConvRNN2D, self).__init__(cell,
return_sequences,
return_state,
go_backwards,
stateful,
unroll,
**kwargs)
self.input_spec = [InputSpec(ndim=5)]
self.states = None
self._num_constants = None
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
if isinstance(input_shape, list):
input_shape = input_shape[0]
cell = self.cell
if cell.data_format == 'channels_first':
rows = input_shape[3]
cols = input_shape[4]
elif cell.data_format == 'channels_last':
rows = input_shape[2]
cols = input_shape[3]
rows = conv_utils.conv_output_length(rows,
cell.kernel_size[0],
padding=cell.padding,
stride=cell.strides[0],
dilation=cell.dilation_rate[0])
cols = conv_utils.conv_output_length(cols,
cell.kernel_size[1],
padding=cell.padding,
stride=cell.strides[1],
dilation=cell.dilation_rate[1])
if cell.data_format == 'channels_first':
output_shape = input_shape[:2] + (cell.filters, rows, cols)
elif cell.data_format == 'channels_last':
output_shape = input_shape[:2] + (rows, cols, cell.filters)
if not self.return_sequences:
output_shape = output_shape[:1] + output_shape[2:]
if self.return_state:
output_shape = [output_shape]
if cell.data_format == 'channels_first':
output_shape += [(input_shape[0], cell.filters, rows, cols)
for _ in range(2)]
elif cell.data_format == 'channels_last':
output_shape += [(input_shape[0], rows, cols, cell.filters)
for _ in range(2)]
return output_shape
@tf_utils.shape_type_conversion
def build(self, input_shape):
# Note input_shape will be list of shapes of initial states and
# constants if these are passed in __call__.
if self._num_constants is not None:
constants_shape = input_shape[-self._num_constants:] # pylint: disable=E1130
else:
constants_shape = None
if isinstance(input_shape, list):
input_shape = input_shape[0]
batch_size = input_shape[0] if self.stateful else None
self.input_spec[0] = InputSpec(shape=(batch_size, None) + input_shape[2:5])
# allow cell (if layer) to build before we set or validate state_spec
if isinstance(self.cell, Layer):
step_input_shape = (input_shape[0],) + input_shape[2:]
if constants_shape is not None:
self.cell.build([step_input_shape] + constants_shape)
else:
self.cell.build(step_input_shape)
# set or validate state_spec
if hasattr(self.cell.state_size, '__len__'):
state_size = list(self.cell.state_size)
else:
state_size = [self.cell.state_size]
if self.state_spec is not None:
# initial_state was passed in call, check compatibility
if self.cell.data_format == 'channels_first':
ch_dim = 1
elif self.cell.data_format == 'channels_last':
ch_dim = 3
if [spec.shape[ch_dim] for spec in self.state_spec] != state_size:
raise ValueError(
'An initial_state was passed that is not compatible with '
'`cell.state_size`. Received `state_spec`={}; '
'However `cell.state_size` is '
'{}'.format([spec.shape for spec in self.state_spec],
self.cell.state_size))
else:
if self.cell.data_format == 'channels_first':
self.state_spec = [InputSpec(shape=(None, dim, None, None))
for dim in state_size]
elif self.cell.data_format == 'channels_last':
self.state_spec = [InputSpec(shape=(None, None, None, dim))
for dim in state_size]
if self.stateful:
self.reset_states()
self.built = True
def get_initial_state(self, inputs):
# (samples, timesteps, rows, cols, filters)
initial_state = K.zeros_like(inputs)
# (samples, rows, cols, filters)
initial_state = K.sum(initial_state, axis=1)
shape = list(self.cell.kernel_shape)
shape[-1] = self.cell.filters
initial_state = self.cell.input_conv(initial_state,
array_ops.zeros(tuple(shape),
initial_state.dtype),
padding=self.cell.padding)
if hasattr(self.cell.state_size, '__len__'):
return [initial_state for _ in self.cell.state_size]
else:
return [initial_state]
def call(self,
inputs,
mask=None,
training=None,
initial_state=None,
constants=None):
# note that the .build() method of subclasses MUST define
# self.input_spec and self.state_spec with complete input shapes.
inputs, initial_state, constants = self._process_inputs(
inputs, initial_state, constants)
if isinstance(mask, list):
mask = mask[0]
timesteps = K.int_shape(inputs)[1]
kwargs = {}
if generic_utils.has_arg(self.cell.call, 'training'):
kwargs['training'] = training
if constants:
if not generic_utils.has_arg(self.cell.call, 'constants'):
raise ValueError('RNN cell does not support constants')
def step(inputs, states):
constants = states[-self._num_constants:] # pylint: disable=invalid-unary-operand-type
states = states[:-self._num_constants] # pylint: disable=invalid-unary-operand-type
return self.cell.call(inputs, states, constants=constants, **kwargs)
else:
def step(inputs, states):
return self.cell.call(inputs, states, **kwargs)
last_output, outputs, states = K.rnn(step,
inputs,
initial_state,
constants=constants,
go_backwards=self.go_backwards,
mask=mask,
input_length=timesteps)
if self.stateful:
updates = [
K.update(self_state, state)
for self_state, state in zip(self.states, states)
]
self.add_update(updates)
if self.return_sequences:
output = outputs
else:
output = last_output
if self.return_state:
if not isinstance(states, (list, tuple)):
states = [states]
else:
states = list(states)
return [output] + states
else:
return output
def reset_states(self, states=None):
if not self.stateful:
raise AttributeError('Layer must be stateful.')
input_shape = self.input_spec[0].shape
state_shape = self.compute_output_shape(input_shape)
if self.return_state:
state_shape = state_shape[0]
if self.return_sequences:
state_shape = state_shape[:1].concatenate(state_shape[2:])
if None in state_shape:
raise ValueError('If a RNN is stateful, it needs to know '
'its batch size. Specify the batch size '
'of your input tensors: \n'
'- If using a Sequential model, '
'specify the batch size by passing '
'a `batch_input_shape` '
'argument to your first layer.\n'
                       '- If using the functional API, specify '
                       'the batch size by passing a '
'`batch_shape` argument to your Input layer.\n'
'The same thing goes for the number of rows and '
'columns.')
# helper function
def get_tuple_shape(nb_channels):
result = list(state_shape)
if self.cell.data_format == 'channels_first':
result[1] = nb_channels
elif self.cell.data_format == 'channels_last':
result[3] = nb_channels
else:
        raise KeyError('Unknown data_format: ' + str(self.cell.data_format))
return tuple(result)
# initialize state if None
if self.states[0] is None:
if hasattr(self.cell.state_size, '__len__'):
self.states = [K.zeros(get_tuple_shape(dim))
for dim in self.cell.state_size]
else:
self.states = [K.zeros(get_tuple_shape(self.cell.state_size))]
elif states is None:
if hasattr(self.cell.state_size, '__len__'):
for state, dim in zip(self.states, self.cell.state_size):
K.set_value(state, np.zeros(get_tuple_shape(dim)))
else:
K.set_value(self.states[0],
np.zeros(get_tuple_shape(self.cell.state_size)))
else:
if not isinstance(states, (list, tuple)):
states = [states]
if len(states) != len(self.states):
raise ValueError('Layer ' + self.name + ' expects ' +
str(len(self.states)) + ' states, ' +
'but it received ' + str(len(states)) +
' state values. Input received: ' + str(states))
for index, (value, state) in enumerate(zip(states, self.states)):
if hasattr(self.cell.state_size, '__len__'):
dim = self.cell.state_size[index]
else:
dim = self.cell.state_size
if value.shape != get_tuple_shape(dim):
raise ValueError('State ' + str(index) +
' is incompatible with layer ' +
self.name + ': expected shape=' +
str(get_tuple_shape(dim)) +
', found shape=' + str(value.shape))
# TODO(anjalisridhar): consider batch calls to `set_value`.
K.set_value(state, value)
class ConvLSTM2DCell(DropoutRNNCellMixin, Layer):
"""Cell class for the ConvLSTM2D layer.
Arguments:
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of n integers, specifying the
dimensions of the convolution window.
strides: An integer or tuple/list of n integers,
specifying the strides of the convolution.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"` or `"same"` (case-insensitive).
data_format: A string,
one of `channels_last` (default) or `channels_first`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
dilation_rate: An integer or tuple/list of n integers, specifying
the dilation rate to use for dilated convolution.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any `strides` value != 1.
activation: Activation function to use.
      By default, the hyperbolic tangent (`tanh`) activation is applied.
      If you pass `None`, no activation is applied
      (i.e. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use
for the recurrent step.
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state.
bias_initializer: Initializer for the bias vector.
unit_forget_bias: Boolean.
If True, add 1 to the bias of the forget gate at initialization.
Use in combination with `bias_initializer="zeros"`.
This is recommended in [Jozefowicz et al., 2015](
http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
Call arguments:
inputs: A 4D tensor.
states: List of state tensors corresponding to the previous timestep.
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. Only relevant when `dropout` or
`recurrent_dropout` is used.
"""
def __init__(self,
filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1),
activation='tanh',
recurrent_activation='hard_sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
unit_forget_bias=True,
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
**kwargs):
super(ConvLSTM2DCell, self).__init__(**kwargs)
self.filters = filters
self.kernel_size = conv_utils.normalize_tuple(kernel_size, 2, 'kernel_size')
self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
self.padding = conv_utils.normalize_padding(padding)
self.data_format = conv_utils.normalize_data_format(data_format)
self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, 2,
'dilation_rate')
self.activation = activations.get(activation)
self.recurrent_activation = activations.get(recurrent_activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.recurrent_initializer = initializers.get(recurrent_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.unit_forget_bias = unit_forget_bias
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.recurrent_constraint = constraints.get(recurrent_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.dropout = min(1., max(0., dropout))
self.recurrent_dropout = min(1., max(0., recurrent_dropout))
self.state_size = (self.filters, self.filters)
def build(self, input_shape):
if self.data_format == 'channels_first':
channel_axis = 1
else:
channel_axis = -1
if input_shape[channel_axis] is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
input_dim = input_shape[channel_axis]
kernel_shape = self.kernel_size + (input_dim, self.filters * 4)
self.kernel_shape = kernel_shape
recurrent_kernel_shape = self.kernel_size + (self.filters, self.filters * 4)
self.kernel = self.add_weight(shape=kernel_shape,
initializer=self.kernel_initializer,
name='kernel',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.recurrent_kernel = self.add_weight(
shape=recurrent_kernel_shape,
initializer=self.recurrent_initializer,
name='recurrent_kernel',
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint)
if self.use_bias:
if self.unit_forget_bias:
def bias_initializer(_, *args, **kwargs):
return K.concatenate([
self.bias_initializer((self.filters,), *args, **kwargs),
initializers.get('ones')((self.filters,), *args, **kwargs),
self.bias_initializer((self.filters * 2,), *args, **kwargs),
])
else:
bias_initializer = self.bias_initializer
self.bias = self.add_weight(
shape=(self.filters * 4,),
name='bias',
initializer=bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
self.built = True
def call(self, inputs, states, training=None):
h_tm1 = states[0] # previous memory state
c_tm1 = states[1] # previous carry state
# dropout matrices for input units
dp_mask = self.get_dropout_mask_for_cell(inputs, training, count=4)
# dropout matrices for recurrent units
rec_dp_mask = self.get_recurrent_dropout_mask_for_cell(
h_tm1, training, count=4)
if 0 < self.dropout < 1.:
inputs_i = inputs * dp_mask[0]
inputs_f = inputs * dp_mask[1]
inputs_c = inputs * dp_mask[2]
inputs_o = inputs * dp_mask[3]
else:
inputs_i = inputs
inputs_f = inputs
inputs_c = inputs
inputs_o = inputs
if 0 < self.recurrent_dropout < 1.:
h_tm1_i = h_tm1 * rec_dp_mask[0]
h_tm1_f = h_tm1 * rec_dp_mask[1]
h_tm1_c = h_tm1 * rec_dp_mask[2]
h_tm1_o = h_tm1 * rec_dp_mask[3]
else:
h_tm1_i = h_tm1
h_tm1_f = h_tm1
h_tm1_c = h_tm1
h_tm1_o = h_tm1
(kernel_i, kernel_f,
kernel_c, kernel_o) = array_ops.split(self.kernel, 4, axis=3)
(recurrent_kernel_i,
recurrent_kernel_f,
recurrent_kernel_c,
recurrent_kernel_o) = array_ops.split(self.recurrent_kernel, 4, axis=3)
if self.use_bias:
bias_i, bias_f, bias_c, bias_o = array_ops.split(self.bias, 4)
else:
bias_i, bias_f, bias_c, bias_o = None, None, None, None
x_i = self.input_conv(inputs_i, kernel_i, bias_i, padding=self.padding)
x_f = self.input_conv(inputs_f, kernel_f, bias_f, padding=self.padding)
x_c = self.input_conv(inputs_c, kernel_c, bias_c, padding=self.padding)
x_o = self.input_conv(inputs_o, kernel_o, bias_o, padding=self.padding)
h_i = self.recurrent_conv(h_tm1_i, recurrent_kernel_i)
h_f = self.recurrent_conv(h_tm1_f, recurrent_kernel_f)
h_c = self.recurrent_conv(h_tm1_c, recurrent_kernel_c)
h_o = self.recurrent_conv(h_tm1_o, recurrent_kernel_o)
i = self.recurrent_activation(x_i + h_i)
f = self.recurrent_activation(x_f + h_f)
c = f * c_tm1 + i * self.activation(x_c + h_c)
o = self.recurrent_activation(x_o + h_o)
h = o * self.activation(c)
return h, [h, c]
def input_conv(self, x, w, b=None, padding='valid'):
conv_out = K.conv2d(x, w, strides=self.strides,
padding=padding,
data_format=self.data_format,
dilation_rate=self.dilation_rate)
if b is not None:
conv_out = K.bias_add(conv_out, b,
data_format=self.data_format)
return conv_out
def recurrent_conv(self, x, w):
conv_out = K.conv2d(x, w, strides=(1, 1),
padding='same',
data_format=self.data_format)
return conv_out
def get_config(self):
config = {'filters': self.filters,
'kernel_size': self.kernel_size,
'strides': self.strides,
'padding': self.padding,
'data_format': self.data_format,
'dilation_rate': self.dilation_rate,
'activation': activations.serialize(self.activation),
'recurrent_activation': activations.serialize(
self.recurrent_activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(
self.kernel_initializer),
'recurrent_initializer': initializers.serialize(
self.recurrent_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'unit_forget_bias': self.unit_forget_bias,
'kernel_regularizer': regularizers.serialize(
self.kernel_regularizer),
'recurrent_regularizer': regularizers.serialize(
self.recurrent_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'kernel_constraint': constraints.serialize(
self.kernel_constraint),
'recurrent_constraint': constraints.serialize(
self.recurrent_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint),
'dropout': self.dropout,
'recurrent_dropout': self.recurrent_dropout}
base_config = super(ConvLSTM2DCell, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
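# Illustrative sketch (not part of the original module): a ConvLSTM2DCell is
# normally wrapped in a ConvRNN2D layer, which is exactly what the ConvLSTM2D
# class below does; the filter count and kernel size here are hypothetical.
def _example_conv_lstm_2d_cell():
  cell = ConvLSTM2DCell(filters=8, kernel_size=(3, 3), padding='same')
  layer = ConvRNN2D(cell, return_sequences=True)
  # Applied to a 5D input of shape (batch, time, rows, cols, channels) with
  # data_format='channels_last', the layer returns a 5D tensor of shape
  # (batch, time, rows, cols, 8) because padding='same'.
  return layer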
@keras_export('keras.layers.ConvLSTM2D')
class ConvLSTM2D(ConvRNN2D):
"""Convolutional LSTM.
It is similar to an LSTM layer, but the input transformations
and recurrent transformations are both convolutional.
Arguments:
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of n integers, specifying the
dimensions of the convolution window.
strides: An integer or tuple/list of n integers,
specifying the strides of the convolution.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"` or `"same"` (case-insensitive).
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, time, ..., channels)`
while `channels_first` corresponds to
inputs with shape `(batch, time, channels, ...)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
dilation_rate: An integer or tuple/list of n integers, specifying
the dilation rate to use for dilated convolution.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any `strides` value != 1.
activation: Activation function to use.
      By default, the hyperbolic tangent activation function is applied
(`tanh(x)`).
recurrent_activation: Activation function to use
for the recurrent step.
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state.
bias_initializer: Initializer for the bias vector.
unit_forget_bias: Boolean.
If True, add 1 to the bias of the forget gate at initialization.
Use in combination with `bias_initializer="zeros"`.
This is recommended in [Jozefowicz et al., 2015](
http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
    activity_regularizer: Regularizer function applied to
      the output of the layer (its "activation").
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence.
go_backwards: Boolean (default False).
If True, process the input sequence backwards.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
Call arguments:
inputs: A 5D tensor.
mask: Binary tensor of shape `(samples, timesteps)` indicating whether
a given timestep should be masked.
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. This argument is passed to the cell
when calling it. This is only relevant if `dropout` or `recurrent_dropout`
are set.
initial_state: List of initial state tensors to be passed to the first
call of the cell.
Input shape:
- If data_format='channels_first'
5D tensor with shape:
`(samples, time, channels, rows, cols)`
- If data_format='channels_last'
5D tensor with shape:
`(samples, time, rows, cols, channels)`
Output shape:
- If `return_sequences`
- If data_format='channels_first'
5D tensor with shape:
`(samples, time, filters, output_row, output_col)`
- If data_format='channels_last'
5D tensor with shape:
`(samples, time, output_row, output_col, filters)`
- Else
- If data_format ='channels_first'
4D tensor with shape:
`(samples, filters, output_row, output_col)`
- If data_format='channels_last'
4D tensor with shape:
`(samples, output_row, output_col, filters)`
    where `output_row` and `output_col` depend on the shape of the filter
    and the padding.
Raises:
ValueError: in case of invalid constructor arguments.
References:
- [Shi et al., 2015](http://arxiv.org/abs/1506.04214v1)
(the current implementation does not include the feedback loop on the
cells output).
"""
def __init__(self,
filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1),
activation='tanh',
recurrent_activation='hard_sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
unit_forget_bias=True,
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
return_sequences=False,
go_backwards=False,
stateful=False,
dropout=0.,
recurrent_dropout=0.,
**kwargs):
cell = ConvLSTM2DCell(filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
recurrent_activation=recurrent_activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
recurrent_initializer=recurrent_initializer,
bias_initializer=bias_initializer,
unit_forget_bias=unit_forget_bias,
kernel_regularizer=kernel_regularizer,
recurrent_regularizer=recurrent_regularizer,
bias_regularizer=bias_regularizer,
kernel_constraint=kernel_constraint,
recurrent_constraint=recurrent_constraint,
bias_constraint=bias_constraint,
dropout=dropout,
recurrent_dropout=recurrent_dropout,
dtype=kwargs.get('dtype'))
super(ConvLSTM2D, self).__init__(cell,
return_sequences=return_sequences,
go_backwards=go_backwards,
stateful=stateful,
**kwargs)
self.activity_regularizer = regularizers.get(activity_regularizer)
def call(self, inputs, mask=None, training=None, initial_state=None):
self._maybe_reset_cell_dropout_mask(self.cell)
return super(ConvLSTM2D, self).call(inputs,
mask=mask,
training=training,
initial_state=initial_state)
@property
def filters(self):
return self.cell.filters
@property
def kernel_size(self):
return self.cell.kernel_size
@property
def strides(self):
return self.cell.strides
@property
def padding(self):
return self.cell.padding
@property
def data_format(self):
return self.cell.data_format
@property
def dilation_rate(self):
return self.cell.dilation_rate
@property
def activation(self):
return self.cell.activation
@property
def recurrent_activation(self):
return self.cell.recurrent_activation
@property
def use_bias(self):
return self.cell.use_bias
@property
def kernel_initializer(self):
return self.cell.kernel_initializer
@property
def recurrent_initializer(self):
return self.cell.recurrent_initializer
@property
def bias_initializer(self):
return self.cell.bias_initializer
@property
def unit_forget_bias(self):
return self.cell.unit_forget_bias
@property
def kernel_regularizer(self):
return self.cell.kernel_regularizer
@property
def recurrent_regularizer(self):
return self.cell.recurrent_regularizer
@property
def bias_regularizer(self):
return self.cell.bias_regularizer
@property
def kernel_constraint(self):
return self.cell.kernel_constraint
@property
def recurrent_constraint(self):
return self.cell.recurrent_constraint
@property
def bias_constraint(self):
return self.cell.bias_constraint
@property
def dropout(self):
return self.cell.dropout
@property
def recurrent_dropout(self):
return self.cell.recurrent_dropout
def get_config(self):
config = {'filters': self.filters,
'kernel_size': self.kernel_size,
'strides': self.strides,
'padding': self.padding,
'data_format': self.data_format,
'dilation_rate': self.dilation_rate,
'activation': activations.serialize(self.activation),
'recurrent_activation': activations.serialize(
self.recurrent_activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(
self.kernel_initializer),
'recurrent_initializer': initializers.serialize(
self.recurrent_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'unit_forget_bias': self.unit_forget_bias,
'kernel_regularizer': regularizers.serialize(
self.kernel_regularizer),
'recurrent_regularizer': regularizers.serialize(
self.recurrent_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer': regularizers.serialize(
self.activity_regularizer),
'kernel_constraint': constraints.serialize(
self.kernel_constraint),
'recurrent_constraint': constraints.serialize(
self.recurrent_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint),
'dropout': self.dropout,
'recurrent_dropout': self.recurrent_dropout}
base_config = super(ConvLSTM2D, self).get_config()
del base_config['cell']
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
return cls(**config)
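# Illustrative usage sketch (not part of the original module): a ConvLSTM2D
# layer applied to 10-frame sequences of 64x64 RGB images; all shapes below
# are hypothetical.
def _example_conv_lstm_2d():
  from tensorflow.python.keras import models
  model = models.Sequential()
  model.add(ConvLSTM2D(filters=16, kernel_size=(3, 3), padding='same',
                       return_sequences=False,
                       input_shape=(10, 64, 64, 3)))
  # With return_sequences=False and padding='same' the output shape is
  # (batch, 64, 64, 16). For stateful operation, pass stateful=True together
  # with a fixed batch size (batch_input_shape=(batch, 10, 64, 64, 3)) and
  # call model.reset_states() between independent sequences.
  return model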
|
|
# Copyright 2017 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import grpc
import mock
import pytest
from google.api_core import exceptions
from google.api_core import grpc_helpers
import google.auth.credentials
from google.longrunning import operations_pb2
def test__patch_callable_name():
callable = mock.Mock(spec=['__class__'])
callable.__class__ = mock.Mock(spec=['__name__'])
callable.__class__.__name__ = 'TestCallable'
grpc_helpers._patch_callable_name(callable)
assert callable.__name__ == 'TestCallable'
def test__patch_callable_name_no_op():
callable = mock.Mock(spec=['__name__'])
callable.__name__ = 'test_callable'
grpc_helpers._patch_callable_name(callable)
assert callable.__name__ == 'test_callable'
class RpcErrorImpl(grpc.RpcError, grpc.Call):
def __init__(self, code):
super(RpcErrorImpl, self).__init__()
self._code = code
def code(self):
return self._code
def details(self):
return None
def test_wrap_unary_errors():
grpc_error = RpcErrorImpl(grpc.StatusCode.INVALID_ARGUMENT)
callable_ = mock.Mock(spec=['__call__'], side_effect=grpc_error)
wrapped_callable = grpc_helpers._wrap_unary_errors(callable_)
with pytest.raises(exceptions.InvalidArgument) as exc_info:
wrapped_callable(1, 2, three='four')
callable_.assert_called_once_with(1, 2, three='four')
assert exc_info.value.response == grpc_error
def test_wrap_stream_okay():
expected_responses = [1, 2, 3]
callable_ = mock.Mock(spec=[
'__call__'], return_value=iter(expected_responses))
wrapped_callable = grpc_helpers._wrap_stream_errors(callable_)
got_iterator = wrapped_callable(1, 2, three='four')
responses = list(got_iterator)
callable_.assert_called_once_with(1, 2, three='four')
assert responses == expected_responses
def test_wrap_stream_iterable_interface():
response_iter = mock.create_autospec(grpc.Call, instance=True)
callable_ = mock.Mock(spec=['__call__'], return_value=response_iter)
wrapped_callable = grpc_helpers._wrap_stream_errors(callable_)
got_iterator = wrapped_callable()
callable_.assert_called_once_with()
# Check each aliased method in the grpc.Call interface
got_iterator.add_callback(mock.sentinel.callback)
response_iter.add_callback.assert_called_once_with(mock.sentinel.callback)
got_iterator.cancel()
response_iter.cancel.assert_called_once_with()
got_iterator.code()
response_iter.code.assert_called_once_with()
got_iterator.details()
response_iter.details.assert_called_once_with()
got_iterator.initial_metadata()
response_iter.initial_metadata.assert_called_once_with()
got_iterator.is_active()
response_iter.is_active.assert_called_once_with()
got_iterator.time_remaining()
response_iter.time_remaining.assert_called_once_with()
got_iterator.trailing_metadata()
response_iter.trailing_metadata.assert_called_once_with()
def test_wrap_stream_errors_invocation():
grpc_error = RpcErrorImpl(grpc.StatusCode.INVALID_ARGUMENT)
callable_ = mock.Mock(spec=['__call__'], side_effect=grpc_error)
wrapped_callable = grpc_helpers._wrap_stream_errors(callable_)
with pytest.raises(exceptions.InvalidArgument) as exc_info:
wrapped_callable(1, 2, three='four')
callable_.assert_called_once_with(1, 2, three='four')
assert exc_info.value.response == grpc_error
class RpcResponseIteratorImpl(object):
def __init__(self, exception):
self._exception = exception
def next(self):
raise self._exception
__next__ = next
def test_wrap_stream_errors_iterator():
grpc_error = RpcErrorImpl(grpc.StatusCode.UNAVAILABLE)
response_iter = RpcResponseIteratorImpl(grpc_error)
callable_ = mock.Mock(spec=['__call__'], return_value=response_iter)
wrapped_callable = grpc_helpers._wrap_stream_errors(callable_)
got_iterator = wrapped_callable(1, 2, three='four')
with pytest.raises(exceptions.ServiceUnavailable) as exc_info:
next(got_iterator)
callable_.assert_called_once_with(1, 2, three='four')
assert exc_info.value.response == grpc_error
@mock.patch('google.api_core.grpc_helpers._wrap_unary_errors')
def test_wrap_errors_non_streaming(wrap_unary_errors):
callable_ = mock.create_autospec(grpc.UnaryUnaryMultiCallable)
result = grpc_helpers.wrap_errors(callable_)
assert result == wrap_unary_errors.return_value
wrap_unary_errors.assert_called_once_with(callable_)
@mock.patch('google.api_core.grpc_helpers._wrap_stream_errors')
def test_wrap_errors_streaming(wrap_stream_errors):
callable_ = mock.create_autospec(grpc.UnaryStreamMultiCallable)
result = grpc_helpers.wrap_errors(callable_)
assert result == wrap_stream_errors.return_value
wrap_stream_errors.assert_called_once_with(callable_)
@mock.patch('grpc.composite_channel_credentials')
@mock.patch(
'google.auth.default',
    return_value=(mock.sentinel.credentials, mock.sentinel.project))
@mock.patch('grpc.secure_channel')
def test_create_channel_implicit(
grpc_secure_channel, default, composite_creds_call):
target = 'example.com:443'
composite_creds = composite_creds_call.return_value
channel = grpc_helpers.create_channel(target)
assert channel is grpc_secure_channel.return_value
default.assert_called_once_with(scopes=None)
if (grpc_helpers.HAS_GRPC_GCP):
grpc_secure_channel.assert_called_once_with(
target, composite_creds, None)
else:
grpc_secure_channel.assert_called_once_with(
target, composite_creds)
@mock.patch('grpc.composite_channel_credentials')
@mock.patch(
'google.auth.default',
    return_value=(mock.sentinel.credentials, mock.sentinel.project))
@mock.patch('grpc.secure_channel')
def test_create_channel_implicit_with_ssl_creds(
grpc_secure_channel, default, composite_creds_call):
target = 'example.com:443'
ssl_creds = grpc.ssl_channel_credentials()
grpc_helpers.create_channel(target, ssl_credentials=ssl_creds)
default.assert_called_once_with(scopes=None)
composite_creds_call.assert_called_once_with(ssl_creds, mock.ANY)
composite_creds = composite_creds_call.return_value
if (grpc_helpers.HAS_GRPC_GCP):
grpc_secure_channel.assert_called_once_with(
target, composite_creds, None)
else:
grpc_secure_channel.assert_called_once_with(
target, composite_creds)
@mock.patch('grpc.composite_channel_credentials')
@mock.patch(
'google.auth.default',
    return_value=(mock.sentinel.credentials, mock.sentinel.project))
@mock.patch('grpc.secure_channel')
def test_create_channel_implicit_with_scopes(
grpc_secure_channel, default, composite_creds_call):
target = 'example.com:443'
composite_creds = composite_creds_call.return_value
channel = grpc_helpers.create_channel(target, scopes=['one', 'two'])
assert channel is grpc_secure_channel.return_value
default.assert_called_once_with(scopes=['one', 'two'])
if (grpc_helpers.HAS_GRPC_GCP):
grpc_secure_channel.assert_called_once_with(
target, composite_creds, None)
else:
grpc_secure_channel.assert_called_once_with(
target, composite_creds)
@mock.patch('grpc.composite_channel_credentials')
@mock.patch('google.auth.credentials.with_scopes_if_required')
@mock.patch('grpc.secure_channel')
def test_create_channel_explicit(
grpc_secure_channel, auth_creds, composite_creds_call):
target = 'example.com:443'
composite_creds = composite_creds_call.return_value
channel = grpc_helpers.create_channel(
target, credentials=mock.sentinel.credentials)
auth_creds.assert_called_once_with(mock.sentinel.credentials, None)
assert channel is grpc_secure_channel.return_value
if (grpc_helpers.HAS_GRPC_GCP):
grpc_secure_channel.assert_called_once_with(
target, composite_creds, None)
else:
grpc_secure_channel.assert_called_once_with(
target, composite_creds)
@mock.patch('grpc.composite_channel_credentials')
@mock.patch('grpc.secure_channel')
def test_create_channel_explicit_scoped(
grpc_secure_channel, composite_creds_call):
target = 'example.com:443'
scopes = ['1', '2']
composite_creds = composite_creds_call.return_value
credentials = mock.create_autospec(
google.auth.credentials.Scoped, instance=True)
credentials.requires_scopes = True
channel = grpc_helpers.create_channel(
target,
credentials=credentials,
scopes=scopes)
credentials.with_scopes.assert_called_once_with(scopes)
assert channel is grpc_secure_channel.return_value
if (grpc_helpers.HAS_GRPC_GCP):
grpc_secure_channel.assert_called_once_with(
target, composite_creds, None)
else:
grpc_secure_channel.assert_called_once_with(
target, composite_creds)
@pytest.mark.skipif(not grpc_helpers.HAS_GRPC_GCP,
reason='grpc_gcp module not available')
@mock.patch('grpc_gcp.secure_channel')
def test_create_channel_with_grpc_gcp(grpc_gcp_secure_channel):
target = 'example.com:443'
scopes = ['test_scope']
credentials = mock.create_autospec(
google.auth.credentials.Scoped, instance=True)
credentials.requires_scopes = True
grpc_helpers.create_channel(
target,
credentials=credentials,
scopes=scopes)
grpc_gcp_secure_channel.assert_called()
credentials.with_scopes.assert_called_once_with(scopes)
@pytest.mark.skipif(grpc_helpers.HAS_GRPC_GCP,
                    reason='grpc_gcp module is available')
@mock.patch('grpc.secure_channel')
def test_create_channel_without_grpc_gcp(grpc_secure_channel):
target = 'example.com:443'
scopes = ['test_scope']
credentials = mock.create_autospec(
google.auth.credentials.Scoped, instance=True)
credentials.requires_scopes = True
grpc_helpers.create_channel(
target,
credentials=credentials,
scopes=scopes)
grpc_secure_channel.assert_called()
credentials.with_scopes.assert_called_once_with(scopes)
class TestChannelStub(object):
def test_single_response(self):
channel = grpc_helpers.ChannelStub()
stub = operations_pb2.OperationsStub(channel)
expected_request = operations_pb2.GetOperationRequest(name='meep')
expected_response = operations_pb2.Operation(name='moop')
channel.GetOperation.response = expected_response
response = stub.GetOperation(expected_request)
assert response == expected_response
assert channel.requests == [('GetOperation', expected_request)]
assert channel.GetOperation.requests == [expected_request]
def test_no_response(self):
channel = grpc_helpers.ChannelStub()
stub = operations_pb2.OperationsStub(channel)
expected_request = operations_pb2.GetOperationRequest(name='meep')
with pytest.raises(ValueError) as exc_info:
stub.GetOperation(expected_request)
assert exc_info.match('GetOperation')
def test_missing_method(self):
channel = grpc_helpers.ChannelStub()
with pytest.raises(AttributeError):
channel.DoesNotExist.response
def test_exception_response(self):
channel = grpc_helpers.ChannelStub()
stub = operations_pb2.OperationsStub(channel)
expected_request = operations_pb2.GetOperationRequest(name='meep')
channel.GetOperation.response = RuntimeError()
with pytest.raises(RuntimeError):
stub.GetOperation(expected_request)
def test_callable_response(self):
channel = grpc_helpers.ChannelStub()
stub = operations_pb2.OperationsStub(channel)
expected_request = operations_pb2.GetOperationRequest(name='meep')
expected_response = operations_pb2.Operation(name='moop')
on_get_operation = mock.Mock(
spec=('__call__',), return_value=expected_response)
channel.GetOperation.response = on_get_operation
response = stub.GetOperation(expected_request)
assert response == expected_response
on_get_operation.assert_called_once_with(expected_request)
def test_multiple_responses(self):
channel = grpc_helpers.ChannelStub()
stub = operations_pb2.OperationsStub(channel)
expected_request = operations_pb2.GetOperationRequest(name='meep')
expected_responses = [
operations_pb2.Operation(name='foo'),
operations_pb2.Operation(name='bar'),
operations_pb2.Operation(name='baz'),
]
channel.GetOperation.responses = iter(expected_responses)
response1 = stub.GetOperation(expected_request)
response2 = stub.GetOperation(expected_request)
response3 = stub.GetOperation(expected_request)
assert response1 == expected_responses[0]
assert response2 == expected_responses[1]
assert response3 == expected_responses[2]
assert channel.requests == [('GetOperation', expected_request)] * 3
assert channel.GetOperation.requests == [expected_request] * 3
with pytest.raises(StopIteration):
stub.GetOperation(expected_request)
def test_multiple_responses_and_single_response_error(self):
channel = grpc_helpers.ChannelStub()
stub = operations_pb2.OperationsStub(channel)
channel.GetOperation.responses = []
channel.GetOperation.response = mock.sentinel.response
with pytest.raises(ValueError):
stub.GetOperation(operations_pb2.GetOperationRequest())
def test_call_info(self):
channel = grpc_helpers.ChannelStub()
stub = operations_pb2.OperationsStub(channel)
expected_request = operations_pb2.GetOperationRequest(name='meep')
expected_response = operations_pb2.Operation(name='moop')
expected_metadata = [('red', 'blue'), ('two', 'shoe')]
expected_credentials = mock.sentinel.credentials
channel.GetOperation.response = expected_response
response = stub.GetOperation(
expected_request, timeout=42, metadata=expected_metadata,
credentials=expected_credentials)
assert response == expected_response
assert channel.requests == [('GetOperation', expected_request)]
assert channel.GetOperation.calls == [
(expected_request, 42, expected_metadata, expected_credentials)]
def test_unary_unary(self):
channel = grpc_helpers.ChannelStub()
method_name = 'GetOperation'
callable_stub = channel.unary_unary(method_name)
assert callable_stub._method == method_name
assert callable_stub._channel == channel
def test_unary_stream(self):
channel = grpc_helpers.ChannelStub()
method_name = 'GetOperation'
callable_stub = channel.unary_stream(method_name)
assert callable_stub._method == method_name
assert callable_stub._channel == channel
def test_stream_unary(self):
channel = grpc_helpers.ChannelStub()
method_name = 'GetOperation'
callable_stub = channel.stream_unary(method_name)
assert callable_stub._method == method_name
assert callable_stub._channel == channel
def test_stream_stream(self):
channel = grpc_helpers.ChannelStub()
method_name = 'GetOperation'
callable_stub = channel.stream_stream(method_name)
assert callable_stub._method == method_name
assert callable_stub._channel == channel
def test_subscribe_unsubscribe(self):
channel = grpc_helpers.ChannelStub()
assert channel.subscribe(None) is None
assert channel.unsubscribe(None) is None
def test_close(self):
channel = grpc_helpers.ChannelStub()
assert channel.close() is None
|
|
__author__ = 'frank'
import os
import os.path
import traceback
import zstacklib.utils.uuidhelper as uuidhelper
from kvmagent import kvmagent
from kvmagent.plugins.imagestore import ImageStoreClient
from zstacklib.utils import jsonobject
from zstacklib.utils import linux
from zstacklib.utils import shell
from zstacklib.utils import traceable_shell
from zstacklib.utils import rollback
from zstacklib.utils.bash import *
from zstacklib.utils.report import *
from zstacklib.utils.plugin import completetask
logger = log.get_logger(__name__)
class AgentCommand(object):
def __init__(self):
pass
class AgentResponse(object):
def __init__(self):
self.totalCapacity = None
self.availableCapacity = None
self.success = None
self.error = None
class InitRsp(AgentResponse):
def __init__(self):
super(InitRsp, self).__init__()
self.localStorageUsedCapacity = None
class CopyBitsFromRemoteCmd(AgentCommand):
@log.sensitive_fields("dstPassword")
def __init__(self):
super(CopyBitsFromRemoteCmd, self).__init__()
self.sendCommandUrl = None
self.paths = []
self.dstIp = None
self.dstPassword = None
self.dstUsername = None
self.dstPort = 22
self.stage = None
self.volumeUuid = None
class RevertVolumeFromSnapshotRsp(AgentResponse):
def __init__(self):
super(RevertVolumeFromSnapshotRsp, self).__init__()
self.newVolumeInstallPath = None
self.size = None
class ReinitImageRsp(AgentResponse):
def __init__(self):
super(ReinitImageRsp, self).__init__()
self.newVolumeInstallPath = None
class MergeSnapshotRsp(AgentResponse):
def __init__(self):
super(MergeSnapshotRsp, self).__init__()
self.size = None
self.actualSize = None
class RebaseAndMergeSnapshotsRsp(AgentResponse):
def __init__(self):
super(RebaseAndMergeSnapshotsRsp, self).__init__()
self.size = None
self.actualSize = None
class CheckBitsRsp(AgentResponse):
def __init__(self):
super(CheckBitsRsp, self).__init__()
self.existing = False
class GetMd5Rsp(AgentResponse):
def __init__(self):
super(GetMd5Rsp, self).__init__()
self.md5s = None
class GetBackingFileRsp(AgentResponse):
def __init__(self):
super(GetBackingFileRsp, self).__init__()
self.size = None
self.backingFilePath = None
class GetVolumeSizeRsp(AgentResponse):
def __init__(self):
super(GetVolumeSizeRsp, self).__init__()
self.actualSize = None
self.size = None
class GetVolumeBaseImagePathRsp(AgentResponse):
def __init__(self):
super(GetVolumeBaseImagePathRsp, self).__init__()
self.path = None
self.size = None
class GetQCOW2ReferenceRsp(AgentResponse):
def __init__(self):
super(GetQCOW2ReferenceRsp, self).__init__()
self.referencePaths = None
class ResizeVolumeRsp(AgentResponse):
def __init__(self):
super(ResizeVolumeRsp, self).__init__()
self.size = None
class ListResponse(AgentResponse):
def __init__(self):
super(ListResponse, self).__init__()
self.paths = []
class CheckInitializedFileRsp(AgentResponse):
def __init__(self):
super(CheckInitializedFileRsp, self).__init__()
self.existed = True
class GetDownloadBitsFromKvmHostProgressRsp(AgentResponse):
def __init__(self):
super(GetDownloadBitsFromKvmHostProgressRsp, self).__init__()
self.totalSize = None
class DownloadBitsFromKvmHostRsp(AgentResponse):
def __init__(self):
super(DownloadBitsFromKvmHostRsp, self).__init__()
self.format = None
class LocalStoragePlugin(kvmagent.KvmAgent):
INIT_PATH = "/localstorage/init"
GET_PHYSICAL_CAPACITY_PATH = "/localstorage/getphysicalcapacity"
CREATE_EMPTY_VOLUME_PATH = "/localstorage/volume/createempty"
CREATE_FOLDER_PATH = "/localstorage/volume/createfolder"
CREATE_VOLUME_FROM_CACHE_PATH = "/localstorage/volume/createvolumefromcache"
DELETE_BITS_PATH = "/localstorage/delete"
DELETE_DIR_PATH = "/localstorage/deletedir"
UPLOAD_BIT_PATH = "/localstorage/sftp/upload"
DOWNLOAD_BIT_PATH = "/localstorage/sftp/download"
UPLOAD_TO_IMAGESTORE_PATH = "/localstorage/imagestore/upload"
COMMIT_TO_IMAGESTORE_PATH = "/localstorage/imagestore/commit"
DOWNLOAD_FROM_IMAGESTORE_PATH = "/localstorage/imagestore/download"
REVERT_SNAPSHOT_PATH = "/localstorage/snapshot/revert"
MERGE_SNAPSHOT_PATH = "/localstorage/snapshot/merge"
MERGE_AND_REBASE_SNAPSHOT_PATH = "/localstorage/snapshot/mergeandrebase"
OFFLINE_MERGE_PATH = "/localstorage/snapshot/offlinemerge"
CREATE_TEMPLATE_FROM_VOLUME = "/localstorage/volume/createtemplate"
CHECK_BITS_PATH = "/localstorage/checkbits"
REBASE_ROOT_VOLUME_TO_BACKING_FILE_PATH = "/localstorage/volume/rebaserootvolumetobackingfile"
VERIFY_SNAPSHOT_CHAIN_PATH = "/localstorage/snapshot/verifychain"
REBASE_SNAPSHOT_BACKING_FILES_PATH = "/localstorage/snapshot/rebasebackingfiles"
COPY_TO_REMOTE_BITS_PATH = "/localstorage/copytoremote"
GET_MD5_PATH = "/localstorage/getmd5"
CHECK_MD5_PATH = "/localstorage/checkmd5"
GET_BACKING_FILE_PATH = "/localstorage/volume/getbackingfile"
GET_VOLUME_SIZE = "/localstorage/volume/getsize"
GET_BASE_IMAGE_PATH = "/localstorage/volume/getbaseimagepath"
GET_QCOW2_REFERENCE = "/localstorage/getqcow2reference"
CONVERT_QCOW2_TO_RAW = "/localstorage/imagestore/convert/raw"
RESIZE_VOLUME_PATH = "/localstorage/volume/resize"
REINIT_IMAGE_PATH = "/localstorage/reinit/image"
CHECK_INITIALIZED_FILE = "/localstorage/check/initializedfile"
CREATE_INITIALIZED_FILE = "/localstorage/create/initializedfile"
DOWNLOAD_BITS_FROM_KVM_HOST_PATH = "/localstorage/kvmhost/download"
CANCEL_DOWNLOAD_BITS_FROM_KVM_HOST_PATH = "/localstorage/kvmhost/download/cancel"
GET_DOWNLOAD_BITS_FROM_KVM_HOST_PROGRESS_PATH = "/localstorage/kvmhost/download/progress"
LOCAL_NOT_ROOT_USER_MIGRATE_TMP_PATH = "primary_storage_tmp_dir"
def start(self):
http_server = kvmagent.get_http_server()
http_server.register_async_uri(self.INIT_PATH, self.init)
http_server.register_async_uri(self.GET_PHYSICAL_CAPACITY_PATH, self.get_physical_capacity)
http_server.register_async_uri(self.CREATE_EMPTY_VOLUME_PATH, self.create_empty_volume)
http_server.register_async_uri(self.CREATE_FOLDER_PATH, self.create_folder)
http_server.register_async_uri(self.CREATE_VOLUME_FROM_CACHE_PATH, self.create_root_volume_from_template)
http_server.register_async_uri(self.DELETE_BITS_PATH, self.delete)
http_server.register_async_uri(self.DELETE_DIR_PATH, self.deletedir)
http_server.register_async_uri(self.DOWNLOAD_BIT_PATH, self.download_from_sftp)
http_server.register_async_uri(self.UPLOAD_BIT_PATH, self.upload_to_sftp)
http_server.register_async_uri(self.UPLOAD_TO_IMAGESTORE_PATH, self.upload_to_imagestore)
http_server.register_async_uri(self.COMMIT_TO_IMAGESTORE_PATH, self.commit_to_imagestore)
http_server.register_async_uri(self.DOWNLOAD_FROM_IMAGESTORE_PATH, self.download_from_imagestore)
http_server.register_async_uri(self.REVERT_SNAPSHOT_PATH, self.revert_snapshot)
http_server.register_async_uri(self.REINIT_IMAGE_PATH, self.reinit_image)
http_server.register_async_uri(self.MERGE_SNAPSHOT_PATH, self.merge_snapshot)
http_server.register_async_uri(self.MERGE_AND_REBASE_SNAPSHOT_PATH, self.merge_and_rebase_snapshot)
http_server.register_async_uri(self.OFFLINE_MERGE_PATH, self.offline_merge_snapshot)
http_server.register_async_uri(self.CREATE_TEMPLATE_FROM_VOLUME, self.create_template_from_volume)
http_server.register_async_uri(self.CHECK_BITS_PATH, self.check_bits)
http_server.register_async_uri(self.REBASE_ROOT_VOLUME_TO_BACKING_FILE_PATH, self.rebase_root_volume_to_backing_file)
http_server.register_async_uri(self.VERIFY_SNAPSHOT_CHAIN_PATH, self.verify_backing_file_chain)
http_server.register_async_uri(self.REBASE_SNAPSHOT_BACKING_FILES_PATH, self.rebase_backing_files)
http_server.register_async_uri(self.COPY_TO_REMOTE_BITS_PATH, self.copy_bits_to_remote, cmd=CopyBitsFromRemoteCmd())
http_server.register_async_uri(self.GET_MD5_PATH, self.get_md5)
http_server.register_async_uri(self.CHECK_MD5_PATH, self.check_md5)
http_server.register_async_uri(self.GET_BACKING_FILE_PATH, self.get_backing_file_path)
http_server.register_async_uri(self.GET_VOLUME_SIZE, self.get_volume_size)
http_server.register_async_uri(self.GET_BASE_IMAGE_PATH, self.get_volume_base_image_path)
http_server.register_async_uri(self.GET_QCOW2_REFERENCE, self.get_qcow2_reference)
http_server.register_async_uri(self.CONVERT_QCOW2_TO_RAW, self.convert_qcow2_to_raw)
http_server.register_async_uri(self.RESIZE_VOLUME_PATH, self.resize_volume)
http_server.register_async_uri(self.CHECK_INITIALIZED_FILE, self.check_initialized_file)
http_server.register_async_uri(self.CREATE_INITIALIZED_FILE, self.create_initialized_file)
http_server.register_async_uri(self.DOWNLOAD_BITS_FROM_KVM_HOST_PATH, self.download_from_kvmhost)
http_server.register_async_uri(self.CANCEL_DOWNLOAD_BITS_FROM_KVM_HOST_PATH, self.cancel_download_from_kvmhost)
http_server.register_async_uri(self.GET_DOWNLOAD_BITS_FROM_KVM_HOST_PROGRESS_PATH, self.get_download_bits_from_kvmhost_progress)
self.imagestore_client = ImageStoreClient()
def stop(self):
pass
@kvmagent.replyerror
def cancel_download_from_kvmhost(self, req):
return self.cancel_download_from_sftp(req)
@kvmagent.replyerror
def get_download_bits_from_kvmhost_progress(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = GetDownloadBitsFromKvmHostProgressRsp()
rsp.totalSize = linux.get_total_file_size(cmd.volumePaths)
return jsonobject.dumps(rsp)
def cancel_download_from_sftp(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = AgentResponse()
shell.run("pkill -9 -f '%s'" % cmd.primaryStorageInstallPath)
self.do_delete_bits(cmd.primaryStorageInstallPath)
return jsonobject.dumps(rsp)
@kvmagent.replyerror
@completetask
def download_from_kvmhost(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = DownloadBitsFromKvmHostRsp()
install_path = cmd.primaryStorageInstallPath
# TODO: assumes the agent will not restart; stale task records may need to be cleaned up
last_task = self.load_and_save_task(req, rsp, os.path.exists, install_path)
if last_task and last_task.agent_pid == os.getpid():
rsp = self.wait_task_complete(last_task)
return jsonobject.dumps(rsp)
self.do_download_from_sftp(cmd)
rsp.format = linux.get_img_fmt(install_path)
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def check_initialized_file(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
file_path = cmd.filePath
rsp = CheckInitializedFileRsp()
if file_path is None:
rsp.success = False
rsp.error = "input file path is None"
else:
rsp.existed = os.path.exists(file_path)
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def create_initialized_file(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
file_path = cmd.filePath
rsp = AgentResponse()
if file_path is None:
rsp.success = False
rsp.error = "input file path is None"
else:
if not os.path.exists(file_path):
f = open(file_path, 'w')
f.close()
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def resize_volume(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
install_path = cmd.installPath
rsp = ResizeVolumeRsp()
shell.call("qemu-img resize %s %s" % (install_path, cmd.size))
ret = linux.qcow2_virtualsize(install_path)
rsp.size = ret
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def convert_qcow2_to_raw(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
return self.imagestore_client.convert_image_raw(cmd)
@kvmagent.replyerror
def get_qcow2_reference(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
out = shell.call('find %s/ -type f' % cmd.searchingDir)
rsp = GetQCOW2ReferenceRsp()
rsp.referencePaths = []
real_path = os.path.realpath(cmd.path)
for f in out.splitlines():
backing_file = linux.qcow2_get_backing_file(f)
if os.path.realpath(backing_file) == real_path:
rsp.referencePaths.append(f)
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def get_volume_size(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = GetVolumeSizeRsp()
rsp.size, rsp.actualSize = linux.qcow2_size_and_actual_size(cmd.installPath)
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def get_volume_base_image_path(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = GetVolumeBaseImagePathRsp()
if not os.path.basename(cmd.volumeInstallDir).endswith(cmd.volumeUuid):
raise Exception('invalid volume install dir[%s]: expected it to end with the volume uuid[%s]' % (cmd.volumeInstallDir, cmd.volumeUuid))
path = linux.get_qcow2_base_image_recusively(cmd.volumeInstallDir, cmd.imageCacheDir)
if not path:
return jsonobject.dumps(rsp)
rsp.path = path
rsp.size = linux.get_qcow2_file_chain_size(path)
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def get_backing_file_path(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
out = linux.qcow2_get_backing_file(cmd.path)
rsp = GetBackingFileRsp()
if out:
rsp.backingFilePath = out
rsp.size = os.path.getsize(out)
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def get_md5(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = GetMd5Rsp()
rsp.md5s = []
if cmd.sendCommandUrl:
Report.url = cmd.sendCommandUrl
report = Report(cmd.threadContext, cmd.threadContextStack)
report.processType = "LocalStorageMigrateVolume"
PFILE = shell.call('mktemp /tmp/tmp-XXXXXX').strip()
total = 0
written = 0
for to in cmd.md5s:
total = total + os.path.getsize(to.path)
start = 0
end = 10
if cmd.stage:
start, end = get_scale(cmd.stage)
def _get_progress(synced):
logger.debug("getProgress in get_md5")
if not os.path.exists(PFILE):
return synced
last = linux.tail_1(PFILE).strip()
if not last or not last.isdigit():
return synced
percent = int(round((float(written) * 100 + os.path.getsize(to.path) * float(last)) / total * (end - start) / 100) + start)
report.progress_report(str(percent), "report")
return synced
report.resourceUuid = cmd.volumeUuid
if start == 0:
report.progress_report("0", "start")
else:
report.progress_report(str(start), "report")
for to in cmd.md5s:
_, md5, _ = bash_progress_1("pv -n %s 2>%s | md5sum | cut -d ' ' -f 1" % (to.path, PFILE), _get_progress)
rsp.md5s.append({
'resourceUuid': to.resourceUuid,
'path': to.path,
'md5': md5
})
written += os.path.getsize(to.path)
percent = int(round(float(written) / float(total) * (end - start) + start))
report.progress_report(percent, "report")
if os.path.exists(PFILE):
os.remove(PFILE)
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def check_md5(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
if cmd.sendCommandUrl:
Report.url = cmd.sendCommandUrl
report = Report(cmd.threadContext, cmd.threadContextStack)
report.processType = "LocalStorageMigrateVolume"
PFILE = shell.call('mktemp /tmp/tmp-XXXXXX').strip()
total = 0
written = 0
start = 90
end = 100
if cmd.stage:
start, end = get_scale(cmd.stage)
for to in cmd.md5s:
total = total + os.path.getsize(to.path)
def _get_progress(synced):
logger.debug("getProgress in check_md5")
if not os.path.exists(PFILE):
return synced
last = linux.tail_1(PFILE).strip()
if not last or not last.isdigit():
return synced
percent = int(round((float(written) * 100 + os.path.getsize(to.path) * float(last)) / total * (end - start) / 100) + start)
report.progress_report(percent, "report")
return synced
report.resourceUuid = cmd.volumeUuid
for to in cmd.md5s:
_, dst_md5, _ = bash_progress_1("pv -n %s 2>%s | md5sum | cut -d ' ' -f 1" % (to.path, PFILE), _get_progress)
if dst_md5 != to.md5:
raise Exception("MD5 unmatch. The file[uuid:%s, path:%s]'s md5 (src host:%s, dst host:%s)" %
(to.resourceUuid, to.path, to.md5, dst_md5))
written += os.path.getsize(to.path)
percent = int(round(float(written) / float(total) * (end - start) + start))
report.progress_report(percent, "report")
if os.path.exists(PFILE):
os.remove(PFILE)
rsp = AgentResponse()
if end == 100:
report.progress_report("100", "finish")
else:
report.progress_report(str(end), "report")
return jsonobject.dumps(rsp)
@staticmethod
def _get_disk_capacity(path):
if not path:
raise Exception('storage path cannot be None')
return linux.get_disk_capacity_by_df(path)
@kvmagent.replyerror
@in_bash
def copy_bits_to_remote(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
if cmd.dstUsername != 'root':
raise Exception("cannot support migrate to non-root user host")
chain = sum([linux.qcow2_get_file_chain(p) for p in cmd.paths], [])
if cmd.sendCommandUrl:
Report.url = cmd.sendCommandUrl
report = Report(cmd.threadContext, cmd.threadContextStack)
report.processType = "LocalStorageMigrateVolume"
report.resourceUuid = cmd.volumeUuid
PFILE = shell.call('mktemp /tmp/tmp-XXXXXX').strip()
PASSWORD_FILE = linux.write_to_temp_file(cmd.dstPassword)
start = 10
end = 90
if cmd.stage:
start, end = get_scale(cmd.stage)
total = 0
for path in set(chain):
total = total + os.path.getsize(path)
written = 0
def _get_progress(synced):
logger.debug("getProgress in localstorage-agent, synced: %s, total: %s" % (synced, total))
if not os.path.exists(PFILE):
return synced
fpread = open(PFILE, 'r')
lines = fpread.readlines()
if not lines:
fpread.close()
return synced
last = str(lines[-1]).strip().split('\r')[-1]
if not last or len(last.split()) < 1:
fpread.close()
return synced
line = last.split()[0]
if not line.isdigit():
    fpread.close()
    return synced
if total > 0:
synced = long(line)
if synced < total:
percent = int(round(float(written + synced) / float(total) * (end - start) + start))
report.progress_report(percent, "report")
synced = written
fpread.close()
return synced
for path in set(chain):
PATH = path
USER = cmd.dstUsername
IP = cmd.dstIp
PORT = (cmd.dstPort and cmd.dstPort or "22")
DIR = os.path.dirname(path)
_, _, err = bash_progress_1(
# Fixes ZSTAC-13430: handle extremely complex password like ~ ` !@#$%^&*()_+-=[]{}|?<>;:'"/ .
'rsync -av --progress --relative {{PATH}} --rsh="/usr/bin/sshpass -f{{PASSWORD_FILE}} ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -p {{PORT}} -l {{USER}}" {{IP}}:/ 1>{{PFILE}}', _get_progress, False)
if err:
linux.rm_file_force(PASSWORD_FILE)
linux.rm_file_force(PFILE)
raise Exception('failed to migrate vm to host, because %s' % str(err))
written += os.path.getsize(path)
bash_errorout('/usr/bin/sshpass -f{{PASSWORD_FILE}} ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -p {{PORT}} {{USER}}@{{IP}} "/bin/sync {{PATH}}"')
percent = int(round(float(written) / float(total) * (end - start) + start))
report.progress_report(percent, "report")
linux.rm_file_force(PASSWORD_FILE)
linux.rm_file_force(PFILE)
rsp = AgentResponse()
rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity(cmd.storagePath)
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def verify_backing_file_chain(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
for sp in cmd.snapshots:
if not os.path.exists(sp.path):
raise Exception('cannot find the file[%s]' % sp.path)
if sp.parentPath and not os.path.exists(sp.parentPath):
raise Exception('cannot find the backing file[%s]' % sp.parentPath)
if sp.parentPath:
out = linux.qcow2_get_backing_file(sp.path)
if sp.parentPath != out:
raise Exception("resource[Snapshot or Volume, uuid:%s, path:%s]'s backing file[%s] is not equal to %s" %
(sp.snapshotUuid, sp.path, out, sp.parentPath))
return jsonobject.dumps(AgentResponse())
@kvmagent.replyerror
def rebase_backing_files(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
for sp in cmd.snapshots:
if sp.parentPath:
linux.qcow2_rebase_no_check(sp.parentPath, sp.path)
return jsonobject.dumps(AgentResponse())
@kvmagent.replyerror
def check_bits(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = CheckBitsRsp()
rsp.existing = os.path.exists(cmd.path)
return jsonobject.dumps(rsp)
@kvmagent.replyerror
@rollback.rollback
def create_template_from_volume(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = AgentResponse()
dirname = os.path.dirname(cmd.installPath)
if not os.path.exists(dirname):
os.makedirs(dirname, 0755)
@rollback.rollbackable
def _0():
linux.rm_file_force(cmd.installPath)
_0()
t_shell = traceable_shell.get_shell(cmd)
linux.create_template(cmd.volumePath, cmd.installPath, shell=t_shell)
logger.debug('successfully created template[%s] from volume[%s]' % (cmd.installPath, cmd.volumePath))
rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity(cmd.storagePath)
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def revert_snapshot(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = RevertVolumeFromSnapshotRsp()
install_path = cmd.snapshotInstallPath
new_volume_path = os.path.join(os.path.dirname(install_path), '{0}.qcow2'.format(uuidhelper.uuid()))
linux.qcow2_clone_with_cmd(install_path, new_volume_path, cmd)
size = linux.qcow2_virtualsize(new_volume_path)
rsp.newVolumeInstallPath = new_volume_path
rsp.size = size
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def reinit_image(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = ReinitImageRsp()
install_path = cmd.imagePath
dirname = os.path.dirname(cmd.volumePath)
if not os.path.exists(dirname):
os.makedirs(dirname, 0775)
new_volume_path = os.path.join(dirname, '{0}.qcow2'.format(uuidhelper.uuid()))
linux.qcow2_clone_with_cmd(install_path, new_volume_path, cmd)
rsp.newVolumeInstallPath = new_volume_path
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def merge_snapshot(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = MergeSnapshotRsp()
workspace_dir = os.path.dirname(cmd.workspaceInstallPath)
if not os.path.exists(workspace_dir):
os.makedirs(workspace_dir)
linux.create_template(cmd.snapshotInstallPath, cmd.workspaceInstallPath)
rsp.size, rsp.actualSize = linux.qcow2_size_and_actual_size(cmd.workspaceInstallPath)
rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity(cmd.storagePath)
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def merge_and_rebase_snapshot(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
snapshots = cmd.snapshotInstallPaths
count = len(snapshots)
for i in range(count):
if i+1 < count:
target = snapshots[i]
backing_file = snapshots[i+1]
linux.qcow2_rebase_no_check(backing_file, target)
latest = snapshots[0]
rsp = RebaseAndMergeSnapshotsRsp()
workspace_dir = os.path.dirname(cmd.workspaceInstallPath)
if not os.path.exists(workspace_dir):
os.makedirs(workspace_dir)
linux.create_template(latest, cmd.workspaceInstallPath)
rsp.size, rsp.actualSize = linux.qcow2_size_and_actual_size(cmd.workspaceInstallPath)
rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity(cmd.storagePath)
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def offline_merge_snapshot(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = AgentResponse()
if not cmd.fullRebase:
linux.qcow2_rebase(cmd.srcPath, cmd.destPath)
else:
tmp = os.path.join(os.path.dirname(cmd.destPath), '%s.qcow2' % uuidhelper.uuid())
linux.create_template(cmd.destPath, tmp)
shell.call("mv %s %s" % (tmp, cmd.destPath))
rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity(cmd.storagePath)
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def get_physical_capacity(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = AgentResponse()
rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity(cmd.storagePath)
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def rebase_root_volume_to_backing_file(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
linux.qcow2_rebase_no_check(cmd.backingFilePath, cmd.rootVolumePath)
return jsonobject.dumps(AgentResponse())
@kvmagent.replyerror
def init(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
if not os.path.exists(cmd.path):
os.makedirs(cmd.path, 0755)
if cmd.initFilePath:
if not os.path.exists(cmd.initFilePath):
f = open(cmd.initFilePath, 'w')
f.close()
rsp = InitRsp()
rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity(cmd.path)
rsp.localStorageUsedCapacity = linux.get_used_disk_apparent_size(cmd.path, 0, 1)
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def create_folder(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = AgentResponse()
try:
dirname = os.path.dirname(cmd.installUrl)
if not os.path.exists(dirname):
os.makedirs(dirname)
except Exception as e:
logger.warn(linux.get_exception_stacktrace())
rsp.error = 'unable to create folder at %s, because %s' % (cmd.installUrl, str(e))
rsp.success = False
return jsonobject.dumps(rsp)
logger.debug('successfully created folder at %s' % cmd.installUrl)
rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity(cmd.storagePath)
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def create_empty_volume(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = AgentResponse()
try:
self.do_create_empty_volume(cmd)
except Exception as e:
logger.warn(linux.get_exception_stacktrace())
rsp.error = 'unable to create empty volume[uuid:%s, name:%s], %s' % (cmd.volumeUuid, cmd.name, str(e))
rsp.success = False
return jsonobject.dumps(rsp)
logger.debug('successfully created empty volume[uuid:%s, size:%s] at %s' % (cmd.volumeUuid, cmd.size, cmd.installUrl))
rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity(cmd.storagePath)
return jsonobject.dumps(rsp)
def do_create_empty_volume(self, cmd):
dirname = os.path.dirname(cmd.installUrl)
if not os.path.exists(dirname):
os.makedirs(dirname)
if cmd.backingFile:
linux.qcow2_create_with_backing_file_and_cmd(cmd.backingFile, cmd.installUrl, cmd)
else:
linux.qcow2_create_with_cmd(cmd.installUrl, cmd.size, cmd)
@kvmagent.replyerror
def create_root_volume_from_template(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = AgentResponse()
if not os.path.exists(cmd.templatePathInCache):
rsp.error = "unable to find image in cache"
rsp.success = False
logger.debug('error: %s: %s' % (rsp.error, cmd.templatePathInCache))
return jsonobject.dumps(rsp)
dirname = os.path.dirname(cmd.installUrl)
if not os.path.exists(dirname):
os.makedirs(dirname, 0775)
linux.qcow2_clone_with_cmd(cmd.templatePathInCache, cmd.installUrl, cmd)
rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity(cmd.storagePath)
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def delete(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = AgentResponse()
if cmd.path:
kvmagent.deleteImage(cmd.path)
rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity(cmd.storagePath)
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def deletedir(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = AgentResponse()
linux.rm_dir_checked(cmd.path)
logger.debug('successfully deleted %s' % cmd.path)
rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity(cmd.storagePath)
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def upload_to_sftp(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = AgentResponse()
def upload():
if not os.path.exists(cmd.primaryStorageInstallPath):
raise kvmagent.KvmError('cannot find %s' % cmd.primaryStorageInstallPath)
linux.scp_upload(cmd.hostname, cmd.sshKey, cmd.primaryStorageInstallPath, cmd.backupStorageInstallPath, cmd.username, cmd.sshPort)
try:
upload()
except kvmagent.KvmError as e:
logger.warn(linux.get_exception_stacktrace())
rsp.error = str(e)
rsp.success = False
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def upload_to_imagestore(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
return self.imagestore_client.upload_to_imagestore(cmd, req)
@kvmagent.replyerror
def commit_to_imagestore(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
return self.imagestore_client.commit_to_imagestore(cmd, req)
@kvmagent.replyerror
def download_from_sftp(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = AgentResponse()
try:
self.do_download_from_sftp(cmd)
except Exception as e:
content = traceback.format_exc()
logger.warn(content)
err = "unable to download %s/%s, because %s" % (cmd.hostname, cmd.backupStorageInstallPath, str(e))
rsp.error = err
rsp.success = False
rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity(cmd.storagePath)
return jsonobject.dumps(rsp)
def do_download_from_sftp(self, cmd):
linux.scp_download(cmd.hostname, cmd.sshKey, cmd.backupStorageInstallPath, cmd.primaryStorageInstallPath, cmd.username, cmd.sshPort, cmd.bandWidth)
logger.debug('successfully downloaded %s/%s to %s' % (cmd.hostname, cmd.backupStorageInstallPath, cmd.primaryStorageInstallPath))
@kvmagent.replyerror
def download_from_imagestore(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
cachedir = None if cmd.isData else cmd.storagePath
self.imagestore_client.download_from_imagestore(cachedir, cmd.hostname, cmd.backupStorageInstallPath, cmd.primaryStorageInstallPath)
if cmd.isData:
self.imagestore_client.clean_meta(cmd.primaryStorageInstallPath)
rsp = AgentResponse()
rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity(cmd.storagePath)
return jsonobject.dumps(rsp)
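# Hedged illustration (not part of the plugin above): get_md5, check_md5 and
# copy_bits_to_remote all map "bytes processed so far" onto a [start, end]
# sub-range of the progress value passed to Report.progress_report. A minimal,
# self-contained sketch of that scaling, assuming total > 0; the helper name
# _scaled_percent_example is illustrative only.
def _scaled_percent_example(written, total, start, end):
    """Map written/total onto the [start, end] percent window."""
    return int(round(float(written) / float(total) * (end - start) + start))

if __name__ == '__main__':
    # half of the bytes processed inside the 10%-90% stage window -> 50%
    assert _scaled_percent_example(50, 100, 10, 90) == 50
    # all bytes processed -> the window's upper bound
    assert _scaled_percent_example(100, 100, 10, 90) == 90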
|
|
import floto
import floto.decisions
import floto.specs
import copy
import gzip
import logging
logger = logging.getLogger(__name__)
import json
class DecisionBuilder:
def __init__(self, *, activity_tasks, default_activity_task_list):
self.workflow_fail = False
self.workflow_complete = False
self.activity_tasks = activity_tasks
self.tasks_by_id = {task.id_:task for task in activity_tasks}
self.history = None
self.default_activity_task_list = default_activity_task_list
self.decision_input = floto.decider.DecisionInput()
self.execution_graph = None
self.first_event_id = None
self.last_event_id = None
self._decompress_generator_result = False
def get_decisions(self, history):
logger.debug('get_decisions...')
self._set_history(history)
self.first_event_id = self.history.previous_decision_id
self.last_event_id = self.history.decision_task_started_event_id
self._build_execution_graph()
self.workflow_fail = False
self.workflow_complete = False
decisions = self._collect_decisions()
return decisions
def _set_history(self, history):
self.history = history
self.decision_input.history = history
def is_terminate_workflow(self):
return self.workflow_fail or self.workflow_complete
def _collect_decisions(self):
logger.debug('DecisionBuilder._collect_decisions({},{})'.format(self.first_event_id,
self.last_event_id))
if self.first_event_id == 0:
return self.get_decisions_after_workflow_start()
decisions = []
events = self.history.get_events_for_decision(self.first_event_id, self.last_event_id)
if events['faulty']:
decisions.extend(self.get_decisions_faulty_tasks(events['faulty']))
if not self.is_terminate_workflow() and events['completed'] and \
self.all_workflow_tasks_finished(events['completed']):
decisions = self.get_decisions_after_successful_workflow_execution()
if not self.is_terminate_workflow() and events['completed']:
decisions.extend(self.get_decisions_after_activity_completion(events['completed']))
if not self.is_terminate_workflow() and events['decision_failed']:
decisions.extend(self.get_decisions_decision_failed(events['decision_failed']))
return decisions
def get_decisions_after_workflow_start(self):
logger.debug('DecisionBuilder.get_decisions_after_workflow_start()')
decisions = []
task_ids = self.execution_graph.get_nodes_zero_in_degree()
for task_id in task_ids:
task = self.tasks_by_id[task_id]
decision = self.get_decision_schedule_activity(task=task)
decisions.append(decision)
return decisions
def get_decisions_faulty_tasks(self, task_events):
"""Analyze the faulty tasks and their retry strategies. If a task is to be resubmitted,
add a decision to the output.
Parameters
----------
task_events: list
List of ActivityTask Failed/TimedOut events
Returns
-------
list
List of ScheduleActivityTask decisions if the tasks are to be resubmitted.
If not, a FailWorkflowExecution decision is returned and the self.workflow_fail flag
is set.
"""
decisions = []
for e in task_events:
if self.is_terminate_workflow():
break
id_ = self.history.get_id_task_event(e)
t = self.tasks_by_id[id_]
if t.retry_strategy:
failures = self.history.get_number_activity_failures(t)
if t.retry_strategy.is_task_resubmitted(failures):
decision = self.get_decision_schedule_activity(task=t)
decisions.append(decision)
else:
reason = 'task_retry_limit_reached'
details = self.decision_input.get_details_failed_tasks(task_events)
decisions = self.get_decisions_after_failed_workflow_execution(reason=reason,
details=details)
else:
reason = 'task_failed'
details = self.decision_input.get_details_failed_tasks(task_events)
decisions = self.get_decisions_after_failed_workflow_execution(reason=reason,
details=details)
return decisions
def get_decisions_after_activity_completion(self, events):
"""Return the decisions based on the completed activities since the last decision task.
Parameters
----------
events: list
List of ActivityTaskCompleted or TimerFired events
"""
logger.debug('DecisionBuilder.get_decisions_after_activity_completion...')
task_ids = [self.history.get_id_task_event(e) for e in events]
tasks = self.get_tasks_to_be_scheduled(task_ids)
decisions = []
for t in tasks:
decisions.append(self.get_decision_schedule_activity(task=t))
return decisions
def get_decisions_decision_failed(self, events_decision_failed):
decisions = []
for event in events_decision_failed:
last_event_id = self.history.get_event_attributes(event)['startedEventId']
first_event_id = self.history.get_id_previous_started(event)
builder = floto.decider.DecisionBuilder(activity_tasks=self.activity_tasks,
default_activity_task_list=self.default_activity_task_list)
builder.first_event_id = first_event_id
builder.last_event_id = last_event_id
builder._set_history(self.history)
decisions.extend(builder._collect_decisions())
return decisions
def get_decisions_after_successful_workflow_execution(self):
tasks = [self.tasks_by_id[i] for i in self.execution_graph.get_outgoing_nodes()]
result = self.decision_input.collect_results(tasks)
d = floto.decisions.CompleteWorkflowExecution(result=result)
self.workflow_complete = True
return [d]
def get_decision_schedule_activity(self, *, task):
requires = [self.tasks_by_id[i] for i in self.execution_graph.get_requires(task.id_)]
if isinstance(task, floto.specs.task.ActivityTask):
input_ = self.decision_input.get_input(task, 'activity_task', requires)
return self.get_decision_schedule_activity_task(activity_task=task, input=input_)
elif isinstance(task, floto.specs.task.ChildWorkflow):
input_ = self.decision_input.get_input(task, 'child_workflow_task', requires)
return self.get_decision_start_child_workflow_execution(task, input_)
elif isinstance(task, floto.specs.task.Timer):
return self.get_decision_start_timer(timer_task=task)
else:
m = 'Do not know how to get decision for task of type: {}'.format(type(task))
raise ValueError(m)
def get_decisions_after_failed_workflow_execution(self, *, reason, details):
d = floto.decisions.FailWorkflowExecution(details=details, reason=reason)
self.workflow_fail = True
return [d]
def get_decision_schedule_activity_task(self, *, activity_task, input=None):
activity_type = floto.api.ActivityType(domain=activity_task.domain,
name=activity_task.name,
version=activity_task.version)
task_list = activity_task.task_list or self.default_activity_task_list
decision = floto.decisions.ScheduleActivityTask(activity_type=activity_type,
activity_id=activity_task.id_,
task_list=task_list, input=input)
return decision
def get_decision_start_timer(self, *, timer_task):
return floto.decisions.StartTimer(timer_id=timer_task.id_,
start_to_fire_timeout=timer_task.delay_in_seconds)
def get_decision_start_child_workflow_execution(self, child_workflow_task=None, input_=None):
logger.debug('DecisionBuilder.get_decision_start_child_workflow_execution...')
workflow_type = floto.api.WorkflowType(domain='d', name=child_workflow_task.workflow_type_name,
version=child_workflow_task.workflow_type_version)
args = {'workflow_id':child_workflow_task.id_,
'workflow_type':workflow_type}
if child_workflow_task.task_list:
args['task_list'] = child_workflow_task.task_list
if input_:
args['input'] = input_
return floto.decisions.StartChildWorkflowExecution(**args)
def all_workflow_tasks_finished(self, completed_tasks):
"""Return True if all tasks of this workflow have finished, False otherwise."""
logger.debug('DecisionBuilder.all_workflow_tasks_finished({})'.format(completed_tasks))
if self.completed_contain_generator(completed_tasks):
return False
if self.completed_have_depending_tasks(completed_tasks):
return False
if not self.outgoing_nodes_completed():
return False
return True
def completed_contain_generator(self, completed_tasks):
for task in completed_tasks:
id_ = self.history.get_id_task_event(task)
if isinstance(self.tasks_by_id[id_], floto.specs.task.Generator):
return True
def completed_have_depending_tasks(self, completed_tasks):
"""Return True if any of the tasks in "completed_tasks" has a task which depends on it.
False otherwise."""
logger.debug('DecisionBuilder.completed_have_depending_tasks({})'.format(completed_tasks))
for t in completed_tasks:
id_ = self.history.get_id_task_event(t)
depending_tasks = self.execution_graph.get_depending(id_)
if depending_tasks:
return True
return False
def outgoing_nodes_completed(self):
"""Check if all activity tasks which are outgoing vertices of the execution graph are
completed."""
outgoing_nodes = [self.tasks_by_id[i] for i in self.execution_graph.get_outgoing_nodes()]
for t in outgoing_nodes:
if not self.history.is_task_completed(t):
return False
return True
def get_tasks_to_be_scheduled(self, completed_task_ids):
"""Based on a list of ids of completed tasks, retrieve the tasks to be executed next.
Parameters
----------
completed_task_ids: list of str
List of ids of completed tasks
Returns
-------
list: floto.specs.Task
The tasks to be scheduled in the next decision
"""
logger.debug('DecisionBuilder.get_tasks_to_be_scheduled({})'.format(completed_task_ids))
tasks = set()
for completed_task_id in completed_task_ids:
for d in self.execution_graph.get_depending(completed_task_id):
requires = [self.tasks_by_id[id_] for id_ in self.execution_graph.get_requires(d)]
if all([self.history.is_task_completed(t) for t in requires]):
tasks.add(d)
return [self.tasks_by_id[i] for i in tasks]
def _update_execution_graph_with_completed_events(self, completed_events):
"""Updates the execution graph if the completed activities contain generators."""
for e in completed_events:
activity_id = self.history.get_id_task_event(e)
g = self._get_generator(e)
if g:
self._update_execution_graph(g)
def _update_execution_graph(self, generator):
"""Updates the execution graph."""
result_generator = self.history.get_result_completed_activity(generator)
if self._decompress_generator_result:
new_tasks_serializable = self._decompress_result(result_generator)
else:
new_tasks_serializable = result_generator
new_tasks = []
for serializable in new_tasks_serializable:
task = floto.specs.serializer.get_class(serializable['type'])
new_tasks.append(task.deserialized(**serializable))
self.tasks_by_id.update({task.id_:task for task in new_tasks})
self._add_tasks_to_execution_graph(new_tasks)
# TODO test
for id_ in self.execution_graph.get_depending(generator.id_):
self.execution_graph.add_dependencies(id_, [t.id_ for t in new_tasks])
for t in new_tasks:
self.execution_graph.add_dependencies(t.id_, [generator.id_])
def _decompress_result(self, compressed_result):
result_bytes = bytes([int(c, 16) for c in compressed_result.split('x')])
result = gzip.decompress(result_bytes).decode()
result = json.loads(result)
return result
def _get_generator(self, completed_event):
"""Takes a completed event as defined by floto.History.get_events_for_decision and returns
the corresponding floto.specs.task.Generator if generator is found with id_.
Returns
-------
generator: <floto.specs.task.Generator>
"""
g = None
if completed_event['eventType'] == 'ActivityTaskCompleted':
activity_id = self.history.get_id_task_event(completed_event)
task = self.tasks_by_id[activity_id]
if isinstance(task, floto.specs.task.Generator):
g = task
return g
def _add_tasks_to_execution_graph(self, tasks):
logger.debug('Add tasks to execution graph: {}'.format(tasks))
for task in tasks:
self.execution_graph.add_task(task.id_)
for task in tasks:
if task.requires:
self.execution_graph.add_dependencies(task.id_, task.requires)
def _build_execution_graph(self):
self.execution_graph = floto.decider.ExecutionGraph()
self._add_tasks_to_execution_graph(self.activity_tasks)
if any([isinstance(t, floto.specs.task.Generator) for t in self.activity_tasks]):
events = self.history.get_events_for_decision(1, self.last_event_id)
completed = events['completed']
self._update_execution_graph_with_completed_events(completed)
if not self.execution_graph.is_acyclic():
raise ValueError('Execution graph contains cycle')
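# Hedged illustration (not part of DecisionBuilder): _decompress_result above
# expects a generator result that was gzip-compressed and encoded as
# 'x'-separated hex byte values. A minimal counterpart encoder and round trip,
# assuming that convention; gzip and json are already imported at the top of
# this module, and compress_result_example is a hypothetical helper name.
def compress_result_example(tasks_serializable):
    """Encode a JSON-serializable object the way _decompress_result decodes it."""
    raw = json.dumps(tasks_serializable).encode()
    return 'x'.join('{:x}'.format(b) for b in gzip.compress(raw))

if __name__ == '__main__':
    payload = [{'type': 'floto.specs.task.ActivityTask', 'id_': 'a1'}]
    encoded = compress_result_example(payload)
    decoded = json.loads(gzip.decompress(
        bytes([int(c, 16) for c in encoded.split('x')])).decode())
    assert decoded == payload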
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This module implements a discovery function for Denon AVR receivers.
:copyright: (c) 2016 by Oliver Goetz.
:license: MIT, see LICENSE for more details.
"""
import logging
import asyncio
import socket
import re
import xml.etree.ElementTree as ET
from typing import Dict, List, Optional, Tuple, Set
from urllib.parse import urlparse
import httpx
import netifaces
from defusedxml import DefusedXmlException
from defusedxml.ElementTree import fromstring, ParseError
_LOGGER = logging.getLogger(__name__)
SSDP_ADDR = "239.255.255.250"
SSDP_PORT = 1900
SSDP_MX = 2
SSDP_TARGET = (SSDP_ADDR, SSDP_PORT)
SSDP_ST_1 = "ssdp:all"
SSDP_ST_2 = "upnp:rootdevice"
SSDP_ST_3 = "urn:schemas-upnp-org:device:MediaRenderer:1"
SSDP_ST_LIST = (SSDP_ST_1, SSDP_ST_2, SSDP_ST_3)
SSDP_LOCATION_PATTERN = re.compile(r'(?<=LOCATION:\s).+?(?=\r)')
SCPD_XMLNS = "{urn:schemas-upnp-org:device-1-0}"
SCPD_DEVICE = "{xmlns}device".format(xmlns=SCPD_XMLNS)
SCPD_DEVICELIST = "{xmlns}deviceList".format(xmlns=SCPD_XMLNS)
SCPD_DEVICETYPE = "{xmlns}deviceType".format(xmlns=SCPD_XMLNS)
SCPD_MANUFACTURER = "{xmlns}manufacturer".format(xmlns=SCPD_XMLNS)
SCPD_MODELNAME = "{xmlns}modelName".format(xmlns=SCPD_XMLNS)
SCPD_SERIALNUMBER = "{xmlns}serialNumber".format(xmlns=SCPD_XMLNS)
SCPD_FRIENDLYNAME = "{xmlns}friendlyName".format(xmlns=SCPD_XMLNS)
SCPD_PRESENTATIONURL = "{xmlns}presentationURL".format(xmlns=SCPD_XMLNS)
SUPPORTED_DEVICETYPES = [
"urn:schemas-upnp-org:device:MediaRenderer:1",
"urn:schemas-upnp-org:device:MediaServer:1",
]
SUPPORTED_MANUFACTURERS = ["Denon", "DENON", "DENON PROFESSIONAL", "Marantz"]
def ssdp_request(ssdp_st: str, ssdp_mx: int = SSDP_MX) -> bytes:
"""Return request bytes for given st and mx."""
return "\r\n".join([
'M-SEARCH * HTTP/1.1',
'ST: {}'.format(ssdp_st),
'MX: {:d}'.format(ssdp_mx),
'MAN: "ssdp:discover"',
'HOST: {}:{}'.format(*SSDP_TARGET),
'', '']).encode('utf-8')
def get_local_ips() -> List[str]:
"""Get IPs of local network adapters."""
ips = []
# pylint: disable=c-extension-no-member
for interface in netifaces.interfaces():
addresses = netifaces.ifaddresses(interface)
for address in addresses.get(netifaces.AF_INET, []):
ips.append(address["addr"])
return ips
async def async_identify_denonavr_receivers() -> List[Dict]:
"""
Identify DenonAVR using SSDP and SCPD queries.
Returns a list of dictionaries which includes all discovered Denon AVR
devices with keys "host", "modelName", "friendlyName", "presentationURL".
"""
# Sending SSDP broadcast message to get resource urls from devices
urls = await async_send_ssdp_broadcast()
# Check which responding device is a DenonAVR device and prepare output
receivers = []
for url in urls:
try:
async with httpx.AsyncClient() as client:
res = await client.get(url, timeout=5.0)
res.raise_for_status()
except httpx.HTTPError:
continue
else:
receiver = evaluate_scpd_xml(url, res.text)
if receiver is not None:
receivers.append(receiver)
return receivers
async def async_send_ssdp_broadcast() -> Set[str]:
"""
Send SSDP broadcast messages to discover UPnP devices.
Returns a set of SCPD XML resource urls for all discovered devices.
"""
# Send up to three different broadcast messages
ips = get_local_ips()
# Prepare output of responding devices
urls = set()
tasks = []
for ip_addr in ips:
tasks.append(async_send_ssdp_broadcast_ip(ip_addr))
results = await asyncio.gather(*tasks)
for result in results:
_LOGGER.debug("SSDP broadcast result received: %s", result)
urls = urls.union(result)
_LOGGER.debug("Following devices found: %s", urls)
return urls
async def async_send_ssdp_broadcast_ip(ip_addr: str) -> Set[str]:
"""Send SSDP broadcast messages to a single IP."""
# Ignore link-local 169.254.0.0/16 addresses
if re.search(r"^169\.254\.", ip_addr):
return set()
# Prepare socket
sock = socket.socket(
socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
sock.bind((ip_addr, 0))
# Get asyncio loop
loop = asyncio.get_event_loop()
transport, protocol = await loop.create_datagram_endpoint(
DenonAVRSSDP, sock=sock)
# Wait for the timeout period
await asyncio.sleep(SSDP_MX)
# Close the connection
transport.close()
_LOGGER.debug(
"Got %s results after SSDP queries using ip %s",
len(protocol.urls), ip_addr)
return protocol.urls
def evaluate_scpd_xml(url: str, body: str) -> Optional[Dict]:
"""
Evaluate SCPD XML.
Returns dictionary with keys "host", "modelName", "friendlyName" and
"presentationURL" if a Denon AVR device was found and "None" if not.
"""
try:
root = fromstring(body)
# Look for manufacturer "Denon" in response.
# Using "try" in case tags are not available in XML
device = {}
device_xml = None
device["manufacturer"] = (
root.find(SCPD_DEVICE).find(SCPD_MANUFACTURER).text)
_LOGGER.debug(
"Device %s has manufacturer %s", url, device["manufacturer"])
if not device["manufacturer"] in SUPPORTED_MANUFACTURERS:
return None
if (root.find(SCPD_DEVICE).find(SCPD_DEVICETYPE).text
in SUPPORTED_DEVICETYPES):
device_xml = root.find(SCPD_DEVICE)
elif root.find(SCPD_DEVICE).find(SCPD_DEVICELIST) is not None:
for dev in root.find(SCPD_DEVICE).find(SCPD_DEVICELIST):
if (dev.find(SCPD_DEVICETYPE).text in SUPPORTED_DEVICETYPES
and dev.find(SCPD_SERIALNUMBER) is not None):
device_xml = dev
break
if device_xml is None:
return None
if device_xml.find(SCPD_PRESENTATIONURL) is not None:
device["host"] = urlparse(
device_xml.find(
SCPD_PRESENTATIONURL).text).hostname
device["presentationURL"] = (
device_xml.find(SCPD_PRESENTATIONURL).text)
else:
device["host"] = urlparse(url).hostname
device["modelName"] = (
device_xml.find(SCPD_MODELNAME).text)
device["serialNumber"] = (
device_xml.find(SCPD_SERIALNUMBER).text)
device["friendlyName"] = (
device_xml.find(SCPD_FRIENDLYNAME).text)
return device
except (AttributeError, ValueError, ET.ParseError, DefusedXmlException,
ParseError, UnicodeDecodeError) as err:
_LOGGER.error(
"Error occurred during evaluation of SCPD XML from URI %s: %s",
url, err)
return None
class DenonAVRSSDP(asyncio.DatagramProtocol):
"""Implements datagram protocol for SSDP discovery of Denon AVR devices."""
def __init__(self) -> None:
"""Create instance."""
self.urls = set()
def connection_made(
self, transport: asyncio.DatagramTransport) -> None:
"""Send SSDP request when connection was made."""
# Prepare SSDP and send broadcast message
for ssdp_st in SSDP_ST_LIST:
request = ssdp_request(ssdp_st)
transport.sendto(request, SSDP_TARGET)
_LOGGER.debug("SSDP request sent %s", request)
def datagram_received(self, data: bytes, addr: Tuple[str, int]) -> None:
"""Receive responses to SSDP call."""
# Some string operations to get the receivers URL
# which could be found between LOCATION and end of line of the response
_LOGGER.debug("Response to SSDP call received: %s", data)
data_text = data.decode("utf-8")
match = SSDP_LOCATION_PATTERN.search(data_text)
if match:
self.urls.add(match.group(0))
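# Hedged usage sketch (not part of this module's public surface): discovery can
# be driven from a small script roughly like this, assuming asyncio.run() is
# available (Python 3.7+) and no other event loop is running.
if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    for receiver in asyncio.run(async_identify_denonavr_receivers()):
        print(receiver["host"], receiver["modelName"], receiver["friendlyName"])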
|
|
"""Test the bootstrapping."""
# pylint: disable=protected-access
import asyncio
import glob
import os
from unittest.mock import Mock, patch
import pytest
from homeassistant import bootstrap, core, runner
from homeassistant.bootstrap import SIGNAL_BOOTSTRAP_INTEGRATONS
import homeassistant.config as config_util
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.dispatcher import async_dispatcher_connect
import homeassistant.util.dt as dt_util
from tests.common import (
MockModule,
MockPlatform,
get_test_config_dir,
mock_coro,
mock_entity_platform,
mock_integration,
)
ORIG_TIMEZONE = dt_util.DEFAULT_TIME_ZONE
VERSION_PATH = os.path.join(get_test_config_dir(), config_util.VERSION_FILE)
@pytest.fixture(autouse=True)
def apply_mock_storage(hass_storage):
"""Apply the storage mock."""
@pytest.fixture(autouse=True)
async def apply_stop_hass(stop_hass):
"""Make sure all hass are stopped."""
@pytest.fixture(autouse=True)
def mock_http_start_stop():
"""Mock HTTP start and stop."""
with patch(
"homeassistant.components.http.start_http_server_and_save_config"
), patch("homeassistant.components.http.HomeAssistantHTTP.stop"):
yield
@patch("homeassistant.bootstrap.async_enable_logging", Mock())
async def test_home_assistant_core_config_validation(hass):
"""Test if we pass in wrong information for HA conf."""
# Extensive HA core config validation testing is done elsewhere; here we only check that an invalid core config aborts setup.
result = await bootstrap.async_from_config_dict(
{"homeassistant": {"latitude": "some string"}}, hass
)
assert result is None
async def test_async_enable_logging(hass, caplog):
"""Test to ensure logging is migrated to the queue handlers."""
with patch("logging.getLogger"), patch(
"homeassistant.bootstrap.async_activate_log_queue_handler"
) as mock_async_activate_log_queue_handler, patch(
"homeassistant.bootstrap.logging.handlers.RotatingFileHandler.doRollover",
side_effect=OSError,
):
bootstrap.async_enable_logging(hass)
mock_async_activate_log_queue_handler.assert_called_once()
mock_async_activate_log_queue_handler.reset_mock()
bootstrap.async_enable_logging(
hass,
log_rotate_days=5,
log_file="test.log",
)
mock_async_activate_log_queue_handler.assert_called_once()
for f in glob.glob("test.log*"):
os.remove(f)
for f in glob.glob("testing_config/home-assistant.log*"):
os.remove(f)
assert "Error rolling over log file" in caplog.text
async def test_load_hassio(hass):
"""Test that we load Hass.io component."""
with patch.dict(os.environ, {}, clear=True):
assert bootstrap._get_domains(hass, {}) == set()
with patch.dict(os.environ, {"HASSIO": "1"}):
assert bootstrap._get_domains(hass, {}) == {"hassio"}
@pytest.mark.parametrize("load_registries", [False])
async def test_empty_setup(hass):
"""Test an empty set up loads the core."""
await bootstrap.async_from_config_dict({}, hass)
for domain in bootstrap.CORE_INTEGRATIONS:
assert domain in hass.config.components, domain
async def test_core_failure_loads_safe_mode(hass, caplog):
"""Test failing core setup aborts further setup."""
with patch(
"homeassistant.components.homeassistant.async_setup",
return_value=mock_coro(False),
):
await bootstrap.async_from_config_dict({"group": {}}, hass)
assert "core failed to initialize" in caplog.text
# We aborted early, group not set up
assert "group" not in hass.config.components
@pytest.mark.parametrize("load_registries", [False])
async def test_setting_up_config(hass):
"""Test we set up domains in config."""
await bootstrap._async_set_up_integrations(
hass, {"group hello": {}, "homeassistant": {}}
)
assert "group" in hass.config.components
@pytest.mark.parametrize("load_registries", [False])
async def test_setup_after_deps_all_present(hass):
"""Test after_dependencies when all present."""
order = []
def gen_domain_setup(domain):
async def async_setup(hass, config):
order.append(domain)
return True
return async_setup
mock_integration(
hass, MockModule(domain="root", async_setup=gen_domain_setup("root"))
)
mock_integration(
hass,
MockModule(
domain="first_dep",
async_setup=gen_domain_setup("first_dep"),
partial_manifest={"after_dependencies": ["root"]},
),
)
mock_integration(
hass,
MockModule(
domain="second_dep",
async_setup=gen_domain_setup("second_dep"),
partial_manifest={"after_dependencies": ["first_dep"]},
),
)
with patch(
"homeassistant.components.logger.async_setup", gen_domain_setup("logger")
):
await bootstrap._async_set_up_integrations(
hass, {"root": {}, "first_dep": {}, "second_dep": {}, "logger": {}}
)
assert "root" in hass.config.components
assert "first_dep" in hass.config.components
assert "second_dep" in hass.config.components
assert order == ["logger", "root", "first_dep", "second_dep"]
@pytest.mark.parametrize("load_registries", [False])
async def test_setup_after_deps_in_stage_1_ignored(hass):
"""Test after_dependencies are ignored in stage 1."""
# This test relies on "cloud" being a stage 1 integration.
assert "cloud" in bootstrap.STAGE_1_INTEGRATIONS
order = []
def gen_domain_setup(domain):
async def async_setup(hass, config):
order.append(domain)
return True
return async_setup
mock_integration(
hass,
MockModule(
domain="normal_integration",
async_setup=gen_domain_setup("normal_integration"),
partial_manifest={"after_dependencies": ["an_after_dep"]},
),
)
mock_integration(
hass,
MockModule(
domain="an_after_dep",
async_setup=gen_domain_setup("an_after_dep"),
),
)
mock_integration(
hass,
MockModule(
domain="cloud",
async_setup=gen_domain_setup("cloud"),
partial_manifest={"after_dependencies": ["normal_integration"]},
),
)
await bootstrap._async_set_up_integrations(
hass, {"cloud": {}, "normal_integration": {}, "an_after_dep": {}}
)
assert "normal_integration" in hass.config.components
assert "cloud" in hass.config.components
assert order == ["cloud", "an_after_dep", "normal_integration"]
@pytest.mark.parametrize("load_registries", [False])
async def test_setup_after_deps_via_platform(hass):
"""Test after_dependencies set up via platform."""
order = []
after_dep_event = asyncio.Event()
def gen_domain_setup(domain):
async def async_setup(hass, config):
if domain == "after_dep_of_platform_int":
await after_dep_event.wait()
order.append(domain)
return True
return async_setup
mock_integration(
hass,
MockModule(
domain="after_dep_of_platform_int",
async_setup=gen_domain_setup("after_dep_of_platform_int"),
),
)
mock_integration(
hass,
MockModule(
domain="platform_int",
async_setup=gen_domain_setup("platform_int"),
partial_manifest={"after_dependencies": ["after_dep_of_platform_int"]},
),
)
mock_entity_platform(hass, "light.platform_int", MockPlatform())
@core.callback
def continue_loading(_):
"""When light component loaded, continue other loading."""
after_dep_event.set()
hass.bus.async_listen_once("component_loaded", continue_loading)
await bootstrap._async_set_up_integrations(
hass, {"light": {"platform": "platform_int"}, "after_dep_of_platform_int": {}}
)
assert "light" in hass.config.components
assert "after_dep_of_platform_int" in hass.config.components
assert "platform_int" in hass.config.components
assert order == ["after_dep_of_platform_int", "platform_int"]
@pytest.mark.parametrize("load_registries", [False])
async def test_setup_after_deps_not_trigger_load(hass):
"""Test after_dependencies does not trigger loading it."""
order = []
def gen_domain_setup(domain):
async def async_setup(hass, config):
order.append(domain)
return True
return async_setup
mock_integration(
hass, MockModule(domain="root", async_setup=gen_domain_setup("root"))
)
mock_integration(
hass,
MockModule(
domain="first_dep",
async_setup=gen_domain_setup("first_dep"),
partial_manifest={"after_dependencies": ["root"]},
),
)
mock_integration(
hass,
MockModule(
domain="second_dep",
async_setup=gen_domain_setup("second_dep"),
partial_manifest={"after_dependencies": ["first_dep"]},
),
)
await bootstrap._async_set_up_integrations(hass, {"root": {}, "second_dep": {}})
assert "root" in hass.config.components
assert "first_dep" not in hass.config.components
assert "second_dep" in hass.config.components
@pytest.mark.parametrize("load_registries", [False])
async def test_setup_after_deps_not_present(hass):
"""Test after_dependencies when referenced integration doesn't exist."""
order = []
def gen_domain_setup(domain):
async def async_setup(hass, config):
order.append(domain)
return True
return async_setup
mock_integration(
hass, MockModule(domain="root", async_setup=gen_domain_setup("root"))
)
mock_integration(
hass,
MockModule(
domain="second_dep",
async_setup=gen_domain_setup("second_dep"),
partial_manifest={"after_dependencies": ["first_dep"]},
),
)
await bootstrap._async_set_up_integrations(
hass, {"root": {}, "first_dep": {}, "second_dep": {}}
)
assert "root" in hass.config.components
assert "first_dep" not in hass.config.components
assert "second_dep" in hass.config.components
assert order == ["root", "second_dep"]
@pytest.fixture
def mock_is_virtual_env():
"""Mock enable logging."""
with patch(
"homeassistant.bootstrap.is_virtual_env", return_value=False
) as is_virtual_env:
yield is_virtual_env
@pytest.fixture
def mock_enable_logging():
"""Mock enable logging."""
with patch("homeassistant.bootstrap.async_enable_logging") as enable_logging:
yield enable_logging
@pytest.fixture
def mock_mount_local_lib_path():
"""Mock enable logging."""
with patch(
"homeassistant.bootstrap.async_mount_local_lib_path"
) as mount_local_lib_path:
yield mount_local_lib_path
@pytest.fixture
def mock_process_ha_config_upgrade():
"""Mock enable logging."""
with patch(
"homeassistant.config.process_ha_config_upgrade"
) as process_ha_config_upgrade:
yield process_ha_config_upgrade
@pytest.fixture
def mock_ensure_config_exists():
"""Mock enable logging."""
with patch(
"homeassistant.config.async_ensure_config_exists", return_value=True
) as ensure_config_exists:
yield ensure_config_exists
async def test_setup_hass(
mock_enable_logging,
mock_is_virtual_env,
mock_mount_local_lib_path,
mock_ensure_config_exists,
mock_process_ha_config_upgrade,
caplog,
loop,
):
"""Test it works."""
verbose = Mock()
log_rotate_days = Mock()
log_file = Mock()
log_no_color = Mock()
with patch(
"homeassistant.config.async_hass_config_yaml",
return_value={"browser": {}, "frontend": {}},
), patch.object(bootstrap, "LOG_SLOW_STARTUP_INTERVAL", 5000):
hass = await bootstrap.async_setup_hass(
runner.RuntimeConfig(
config_dir=get_test_config_dir(),
verbose=verbose,
log_rotate_days=log_rotate_days,
log_file=log_file,
log_no_color=log_no_color,
skip_pip=True,
safe_mode=False,
),
)
assert "Waiting on integrations to complete setup" not in caplog.text
assert "browser" in hass.config.components
assert "safe_mode" not in hass.config.components
assert len(mock_enable_logging.mock_calls) == 1
assert mock_enable_logging.mock_calls[0][1] == (
hass,
verbose,
log_rotate_days,
log_file,
log_no_color,
)
assert len(mock_mount_local_lib_path.mock_calls) == 1
assert len(mock_ensure_config_exists.mock_calls) == 1
assert len(mock_process_ha_config_upgrade.mock_calls) == 1
async def test_setup_hass_takes_longer_than_log_slow_startup(
mock_enable_logging,
mock_is_virtual_env,
mock_mount_local_lib_path,
mock_ensure_config_exists,
mock_process_ha_config_upgrade,
caplog,
loop,
):
"""Test it works."""
verbose = Mock()
log_rotate_days = Mock()
log_file = Mock()
log_no_color = Mock()
async def _async_setup_that_blocks_startup(*args, **kwargs):
await asyncio.sleep(0.6)
return True
with patch(
"homeassistant.config.async_hass_config_yaml",
return_value={"browser": {}, "frontend": {}},
), patch.object(bootstrap, "LOG_SLOW_STARTUP_INTERVAL", 0.3), patch.object(
bootstrap, "SLOW_STARTUP_CHECK_INTERVAL", 0.05
), patch(
"homeassistant.components.frontend.async_setup",
side_effect=_async_setup_that_blocks_startup,
):
await bootstrap.async_setup_hass(
runner.RuntimeConfig(
config_dir=get_test_config_dir(),
verbose=verbose,
log_rotate_days=log_rotate_days,
log_file=log_file,
log_no_color=log_no_color,
skip_pip=True,
safe_mode=False,
),
)
assert "Waiting on integrations to complete setup" in caplog.text
async def test_setup_hass_invalid_yaml(
mock_enable_logging,
mock_is_virtual_env,
mock_mount_local_lib_path,
mock_ensure_config_exists,
mock_process_ha_config_upgrade,
loop,
):
"""Test it works."""
with patch(
"homeassistant.config.async_hass_config_yaml", side_effect=HomeAssistantError
):
hass = await bootstrap.async_setup_hass(
runner.RuntimeConfig(
config_dir=get_test_config_dir(),
verbose=False,
log_rotate_days=10,
log_file="",
log_no_color=False,
skip_pip=True,
safe_mode=False,
),
)
assert "safe_mode" in hass.config.components
assert len(mock_mount_local_lib_path.mock_calls) == 0
async def test_setup_hass_config_dir_nonexistent(
mock_enable_logging,
mock_is_virtual_env,
mock_mount_local_lib_path,
mock_ensure_config_exists,
mock_process_ha_config_upgrade,
loop,
):
"""Test it works."""
mock_ensure_config_exists.return_value = False
assert (
await bootstrap.async_setup_hass(
runner.RuntimeConfig(
config_dir=get_test_config_dir(),
verbose=False,
log_rotate_days=10,
log_file="",
log_no_color=False,
skip_pip=True,
safe_mode=False,
),
)
is None
)
async def test_setup_hass_safe_mode(
mock_enable_logging,
mock_is_virtual_env,
mock_mount_local_lib_path,
mock_ensure_config_exists,
mock_process_ha_config_upgrade,
loop,
):
"""Test it works."""
with patch("homeassistant.components.browser.setup") as browser_setup, patch(
"homeassistant.config_entries.ConfigEntries.async_domains",
return_value=["browser"],
):
hass = await bootstrap.async_setup_hass(
runner.RuntimeConfig(
config_dir=get_test_config_dir(),
verbose=False,
log_rotate_days=10,
log_file="",
log_no_color=False,
skip_pip=True,
safe_mode=True,
),
)
assert "safe_mode" in hass.config.components
assert len(mock_mount_local_lib_path.mock_calls) == 0
# Validate we didn't try to set up config entry.
assert "browser" not in hass.config.components
assert len(browser_setup.mock_calls) == 0
async def test_setup_hass_invalid_core_config(
mock_enable_logging,
mock_is_virtual_env,
mock_mount_local_lib_path,
mock_ensure_config_exists,
mock_process_ha_config_upgrade,
loop,
):
"""Test it works."""
with patch(
"homeassistant.config.async_hass_config_yaml",
return_value={"homeassistant": {"non-existing": 1}},
):
hass = await bootstrap.async_setup_hass(
runner.RuntimeConfig(
config_dir=get_test_config_dir(),
verbose=False,
log_rotate_days=10,
log_file="",
log_no_color=False,
skip_pip=True,
safe_mode=False,
),
)
assert "safe_mode" in hass.config.components
async def test_setup_safe_mode_if_no_frontend(
mock_enable_logging,
mock_is_virtual_env,
mock_mount_local_lib_path,
mock_ensure_config_exists,
mock_process_ha_config_upgrade,
loop,
):
"""Test we setup safe mode if frontend didn't load."""
verbose = Mock()
log_rotate_days = Mock()
log_file = Mock()
log_no_color = Mock()
with patch(
"homeassistant.config.async_hass_config_yaml",
return_value={
"homeassistant": {
"internal_url": "http://192.168.1.100:8123",
"external_url": "https://abcdef.ui.nabu.casa",
},
"map": {},
"person": {"invalid": True},
},
):
hass = await bootstrap.async_setup_hass(
runner.RuntimeConfig(
config_dir=get_test_config_dir(),
verbose=verbose,
log_rotate_days=log_rotate_days,
log_file=log_file,
log_no_color=log_no_color,
skip_pip=True,
safe_mode=False,
),
)
assert "safe_mode" in hass.config.components
assert hass.config.config_dir == get_test_config_dir()
assert hass.config.skip_pip
assert hass.config.internal_url == "http://192.168.1.100:8123"
assert hass.config.external_url == "https://abcdef.ui.nabu.casa"
@pytest.mark.parametrize("load_registries", [False])
async def test_empty_integrations_list_is_only_sent_at_the_end_of_bootstrap(hass):
"""Test empty integrations list is only sent at the end of bootstrap."""
order = []
def gen_domain_setup(domain):
async def async_setup(hass, config):
order.append(domain)
await asyncio.sleep(0.1)
async def _background_task():
await asyncio.sleep(0.2)
await hass.async_create_task(_background_task())
return True
return async_setup
mock_integration(
hass,
MockModule(
domain="normal_integration",
async_setup=gen_domain_setup("normal_integration"),
partial_manifest={"after_dependencies": ["an_after_dep"]},
),
)
mock_integration(
hass,
MockModule(
domain="an_after_dep",
async_setup=gen_domain_setup("an_after_dep"),
),
)
integrations = []
@core.callback
def _bootstrap_integrations(data):
integrations.append(data)
async_dispatcher_connect(
hass, SIGNAL_BOOTSTRAP_INTEGRATONS, _bootstrap_integrations
)
with patch.object(bootstrap, "SLOW_STARTUP_CHECK_INTERVAL", 0.05):
await bootstrap._async_set_up_integrations(
hass, {"normal_integration": {}, "an_after_dep": {}}
)
await hass.async_block_till_done()
assert integrations[0] != {}
assert "an_after_dep" in integrations[0]
assert integrations[-3] != {}
assert integrations[-1] == {}
assert "normal_integration" in hass.config.components
assert order == ["an_after_dep", "normal_integration"]
@pytest.mark.parametrize("load_registries", [False])
async def test_warning_logged_on_wrap_up_timeout(hass, caplog):
"""Test we log a warning on bootstrap timeout."""
def gen_domain_setup(domain):
async def async_setup(hass, config):
await asyncio.sleep(0.1)
async def _background_task():
await asyncio.sleep(0.2)
await hass.async_create_task(_background_task())
return True
return async_setup
mock_integration(
hass,
MockModule(
domain="normal_integration",
async_setup=gen_domain_setup("normal_integration"),
partial_manifest={},
),
)
with patch.object(bootstrap, "WRAP_UP_TIMEOUT", 0):
await bootstrap._async_set_up_integrations(hass, {"normal_integration": {}})
await hass.async_block_till_done()
assert "Setup timed out for bootstrap - moving forward" in caplog.text
|
|
#!/usr/bin/env python
# -*- mode: python; encoding: utf-8 -*-
"""Test the flow_management interface."""
from grr.gui import runtests_test
from grr.lib import action_mocks
from grr.lib import flags
from grr.lib import flow
from grr.lib import test_lib
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import structs as rdf_structs
from grr.proto import tests_pb2
class RecursiveTestFlowArgs(rdf_structs.RDFProtoStruct):
protobuf = tests_pb2.RecursiveTestFlowArgs
class RecursiveTestFlow(flow.GRRFlow):
"""A test flow which starts some subflows."""
args_type = RecursiveTestFlowArgs
@flow.StateHandler(next_state="End")
def Start(self):
if self.args.depth < 2:
for i in range(2):
self.Log("Subflow call %d", i)
self.CallFlow("RecursiveTestFlow", depth=self.args.depth + 1,
next_state="End")
class FlowWithOneStatEntryResult(flow.GRRFlow):
"""Test flow that calls SendReply once with a StatEntry value."""
@flow.StateHandler()
def Start(self):
self.SendReply(rdf_client.StatEntry(aff4path="aff4:/some/unique/path"))
class FlowWithOneNetworkConnectionResult(flow.GRRFlow):
"""Test flow that calls SendReply once with a NetworkConnection value."""
@flow.StateHandler()
def Start(self):
self.SendReply(rdf_client.NetworkConnection(pid=42))
class TestFlowManagement(test_lib.GRRSeleniumTest):
"""Test the flow management GUI."""
def testFlowManagement(self):
"""Test that scheduling flows works."""
with self.ACLChecksDisabled():
self.GrantClientApproval("C.0000000000000001")
self.Open("/")
self.Type("client_query", "C.0000000000000001")
self.Click("client_query_submit")
self.WaitUntilEqual(u"C.0000000000000001",
self.GetText, "css=span[type=subject]")
# Choose client 1
self.Click("css=td:contains('0001')")
# First screen should be the Host Information already.
self.WaitUntil(self.IsTextPresent, "VFSGRRClient")
self.Click("css=a[grrtarget=LaunchFlows]")
self.Click("css=#_Processes")
self.assertEqual("ListProcesses", self.GetText("link=ListProcesses"))
self.Click("link=ListProcesses")
self.WaitUntil(self.IsTextPresent, "C.0000000000000001")
self.WaitUntil(self.IsTextPresent, "Prototype: ListProcesses")
self.Click("css=button.Launch")
self.WaitUntil(self.IsTextPresent, "Launched Flow ListProcesses")
self.Click("css=#_Network")
self.assertEqual("Netstat", self.GetText("link=Netstat"))
self.Click("css=#_Browser")
# Wait until the tree has expanded.
self.WaitUntil(self.IsTextPresent, "FirefoxHistory")
# Check that we can get a file in Chinese
self.Click("css=#_Filesystem")
# Wait until the tree has expanded.
self.WaitUntil(self.IsTextPresent, "UpdateSparseImageChunks")
self.Click("link=GetFile")
self.Select("css=.form-group:has(> label:contains('Pathtype')) select",
"OS")
self.Type("css=.form-group:has(> label:contains('Path')) input",
u"/dev/c/msn[1].exe")
self.Click("css=button.Launch")
self.WaitUntil(self.IsTextPresent, "Launched Flow GetFile")
# Test that recursive tests are shown in a tree table.
flow.GRRFlow.StartFlow(
client_id="aff4:/C.0000000000000001", flow_name="RecursiveTestFlow",
token=self.token)
self.Click("css=a:contains('Manage launched flows')")
self.WaitUntilEqual("RecursiveTestFlow", self.GetText,
"//table/tbody/tr[1]/td[3]")
self.WaitUntilEqual("GetFile", self.GetText,
"//table/tbody/tr[2]/td[3]")
# Check that child flows are not shown.
self.assertNotEqual(self.GetText("//table/tbody/tr[2]/td[3]"),
"RecursiveTestFlow")
# Click on the first tree_closed to open it.
self.Click("css=.tree_closed")
self.WaitUntilEqual("RecursiveTestFlow", self.GetText,
"//table/tbody/tr[1]/td[3]")
self.WaitUntilEqual("RecursiveTestFlow", self.GetText,
"//table/tbody/tr[2]/td[3]")
# Select the requests tab
self.Click("Requests")
self.Click("css=td:contains(GetFile)")
self.WaitUntil(self.IsElementPresent,
"css=td:contains(flow:request:00000001)")
# Check that a StatFile client action was issued as part of the GetFile
# flow.
self.WaitUntil(self.IsElementPresent,
"css=.tab-content td.proto_value:contains(StatFile)")
def testLogsCanBeOpenedByClickingOnLogsTab(self):
client_id = rdf_client.ClientURN("C.0000000000000001")
# RecursiveTestFlow doesn't send any results back.
with self.ACLChecksDisabled():
for _ in test_lib.TestFlowHelper(
"RecursiveTestFlow", action_mocks.ActionMock(),
client_id=client_id, token=self.token):
pass
self.GrantClientApproval(client_id)
self.Open("/#c=C.0000000000000001")
self.Click("css=a:contains('Manage launched flows')")
self.Click("css=td:contains('RecursiveTestFlow')")
self.Click("css=li[renderer=FlowLogView]")
self.WaitUntil(self.IsTextPresent, "Subflow call 1")
self.WaitUntil(self.IsTextPresent, "Subflow call 0")
def testResultsAreDisplayedInResultsTab(self):
client_id = rdf_client.ClientURN("C.0000000000000001")
with self.ACLChecksDisabled():
for _ in test_lib.TestFlowHelper(
"FlowWithOneStatEntryResult", action_mocks.ActionMock(),
client_id=client_id, token=self.token):
pass
self.GrantClientApproval(client_id)
self.Open("/#c=C.0000000000000001")
self.Click("css=a:contains('Manage launched flows')")
self.Click("css=td:contains('FlowWithOneStatEntryResult')")
self.Click("css=#Results")
self.WaitUntil(self.IsTextPresent, "aff4:/some/unique/path")
def testEmptyTableIsDisplayedInResultsWhenNoResults(self):
client_id = "C.0000000000000001"
with self.ACLChecksDisabled():
flow.GRRFlow.StartFlow(flow_name="FlowWithOneStatEntryResult",
client_id=client_id, sync=False, token=self.token)
self.GrantClientApproval(client_id)
self.Open("/#c=" + client_id)
self.Click("css=a:contains('Manage launched flows')")
self.Click("css=td:contains('FlowWithOneStatEntryResult')")
self.Click("css=#Results")
self.WaitUntil(self.IsElementPresent, "css=#main_bottomPane table thead "
"th:contains('Value')")
def testExportTabIsEnabledForStatEntryResults(self):
client_id = rdf_client.ClientURN("C.0000000000000001")
with self.ACLChecksDisabled():
for _ in test_lib.TestFlowHelper(
"FlowWithOneStatEntryResult", action_mocks.ActionMock(),
client_id=client_id, token=self.token):
pass
self.GrantClientApproval(client_id)
self.Open("/#c=C.0000000000000001")
self.Click("css=a:contains('Manage launched flows')")
self.Click("css=td:contains('FlowWithOneStatEntryResult')")
self.Click("css=#Export")
self.WaitUntil(
self.IsTextPresent,
"--username test --reason 'Running tests' collection_files "
"--path aff4:/C.0000000000000001/analysis/FlowWithOneStatEntryResult")
def testExportTabIsDisabledWhenNoResults(self):
client_id = rdf_client.ClientURN("C.0000000000000001")
# RecursiveTestFlow doesn't send any results back.
with self.ACLChecksDisabled():
for _ in test_lib.TestFlowHelper(
"RecursiveTestFlow", action_mocks.ActionMock(),
client_id=client_id, token=self.token):
pass
self.GrantClientApproval(client_id)
self.Open("/#c=C.0000000000000001")
self.Click("css=a:contains('Manage launched flows')")
self.Click("css=td:contains('RecursiveTestFlow')")
self.WaitUntil(self.IsElementPresent, "css=#Export.disabled")
def testExportTabIsDisabledForNonFileResults(self):
client_id = rdf_client.ClientURN("C.0000000000000001")
with self.ACLChecksDisabled():
for _ in test_lib.TestFlowHelper(
"FlowWithOneNetworkConnectionResult", action_mocks.ActionMock(),
client_id=client_id, token=self.token):
pass
self.GrantClientApproval(client_id)
self.Open("/#c=C.0000000000000001")
self.Click("css=a:contains('Manage launched flows')")
self.Click("css=td:contains('FlowWithOneNetworkConnectionResult')")
self.WaitUntil(self.IsElementPresent, "css=#Export.disabled")
def testCancelFlowWorksCorrectly(self):
"""Tests that cancelling flows works."""
with self.ACLChecksDisabled():
self.GrantClientApproval("C.0000000000000001")
flow.GRRFlow.StartFlow(client_id="aff4:/C.0000000000000001",
flow_name="RecursiveTestFlow",
token=self.token)
# Open client and find the flow
self.Open("/")
self.Type("client_query", "C.0000000000000001")
self.Click("client_query_submit")
self.WaitUntilEqual(u"C.0000000000000001",
self.GetText, "css=span[type=subject]")
self.Click("css=td:contains('0001')")
self.Click("css=a:contains('Manage launched flows')")
self.Click("css=td:contains('RecursiveTestFlow')")
self.Click("css=button[name=cancel_flow]")
# The window should be updated now
self.WaitUntil(self.IsTextPresent, "Cancelled in GUI")
def testGlobalFlowManagement(self):
"""Test that scheduling flows works."""
with self.ACLChecksDisabled():
self.CreateAdminUser("test")
self.Open("/")
self.Click("css=a[grrtarget=GlobalLaunchFlows]")
self.Click("css=#_Reporting")
self.assertEqual("RunReport", self.GetText("link=RunReport"))
self.Click("link=RunReport")
self.WaitUntil(self.IsTextPresent, "Report name")
def main(argv):
# Run the full test suite
runtests_test.SeleniumTestProgram(argv=argv)
if __name__ == "__main__":
flags.StartMain(main)
|
|
"""
This is an auxiliary script that is used to compute valid PLL values to set
the CPU frequency to a given value. The algorithm here is the same one that
appears as C code in the machine.freq() function.
"""
from __future__ import print_function
import re
def close_int(x):
return abs(x - round(x)) < 0.01
# original version that requires N/M to be an integer (for simplicity)
def compute_pll(hse, sys):
for P in (2, 4, 6, 8): # allowed values of P
Q = sys * P / 48
NbyM = sys * P / hse
# N/M and Q must be integers
if not (close_int(NbyM) and close_int(Q)):
continue
# VCO_OUT must be between 192MHz and 432MHz
if not (192 <= hse * NbyM <= 432):
continue
# compute M
M = int(192 // NbyM)
while hse > 2 * M or NbyM * M < 192:
M += 1
# VCO_IN must be between 1MHz and 2MHz (2MHz recommended)
if not (M <= hse):
continue
# compute N
N = NbyM * M
# N and Q are restricted
if not (192 <= N <= 432 and 2 <= Q <= 15):
continue
# found valid values
assert NbyM == N // M
return (M, N, P, Q)
# no valid values found
return None
# improved version that doesn't require N/M to be an integer
def compute_pll2(hse, sys, relax_pll48):
# Loop over the allowed values of P, looking for a valid PLL configuration
# that gives the desired "sys" frequency. We use floats for P to force
# floating point arithmetic on Python 2.
fallback = None
for P in (2.0, 4.0, 6.0, 8.0):
NbyM = sys * P / hse
# VCO_OUT must be between 192MHz and 432MHz
if not (192 <= hse * NbyM <= 432):
continue
# scan M
M = int(192 // NbyM) # starting value
while 2 * M < hse:
M += 1
# VCO_IN must be between 1MHz and 2MHz (2MHz recommended)
for M in range(M, hse + 1):
if NbyM * M < 191.99 or not close_int(NbyM * M):
continue
# compute N
N = NbyM * M
# N must be an integer
if not close_int(N):
continue
# N is restricted
if not (192 <= N <= 432):
continue
Q = (sys * P / 48)
# Q must be an integer in a set range
if not (2 <= Q <= 15):
continue
if not close_int(Q):
if int(M) == int(hse) and fallback is None:
# the values don't give 48MHz on PLL48 but are otherwise OK
fallback = M, N, P, int(Q)
continue
# found valid values
return (M, N, P, Q)
if relax_pll48:
# might have found values which don't give 48MHz on PLL48
return fallback
else:
# no valid values found which give 48MHz on PLL48
return None
def compute_derived(hse, pll):
M, N, P, Q = pll
vco_in = hse / M
vco_out = hse * N / M
pllck = hse / M * N / P
pll48ck = hse / M * N / Q
return (vco_in, vco_out, pllck, pll48ck)
def verify_pll(hse, pll):
M, N, P, Q = pll
vco_in, vco_out, pllck, pll48ck = compute_derived(hse, pll)
# verify ints
assert close_int(M)
assert close_int(N)
assert close_int(P)
assert close_int(Q)
# verify range
assert 2 <= M <= 63
assert 192 <= N <= 432
assert P in (2, 4, 6, 8)
assert 2 <= Q <= 15
assert 1 <= vco_in <= 2
assert 192 <= vco_out <= 432
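# Worked example (a minimal sketch; the 8 MHz HSE crystal and 168 MHz target
# SYSCLK used here are illustrative values, not tied to any particular board).
# It is wrapped in a function so that importing this script does not run it.
def _example_pll_for_8mhz_hse():
    pll = compute_pll2(8, 168, relax_pll48=False)
    # M=5, N=210, P=2, Q=7 satisfies every constraint checked above
    assert pll == (5, 210.0, 2.0, 7.0)
    verify_pll(8, pll)
    # these values correspond to VCO_IN=1.6MHz, VCO_OUT=336MHz,
    # PLLCK=168MHz and PLL48CK=48MHz
    return compute_derived(8, pll)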
def compute_pll_table(source_clk, relax_pll48):
valid_plls = []
for sysclk in range(2, 217, 2):
pll = compute_pll2(source_clk, sysclk, relax_pll48)
if pll is not None:
verify_pll(source_clk, pll)
valid_plls.append((sysclk, pll))
return valid_plls
def generate_c_table(hse, valid_plls):
valid_plls.sort()
print("// (M, P/2-1, SYS) values for %u MHz source" % hse)
print("static const uint16_t pll_freq_table[%u] = {" % len(valid_plls))
for sys, (M, N, P, Q) in valid_plls:
print(" (%u << 10) | (%u << 8) | %u," % (M, P // 2 - 1, sys))
print("};")
def print_table(hse, valid_plls):
print("HSE =", hse, "MHz")
print("sys : M N P Q : VCO_IN VCO_OUT PLLCK PLL48CK")
out_format = "%3u : %2u %.1f %.2f %.2f : %5.2f %6.2f %6.2f %6.2f"
for sys, pll in valid_plls:
print(out_format % ((sys,) + pll + compute_derived(hse, pll)))
print("found %u valid configurations" % len(valid_plls))
def search_header_for_hsx_values(filename, vals):
regex_inc = re.compile(r'#include "(boards/[A-Za-z0-9_./]+)"')
regex_def = re.compile(r'#define +(HSE_VALUE|HSI_VALUE) +\((\(uint32_t\))?([0-9]+)\)')
with open(filename) as f:
for line in f:
line = line.strip()
m = regex_inc.match(line)
if m:
# Search included file
search_header_for_hsx_values(m.group(1), vals)
continue
m = regex_def.match(line)
if m:
# Found HSE_VALUE or HSI_VALUE
val = int(m.group(3)) // 1000000
if m.group(1) == 'HSE_VALUE':
vals[0] = val
else:
vals[1] = val
return vals
def main():
global out_format
# parse input args
import sys
argv = sys.argv[1:]
c_table = False
relax_pll48 = False
hse = None
hsi = None
while True:
if argv[0] == '-c':
c_table = True
argv.pop(0)
elif argv[0] == '--relax-pll48':
relax_pll48 = True
argv.pop(0)
else:
break
if len(argv) != 1:
print("usage: pllvalues.py [-c] <hse in MHz>")
sys.exit(1)
if argv[0].startswith("file:"):
# extract HSE_VALUE, and optionally HSI_VALUE, from header file
hse, hsi = search_header_for_hsx_values(argv[0][5:], [None, None])
if hse is None:
raise ValueError("%s does not contain a definition of HSE_VALUE" % argv[0])
if hsi is not None and hsi > 16:
# Currently, an HSI value greater than 16MHz is not supported
hsi = None
else:
# HSE given directly as an integer
hse = int(argv[0])
hse_valid_plls = compute_pll_table(hse, relax_pll48)
if hsi is not None:
hsi_valid_plls = compute_pll_table(hsi, relax_pll48)
if c_table:
print('#if MICROPY_HW_CLK_USE_HSI')
if hsi is not None:
hsi_valid_plls.append((hsi, (0, 0, 2, 0)))
generate_c_table(hsi, hsi_valid_plls)
print('#else')
if hsi is not None:
hse_valid_plls.append((hsi, (0, 0, 2, 0)))
hse_valid_plls.append((hse, (1, 0, 2, 0)))
generate_c_table(hse, hse_valid_plls)
print('#endif')
else:
print_table(hse, hse_valid_plls)
if __name__ == "__main__":
main()
|
|
import hail as hl
from hail.utils import wrap_to_list
def import_gtf(path, key=None):
"""Import a GTF file.
The GTF file format is identical to the GFF version 2 file format,
and so this function can be used to import GFF version 2 files as
well.
See https://www.ensembl.org/info/website/upload/gff.html for more
details on the GTF/GFF2 file format.
The :class:`.Table` returned by this function will include the following
row fields:
.. code-block:: text
'seqname': str
'source': str
'feature': str
'start': int32
'end': int32
'score': float64
'strand': str
'frame': int32
There will also be corresponding fields for every tag found in the
attribute field of the GTF file.
.. note::
The "end" field in the table will be incremented by 1 in
comparison to the value found in the GTF file, as the end
coordinate in a GTF file is inclusive while the end
coordinate in Hail is exclusive.
Example
-------
>>> ht = hl.experimental.import_gtf('data/test.gtf', key='gene_id')
>>> ht.describe()
.. code-block:: text
----------------------------------------
Global fields:
None
----------------------------------------
Row fields:
'seqname': str
'source': str
'feature': str
'start': int32
'end': int32
'score': float64
'strand': str
'frame': int32
'havana_gene': str
'exon_id': str
'havana_transcript': str
'transcript_name': str
'gene_type': str
'tag': str
'transcript_status': str
'exon_number': str
'level': str
'transcript_id': str
'transcript_type': str
'gene_id': str
'gene_name': str
'gene_status': str
----------------------------------------
Key: ['gene_id']
----------------------------------------
Parameters
----------
path : :obj:`str`
File to import.
key : :obj:`str` or :obj:`list` of :obj:`str`
Key field(s). Can be tag name(s) found in the attribute field
of the GTF file.
Returns
-------
:class:`.Table`
"""
ht = hl.import_table(path,
comment='#',
no_header=True,
types={'f3': hl.tint,
'f4': hl.tint,
'f5': hl.tfloat,
'f7': hl.tint},
missing='.',
delimiter='\t')
ht = ht.rename({'f0': 'seqname',
'f1': 'source',
'f2': 'feature',
'f3': 'start',
'f4': 'end',
'f5': 'score',
'f6': 'strand',
'f7': 'frame',
'f8': 'attribute'})
ht = ht.annotate(end=ht['end'] + 1)
ht = ht.annotate(attribute=hl.dict(
hl.map(lambda x: (x.split(' ')[0],
x.split(' ')[1].replace('"', '').replace(';$', '')),
ht['attribute'].split('; '))))
attributes = list(ht.aggregate(
hl.set(hl.flatten(hl.agg.collect(ht['attribute'].keys())))))
ht = ht.annotate(**{x: hl.or_missing(ht['attribute'].contains(x),
ht['attribute'][x])
for x in attributes})
ht = ht.drop(ht['attribute'])
if key:
key = wrap_to_list(key)
ht = ht.key_by(*key)
return ht
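# A minimal sketch (plain Python, no Hail) of the attribute-splitting rule
# applied above: each 'key "value"' pair of the GTF attribute column becomes
# one dictionary entry. The sample attribute string below is made up.
def _parse_attribute_column(attribute):
    pairs = (p for p in attribute.rstrip(';').split('; ') if p)
    return {k: v.replace('"', '') for k, v in (p.split(' ', 1) for p in pairs)}
# e.g. _parse_attribute_column('gene_id "g1"; gene_name "geneA";')
# returns {'gene_id': 'g1', 'gene_name': 'geneA'}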
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-v', required=True, help='GTEx version.')
parser.add_argument('-b', required=True, choices=['GRCh37', 'GRCh38'], help='Ensembl reference genome build.')
parser.add_argument('-d', required=True, choices=['eqtl_associations', 'gene', 'transcript', 'exon', 'junction', ], help='Ensembl sequence dataset to load.')
args = parser.parse_args()
version = 'v{:}'.format(args.v)
reference_genome = args.b
dataset = args.d
if dataset == 'gene':
ht_samples = hl.import_table('gs://hail-datasets/raw-data/gtex/v7/annotations/GTEx_v7_Annotations.SampleAttributesDS.txt',
key='SAMPID',
missing='')
float_cols = ['SMRIN',
'SME2MPRT',
'SMNTRART',
'SMMAPRT',
'SMEXNCRT',
'SM550NRM',
'SMUNMPRT',
'SM350NRM',
'SMMNCPB',
'SME1MMRT',
'SMNTERRT',
'SMMNCV',
'SMGAPPCT',
'SMNTRNRT',
'SMMPUNRT',
'SMEXPEFF',
'SME2MMRT',
'SMBSMMRT',
'SME1PCTS',
'SMRRNART',
'SME1MPRT',
'SMDPMPRT',
'SME2PCTS']
int_cols = ['SMTSISCH',
'SMATSSCR',
'SMTSPAX',
'SMCHMPRS',
'SMNUMGPS',
'SMGNSDTC',
'SMRDLGTH',
'SMSFLGTH',
'SMESTLBS',
'SMMPPD',
'SMRRNANM',
'SMVQCFL',
'SMTRSCPT',
'SMMPPDPR',
'SMCGLGTH',
'SMUNPDRD',
'SMMPPDUN',
'SME2ANTI',
'SMALTALG',
'SME2SNSE',
'SMMFLGTH',
'SMSPLTRD',
'SME1ANTI',
'SME1SNSE',
'SMNUM5CD']
ht_samples = ht_samples.annotate(**{x: hl.float(ht_samples[x]) for x in float_cols})
ht_samples = ht_samples.annotate(**{x: hl.int(ht_samples[x].replace('.0$', '')) for x in int_cols})
ht = ht.filter(ht.feature_type == 'gene')
ht = ht.annotate(interval=hl.interval(hl.locus(ht['contig'], ht['start'], 'GRCh37'), hl.locus(ht['contig'], ht['end'] + 1, 'GRCh37')))
ht = ht.annotate(attributes=hl.dict(hl.map(lambda x: (x.split(' ')[0], x.split(' ')[1].replace('"', '').replace(';$', '')), ht['attributes'].split('; '))))
attribute_cols = list(ht.aggregate(hl.set(hl.flatten(hl.agg.collect(ht.attributes.keys())))))
ht = ht.annotate(**{x: hl.or_missing(ht.attributes.contains(x), ht.attributes[x]) for x in attribute_cols})
ht = ht.select(*(['gene_id', 'interval', 'gene_type', 'strand', 'annotation_source', 'havana_gene', 'gene_status', 'tag']))
ht = ht.rename({'havana_gene': 'havana_gene_id'})
ht = ht.key_by(ht.gene_id)
"""
|
|
#!/usr/bin/env python
# coding: utf-8
"""
A spot setup using cmf for a simple 1 storage hydrological model
This example can be easily extended with more storages
"""
from __future__ import division, print_function
import datetime
import cmf
import spotpy
from spotpy.parameter import Uniform
import numpy as np
# Make sure we do not get pestered with divide by zero errors
np.seterr(all='ignore')
class DataProvider(object):
"""
Holds the forcing and calibration data
"""
def __init__(self):
# Load data from file using numpy magic
data = np.recfromcsv('cmf_data/fulda_climate.csv', encoding='utf-8')
def bstr2date(bs):
"""Helper function to convert date byte string to datetime object"""
return datetime.datetime.strptime(bs, '%d.%m.%Y')
# Get begin, step and end from the date column
self.begin = bstr2date(data.date[0])
self.step = bstr2date(data.date[1]) - self.begin
self.end = bstr2date(data.date[-1])
def a2ts(a):
"""Converts an array column to a timeseries"""
return cmf.timeseries.from_array(self.begin, self.step, a)
self.P = a2ts(data.prec)
self.T = a2ts(data.tmean)
self.Tmin = a2ts(data.tmin)
self.Tmax = a2ts(data.tmax)
self.Q = a2ts(data.q)
def runoff_mm(self, area):
"""Calculates the runoff in mm from the data"""
sec_per_day = 86400
mm_per_m = 1000
return self.Q * sec_per_day / area * mm_per_m
def add_stations(self, project):
"""
Creates a rainstation and a meteo station for the cmf project
:param project: A cmf.project
:return: rainstation, meteo
"""
rainstation = project.rainfall_stations.add('Grebenau avg', self.P, (0, 0, 0))
# Tell the project to use the rainfall station just created
project.use_nearest_rainfall()
# Temperature data
meteo = project.meteo_stations.add_station('Grebenau avg', (0, 0, 0))
meteo.T = self.T
meteo.Tmin = self.Tmin
meteo.Tmax = self.Tmax
# Tell the project to use the meteo station just created
project.use_nearest_meteo()
return rainstation, meteo
class SingleStorage(object):
"""
A simple hydrological single storage model.
No snow, interception or routing.
"""
# Catchment area
area = 2976.41e6 # sq m
# General storage parameter
V0 = Uniform(10, 10000, optguess=1000)
# ET parameters
fETV1 = Uniform(0.01, 1, optguess=0.2, doc='if V<fETV1*V0, water uptake stress for plants starts')
fETV0 = Uniform(0, 0.9, optguess=0.2, doc='if V<fETV0*fETV1*V0, plants die of drought')
# Outflow parameters
tr = Uniform(0.1, 1000, optguess=10, doc='Residence time of water in storage when V=V0')
Vr = Uniform(0, 1, optguess=0.0, doc='Residual water in storage in terms of V0')
beta = Uniform(0.3, 5, optguess=1, doc='Exponent in kinematic wave function')
max_run_minutes = 5
def __init__(self, begin=None, end=None):
"""
Initializes the model
:param begin: Start date of the calibration period
:param end: End date of the calibration period
"""
self.dbname = 'cmf_singlestorage'
# Loads driver data
self.data = DataProvider()
self.project, self.outlet = self.create_project()
self.data.add_stations(self.project)
self.setparameters()
self.begin = begin or self.data.begin
self.end = end or self.data.end
def __str__(self):
return type(self).__name__
def create_project(self):
"""
Creates the cmf project with its basic elements
"""
# Use only a single thread; that is better for a calibration run and for small models
cmf.set_parallel_threads(1)
# make the project
p = cmf.project()
# make a new cell
c = p.NewCell(0, 0, 0, 1000)
# Add a storage
layer = c.add_layer(1.0)
# ET
cmf.HargreaveET(layer, c.transpiration)
# Outlet
outlet = p.NewOutlet('outlet', 10, 0, 0)
return p, outlet
def setparameters(self, par=None):
"""
Sets the parameters of the model by creating the connections
"""
par = par or spotpy.parameter.create_set(self, valuetype='optguess')
# Some shortcuts to gain visibility
c = self.project[0]
o = self.outlet
# Set uptake stress
ETV1 = par.fETV1 * par.V0
ETV0 = par.fETV0 * ETV1
c.set_uptakestress(cmf.VolumeStress(ETV1, ETV0))
# Connect layer with outlet
cmf.PowerLawConnection(c.layers[0], o,
Q0=par.V0 / par.tr, beta=par.beta,
residual=par.Vr * par.V0, V0=par.V0)
def runmodel(self, verbose=False):
"""
Runs the model and saves the results
"""
solver = cmf.CVodeIntegrator(self.project, 1e-9)
c = self.project[0]
# result timeseries
res_q = cmf.timeseries(self.begin, cmf.day)
tstart = datetime.datetime.now()
# start solver and calculate in daily steps
for t in solver.run(self.data.begin, self.end, cmf.day):
if t > self.begin:
# append results, when spin up time is over
res_q.add(self.outlet.waterbalance(t))
# Print the status to the screen to let us know what is going on
if verbose:
print(t, 'P={:5.3f}'.format(c.get_rainfall(t)))
if datetime.datetime.now() - tstart > datetime.timedelta(minutes=self.max_run_minutes):
print('Cancelled, since it took more than {} minutes'.format(self.max_run_minutes))
for t in cmf.timerange(solver.t, self.end, cmf.day):
res_q.add(np.nan)
return res_q
def simulation(self, vector=None, verbose=False):
"""
Sets the parameters of the model and starts a run
:return: np.array with runoff in mm/day
"""
self.setparameters(vector)
result_q = self.runmodel(verbose)
return np.array(result_q[self.begin:self.end])
@staticmethod
def objectivefunction(simulation, evaluation):
"""
Calculates the goodness of the simulation
"""
return [
spotpy.objectivefunctions.nashsutcliffe(evaluation, simulation),
spotpy.objectivefunctions.pbias(evaluation, simulation)
]
def evaluation(self):
"""
Returns the evaluation data
"""
runoff_mm = self.data.runoff_mm(self.area)
return np.array(runoff_mm[self.begin:self.end])
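# A minimal usage sketch (assuming spotpy's sampler API; the algorithm choice,
# date range and repetition count below are illustrative only). Kept inside a
# function so that importing this module does not start a calibration run.
def _example_sampling_run(repetitions=100):
    model = SingleStorage(datetime.datetime(1980, 1, 1),
                          datetime.datetime(1985, 12, 31))
    sampler = spotpy.algorithms.mc(model, dbname=model.dbname, dbformat='csv')
    sampler.sample(repetitions)
    return sampler.getdata()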
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import pytest
from bson import DBRef
from pymongo.errors import DuplicateKeyError
from nose import with_setup
from .. import Collection, Index, Model
class TestCollection(Collection):
def custom(self):
return 'It works!'
class TestModel(Model):
'''Model class for test cases.'''
class Meta:
database = 'minimongo_test'
collection = 'minimongo_test'
indices = (
Index('x'),
)
def a_method(self):
self.x = 123
self.y = 456
self.save()
class TestModelCollection(Model):
'''Model class with a custom collection class.'''
class Meta:
database = 'minimongo_test'
collection = 'minimongo_collection'
collection_class = TestCollection
class TestModelUnique(Model):
class Meta:
database = 'minimongo_test'
collection = 'minimongo_unique'
indices = (
Index('x', unique=True),
)
class TestDerivedModel(TestModel):
class Meta:
database = 'minimongo_test'
collection = 'minimongo_derived'
class TestNoAutoIndexModel(Model):
class Meta:
database = 'minimongo_test'
collection = 'minimongo_noidex'
indices = (
Index('x'),
)
auto_index = False
class TestModelInterface(Model):
class Meta:
interface = True
class TestModelImplementation(TestModelInterface):
class Meta:
database = 'minimongo_test'
collection = 'minimongo_impl'
class TestFieldMapper(Model):
class Meta:
database = 'minimongo_test'
collection = 'minimongo_mapper'
field_map = (
(lambda k, v: k == 'x' and isinstance(v, int),
lambda v: float(v * (4.0 / 3.0))),
)
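# A minimal sketch of how a ``field_map`` entry is intended to behave (the
# helper below is illustrative only, not minimongo's own implementation):
# the first callable decides whether a (key, value) pair gets mapped, the
# second transforms the value. With the mapping above, an integer x=6 is
# stored as the float 8.0, while y=7 is left untouched.
def _apply_field_map(field_map, key, value):
    for predicate, transform in field_map:
        if predicate(key, value):
            return transform(value)
    return value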
def setup():
# Make sure we start with a clean, empty DB.
TestModel.connection.drop_database(TestModel.database)
# Create indices up front
TestModel.auto_index()
TestModelUnique.auto_index()
def teardown():
# This will drop the entire minimongo_test database. Careful!
TestModel.connection.drop_database(TestModel.database)
def test_meta():
assert hasattr(TestModel, '_meta')
assert not hasattr(TestModel, 'Meta')
meta = TestModel._meta
for attr in ('host', 'port', 'indices', 'database',
'collection', 'collection_class'):
assert hasattr(meta, attr)
assert meta.database == 'minimongo_test'
assert meta.collection == 'minimongo_test'
assert meta.indices == (Index('x'), )
@with_setup(setup, teardown)
def test_dictyness():
item = TestModel({'x': 642})
assert item['x'] == item.x == 642
item.y = 426
assert item['y'] == item.y == 426
assert set(item.keys()) == set(['x', 'y'])
del item['x']
assert item == {'y': 426}
item.z = 3
del item.y
assert item == {'z': 3}
@with_setup(setup, teardown)
def test_creation():
'''Test simple object creation and querying via find_one.'''
object_a = TestModel({'x': 1, 'y': 1})
object_a.z = 1
object_a.save()
object_b = TestModel.collection.find_one({'x': 1})
# Make sure that the find_one method returns the right type.
assert isinstance(object_b, TestModel)
# Make sure that the contents are the same.
assert object_b == object_a
# Make sure that our internal representation is what we expect (and
# no extra fields, etc.)
assert object_a == {'x': 1, 'y': 1, 'z': 1, '_id': object_a._id}
assert object_b == {'x': 1, 'y': 1, 'z': 1, '_id': object_b._id}
@with_setup(setup, teardown)
def test_find_one():
model = TestModel({'x': 1, 'y': 1})
model.save()
assert model._id is not None
found = TestModel.collection.find_one(model._id)
assert found is not None
assert isinstance(found, TestModel)
assert found == model
@with_setup(setup, teardown)
def test_save_with_arguments():
# Manipulate is what inserts the _id on save if it is missing
model = TestModel(foo=0)
model.save(manipulate=False)
with pytest.raises(AttributeError):
model._id
# but the object was actually saved
model = TestModel.collection.find_one({'foo': 0})
assert model.foo == 0
@with_setup(setup, teardown)
def test_mongo_update():
"""Test update. note that update does not sync back the server copy."""
model = TestModel(counter=10, x=0, y=1)
model.save()
# NOTE: The tests below could be thought of as outlining existing
# edge-case behavior (i.e. they're bugs) and they should be fixed and
# the behavior made more correct/consistent.
# Update will not delete missing attributes, so at this point our
# local copy is out of sync with what's on the server.
model.y = 1
del model.x
model.update()
assert model.get('x', 'foo') == 'foo'
# $inc changes the server, not the local copy.
model.mongo_update({'$inc': {'counter': 1}})
assert model.counter == 10
# reload the model. This will pull in the "true" document from the server.
model = TestModel.collection.find_one({'_id': model._id})
assert model.counter == 11
assert model.x == 0
assert model.y == 1
@with_setup(setup, teardown)
def test_load():
"""Partial loading of documents.x"""
# object_a and object_b are 2 instances of the same document
object_a = TestModel(x=0, y=1).save()
object_b = TestModel(_id=object_a._id)
with pytest.raises(AttributeError):
object_b.x
# Partial load. only the x value
object_b.load(fields={'x': 1})
assert object_b.x == object_a.x
with pytest.raises(AttributeError):
object_b.y
# Complete load. change the value first
object_a.x = 2
object_a.save()
object_b.load()
assert object_b.x == 2
assert object_b.y == object_a.y
@with_setup(setup, teardown)
def test_load_and_field_mapper():
object_a = TestFieldMapper(x=12, y=1).save()
object_b = TestFieldMapper(_id=object_a._id)
# X got mapped (multiplied by 4/3 and converted to a float)
assert object_a.x == 16.0
assert object_a.y == 1
object_b.load(fields={'x': 1})
assert object_b.x == 16.0
with pytest.raises(AttributeError):
object_b.y # object_b does not have the 'y' field
object_b.load()
assert object_b.y == 1
@with_setup(setup, teardown)
def test_index_existance():
'''Test that indexes were created properly.'''
indices = TestModel.collection.index_information()
# Even though PyMongo documents that indices should not contain
# "ns", the seem to do in practice.
assert "x_1" in indices
assert indices["x_1"]["key"] == [("x", 1)]
@pytest.mark.xfail(reason="drop_dups is unsupported since MongoDB 2.7.5")
def test_unique_index():
'''Test behavior of indices with unique=True'''
# This will work (y is undefined)
TestModelUnique({'x': 1}).save()
TestModelUnique({'x': 1}).save()
# Assert that there's only one object in the collection, even though
# we inserted two. The uniqueness constraint on the index has dropped
# one of the inserts (silently, I guess).
assert TestModelUnique.collection.find().count() == 1
# Even if we use different values for y, it's still only one object:
TestModelUnique({'x': 2, 'y': 1}).save()
TestModelUnique({'x': 2, 'y': 2}).save()
# There are now 2 objects, one with x=1, one with x=2.
assert TestModelUnique.collection.find().count() == 2
@with_setup(setup, teardown)
def test_unique_constraint():
x1_a = TestModelUnique({'x': 1, 'y': 1})
x1_b = TestModelUnique({'x': 1, 'y': 2})
x1_a.save(safe=True)
with pytest.raises(DuplicateKeyError):
x1_b.save(safe=True)
x1_c = TestModelUnique({'x': 2, 'y': 1})
x1_c.save()
@with_setup(setup, teardown)
def test_queries():
'''Test some more complex query forms.'''
object_a = TestModel({'x': 1, 'y': 1}).save()
object_b = TestModel({'x': 1, 'y': 2}).save()
object_c = TestModel({'x': 2, 'y': 2}).save()
object_d = TestModel({'x': 2, 'y': 1}).save()
found_x1 = TestModel.collection.find({'x': 1})
found_y1 = TestModel.collection.find({'y': 1})
found_x2y2 = TestModel.collection.find({'x': 2, 'y': 2})
list_x1 = list(found_x1)
list_y1 = list(found_y1)
list_x2y2 = list(found_x2y2)
# make sure the types of the things coming back from find() are the
# derived Model types, not just a straight dict.
assert isinstance(list_x1[0], TestModel)
assert object_a in list_x1
assert object_b in list_x1
assert object_a in list_y1
assert object_d in list_y1
assert object_c == list_x2y2[0]
@with_setup(setup, teardown)
def test_deletion():
'''Test deleting an object from a collection.'''
object_a = TestModel()
object_a.x = 100
object_a.y = 200
object_a.save()
object_b = TestModel.collection.find({'x': 100})
assert object_b.count() == 1
object_b[0].remove()
object_a = TestModel.collection.find({'x': 100})
assert object_a.count() == 0
@with_setup(setup, teardown)
def test_complex_types():
'''Test lists as types.'''
object_a = TestModel()
object_a.l = ['a', 'b', 'c']
object_a.x = 1
object_a.y = {'m': 'n',
'o': 'p'}
object_a['z'] = {'q': 'r',
's': {'t': ''}}
object_a.save()
object_b = TestModel.collection.find_one({'x': 1})
# Make sure the internal lists are equivalent.
assert object_a.l == object_b.l
# Make sure that everything is of the right type, including the types of
# the nested fields that we read back from the DB, and that we are able
# to access fields as both attrs and items.
assert type(object_a) == type(object_b) == TestModel
assert isinstance(object_a.y, dict)
assert isinstance(object_b.y, dict)
assert isinstance(object_a['z'], dict)
assert isinstance(object_b['z'], dict)
assert isinstance(object_a.z, dict)
assert isinstance(object_b.z, dict)
# These nested fields are actually instances of AttrDict, which is why
# we can access as both attributes and values. Thus, the "isinstance"
# dict check.
assert isinstance(object_a['z']['s'], dict)
assert isinstance(object_b['z']['s'], dict)
assert isinstance(object_a.z.s, dict)
assert isinstance(object_b.z.s, dict)
assert object_a == object_b
@with_setup(setup, teardown)
def test_type_from_cursor():
for i in range(6):
TestModel({'x': i}).save()
objects = TestModel.collection.find()
for single_object in objects:
assert type(single_object) == TestModel
# Make sure it's both a dict and a TestModel, which is also an object
assert isinstance(single_object, dict)
assert isinstance(single_object, object)
assert isinstance(single_object, TestModel)
assert isinstance(single_object['x'], int)
@with_setup(setup, teardown)
def test_delete_field():
'''Test deleting a single field from an object.'''
object_a = TestModel({'x': 1, 'y': 2})
object_a.save()
del object_a.x
object_a.save()
assert TestModel.collection.find_one({'y': 2}) == {
'y': 2, '_id': object_a._id
}
@with_setup(setup, teardown)
def test_count_and_fetch():
'''Test counting methods on Cursors. '''
object_d = TestModel({'x': 1, 'y': 4}).save()
object_b = TestModel({'x': 1, 'y': 2}).save()
object_a = TestModel({'x': 1, 'y': 1}).save()
object_c = TestModel({'x': 1, 'y': 3}).save()
find_x1 = TestModel.collection.find({'x': 1}).sort('y')
assert find_x1.count() == 4
list_x1 = list(find_x1)
assert list_x1[0] == object_a
assert list_x1[1] == object_b
assert list_x1[2] == object_c
assert list_x1[3] == object_d
@with_setup(setup, teardown)
def test_fetch_and_limit():
'''Test counting methods on Cursors. '''
object_a = TestModel({'x': 1, 'y': 1}).save()
object_b = TestModel({'x': 1, 'y': 2}).save()
TestModel({'x': 1, 'y': 4}).save()
TestModel({'x': 1, 'y': 3}).save()
find_x1 = TestModel.collection.find({'x': 1}).limit(2).sort('y')
assert find_x1.count(with_limit_and_skip=True) == 2
assert object_a in find_x1
assert object_b in find_x1
@with_setup(setup, teardown)
def test_dbref():
'''Test generation of DBRef objects, and querying via DBRef
objects.'''
object_a = TestModel({'x': 1, 'y': 999}).save()
ref_a = object_a.dbref()
object_b = TestModel.collection.from_dbref(ref_a)
assert object_a == object_b
# Make sure that a ValueError is raised for DBRefs from a
# 'foreign' collection or database.
with pytest.raises(ValueError):
ref_a = DBRef('foo', ref_a.id)
TestModel.collection.from_dbref(ref_a)
with pytest.raises(ValueError):
ref_a = DBRef(ref_a.collection, ref_a.id, 'foo')
TestModel.collection.from_dbref(ref_a)
# Testing ``with_database`` option.
ref_a = object_a.dbref(with_database=False)
assert ref_a.database is None
ref_a = object_a.dbref(with_database=True)
assert ref_a.database is not None
ref_a = object_a.dbref() # True by default.
assert ref_a.database is not None
# Testing additional fields
ref_a = object_a.dbref(name="foo")
assert ref_a.name == 'foo'
def test_db_and_collection_names():
'''Test the methods that return the current class's DB and
Collection names.'''
object_a = TestModel({'x': 1})
assert object_a.database.name == 'minimongo_test'
assert TestModel.database.name == 'minimongo_test'
assert object_a.collection.name == 'minimongo_test'
assert TestModel.collection.name == 'minimongo_test'
def test_derived():
'''Test Models that are derived from other models.'''
derived_object = TestDerivedModel()
derived_object.a_method()
assert derived_object.database.name == 'minimongo_test'
assert derived_object.collection.name == 'minimongo_derived'
assert TestDerivedModel.collection.find_one({'x': 123}) == derived_object
def test_collection_class():
model = TestModelCollection()
assert isinstance(model.collection, TestCollection)
assert hasattr(model.collection, 'custom')
assert model.collection.custom() == 'It works!'
def test_str():
assert str(TestModel()) == 'TestModel({})'
assert str(TestModel({'foo': 'bar'})) == 'TestModel({\'foo\': \'bar\'})'
assert str(TestModel({'foo': 'bar'})) == 'TestModel({\'foo\': \'bar\'})'
def test_auto_collection_name():
try:
class SomeModel(Model):
class Meta:
database = 'minimongo_test'
except Exception:
pytest.fail('`collection_name` should\'ve been constructed.')
assert SomeModel.collection.name == 'some_model'
def test_no_auto_index():
TestNoAutoIndexModel({'x': 1}).save()
indices = TestNoAutoIndexModel.collection.index_information()
assert indices["_id_"]["key"] == [("_id", 1)]
TestNoAutoIndexModel.auto_index()
indices = TestNoAutoIndexModel.collection.index_information()
assert indices["_id_"]["key"] == [("_id", 1)]
assert indices["x_1"]["key"] == [("x", 1)]
def test_interface_models():
test_interface_instance = TestModelInterface()
test_interface_instance.x = 5
with pytest.raises(Exception):
test_interface_instance.save()
test_model_instance = TestModelImplementation()
test_model_instance.x = 123
test_model_instance.save()
test_model_instance_2 = TestModelImplementation.collection.find_one(
{'x': 123})
assert test_model_instance == test_model_instance_2
def test_field_mapper():
test_mapped_object = TestFieldMapper()
# x is going to be multiplied by 4/3 automatically.
test_mapped_object.x = 6
test_mapped_object.y = 7
test_mapped_object.z = 6.0
assert test_mapped_object.x == 8.0
assert test_mapped_object.y == 7
assert test_mapped_object.z == 6.0
assert type(test_mapped_object.x) == float
assert type(test_mapped_object.y) == int
assert type(test_mapped_object.z) == float
test_mapped_object.save()
loaded_mapped_object = TestFieldMapper.collection.find_one()
# When the object was loaded from the database, the mapper automatically
# multiplied every integer field by 4.0/3.0 and converted it to a float.
# This is a crazy use case only used for testing here.
assert test_mapped_object.x == 8.0
assert test_mapped_object.y == 7
assert test_mapped_object.z == 6.0
assert type(loaded_mapped_object.x) == float
assert type(test_mapped_object.x) == float
assert type(loaded_mapped_object.y) == int
assert type(loaded_mapped_object.z) == float
def test_slicing():
object_a = TestModel({'x': 1}).save()
object_b = TestModel({'x': 2}).save()
object_c = TestModel({'x': 3}).save()
object_d = TestModel({'x': 4}).save()
object_e = TestModel({'x': 5}).save()
objects = TestModel.collection.find().sort('x')
obj_list = list(objects[:2])
assert obj_list == [object_a, object_b]
assert type(obj_list[0]) == TestModel
assert type(obj_list[1]) == TestModel
# We can't re-slice an already sliced cursor, so we query again.
objects = TestModel.collection.find().sort('x')
obj_list = list(objects[2:])
assert obj_list == [object_c, object_d, object_e]
assert type(obj_list[0]) == TestModel
assert type(obj_list[1]) == TestModel
assert type(obj_list[2]) == TestModel
|
|
# -*- coding: utf-8 -*-
'''
magento.api
Generic API for magento
:license: BSD, see LICENSE for more details
'''
import sys
from threading import RLock
PROTOCOLS = []
try:
if sys.version_info < (3,):
from xmlrpclib import ServerProxy
else:
from xmlrpc.client import ServerProxy
except ImportError:
pass
else:
PROTOCOLS.append('xmlrpc')
try:
from suds.client import Client
except ImportError:
pass
else:
PROTOCOLS.append('soap')
from . import rest
try:
import requests
import json
except ImportError:
pass
else:
PROTOCOLS.append('rest')
from .utils import expand_url, camel_2_snake
class ClientApiMeta(type):
"""
A metaclass that automatically exposes every class inheriting from API
as a property on API instances.
"""
def __new__(meta, name, bases, dct):
abstract = dct.get('__abstract__', False)
Klass = super(ClientApiMeta, meta).__new__(meta, name, bases, dct)
if not abstract:
setattr(
API, camel_2_snake(name),
property(lambda self: self.get_instance_of(Klass))
)
return Klass
class API(object):
"""
Generic API to connect to magento
"""
__metaclass__ = ClientApiMeta
__abstract__ = True
def __init__(self, url, username, password,
version='1.3.2.4', full_url=False,
protocol='xmlrpc', transport=None,
verify_ssl=True):
"""
This is the Base API class which other APIs have to subclass. By
default the inherited classes also get the properties of this
class which will allow the use of the API with the `with` statement
A typical example to extend the API for your subclass is given below::
from magento.api import API
class Core(API):
def websites(self):
return self.call('ol_websites.list', [])
def stores(self):
return self.call('ol_groups.list', [])
def store_views(self):
return self.call('ol_storeviews.list', [])
The above real life example extends the API for the custom API
implementation for the magento extension
magento-community/Openlabs_OpenERPConnector
Example usage ::
from magento.api import API
with API(url, username, password) as magento_api:
return magento_api.call('customer.list', [])
.. note:: Python with statement has to be imported from __future__
in older versions of python. *from __future__ import with_statement*
If you want to use the API as a normal class, then you have to manually
end the session. A typical example is below::
from magento.api import API
api = API(url, username, password)
api.connect()
try:
return api.call('customer.list', [])
finally:
api.client.endSession(api.session)
:param url: URL to the magento instance.
By default the URL is treated as a base url
of the domain to which the api part of the URL
is added. If you want to specify the complete
URL, set the full_url flag as True.
:param username: API username of the Web services user. Note
that this is NOT the magento admin username
:param password: API password of the Web services user.
:param version: The version of magento the connection is being made to.
It is recommended to specify this as there could be
API specific changes in certain calls. Default value is
1.3.2.4
:param full_url: If set to true, then the `url` is expected to
be a complete URL
:param protocol: 'xmlrpc', 'soap' and 'rest' are valid values
:param transport: optional xmlrpclib.Transport subclass for
use in xmlrpc requests
:param verify_ssl: for REST API, skip SSL validation if False
"""
assert protocol \
in PROTOCOLS, "protocol must be %s" % ' OR '.join(PROTOCOLS)
self.url = str(full_url and url or expand_url(url, protocol))
self.username = username
self.password = password
self.protocol = protocol
self.version = version
self.transport = transport
self.session = None
self.client = None
self.verify_ssl = verify_ssl
self.lock = RLock()
def connect(self):
"""
Connects to the service
but does not log in. This can be used as a connection test.
"""
if self.protocol == 'xmlrpc':
if self.transport:
self.client = ServerProxy(
self.url, allow_none=True, transport=self.transport)
else:
self.client = ServerProxy(self.url, allow_none=True)
elif self.protocol == 'rest':
# Use an authentication token as the password
self.client = rest.Client(self.url, self.password,
verify_ssl=self.verify_ssl)
else:
self.client = Client(self.url)
def __enter__(self):
"""
Entry point for with statement
Logs in and creates a session
"""
if self.client is None:
self.connect()
if self.protocol == 'xmlrpc':
self.session = self.client.login(
self.username, self.password)
elif self.protocol == 'rest':
self.session = True
else:
self.session = self.client.service.login(
self.username, self.password)
return self
def __exit__(self, type, value, traceback):
"""
Exit point
Closes session with magento
"""
if self.protocol == 'xmlrpc':
self.client.endSession(self.session)
elif self.protocol == 'soap':
self.client.service.endSession(self.session)
self.session = None
def call(self, resource_path, arguments):
"""
Proxy for SOAP call API
"""
if self.protocol == 'xmlrpc':
return self.client.call(self.session, resource_path, arguments)
elif self.protocol == 'rest':
return self.client.call(resource_path, arguments)
else:
return self.client.service.call(
self.session, resource_path, arguments)
def multiCall(self, calls):
"""
Proxy for multicalls
"""
if self.protocol == 'xmlrpc':
return self.client.multiCall(self.session, calls)
else:
return self.client.service.multiCall(self.session, calls)
_missing = []
def get_instance_of(self, Klass):
"""
Return an instance of the client API with the same auth credentials
that this API object was instantiated with. The created instance is
cached, so subsequent requests get an already existing instance.
:param Klass: The klass for which the instance has to be created.
"""
with self.lock:
value = self.__dict__.get(Klass.__name__, self._missing)
if value is self._missing:
value = Klass(
self.url,
self.username,
self.password,
self.version,
True,
self.protocol,
)
self.__dict__[Klass.__name__] = value.__enter__()
return value
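# A minimal sketch (hypothetical resource class, not part of this module) of
# what the ClientApiMeta wiring above provides on Python 2, where the
# ``__metaclass__`` hook takes effect: a concrete subclass of API becomes
# reachable on an API instance as a cached, snake_cased property.
#
#     class CustomerInfo(API):
#         def info(self, customer_id):
#             return self.call('customer.info', [customer_id])
#
#     with API(url, username, password) as api:
#         api.customer_info.info(1)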
|
|
"""Materialized Path Trees"""
import sys
import operator
if sys.version_info >= (3, 0):
from functools import reduce
from django.core import serializers
from django.db import models, transaction, connection
from django.db.models import F, Q
from django.utils.translation import ugettext_noop as _
from treebeard.numconv import NumConv
from treebeard.models import Node
from treebeard.exceptions import InvalidMoveToDescendant, PathOverflow
class MP_NodeQuerySet(models.query.QuerySet):
"""
Custom queryset for the tree node manager.
Needed only for the customized delete method.
"""
def delete(self):
"""
Custom delete method, will remove all descendant nodes to ensure a
consistent tree (no orphans)
:returns: ``None``
"""
# we'll have to manually run through all the nodes that are going
# to be deleted and remove nodes from the list if an ancestor is
# already getting removed, since that would be redundant
removed = {}
for node in self.order_by('depth', 'path'):
found = False
for depth in range(1, int(len(node.path) / node.steplen)):
path = node._get_basepath(node.path, depth)
if path in removed:
# we are already removing a parent of this node
# skip
found = True
break
if not found:
removed[node.path] = node
# ok, got the minimal list of nodes to remove...
# we must also remove their children
# and update every parent node's numchild attribute
# LOTS OF FUN HERE!
parents = {}
toremove = []
for path, node in removed.items():
parentpath = node._get_basepath(node.path, node.depth - 1)
if parentpath:
if parentpath not in parents:
parents[parentpath] = node.get_parent(True)
parent = parents[parentpath]
if parent and parent.numchild > 0:
parent.numchild -= 1
parent.save()
if not node.is_leaf():
toremove.append(Q(path__startswith=node.path))
else:
toremove.append(Q(path=node.path))
# Django will handle this as a SELECT and then a DELETE of
# ids, and will deal with removing related objects
if toremove:
qset = self.model.objects.filter(reduce(operator.or_, toremove))
super(MP_NodeQuerySet, qset).delete()
transaction.commit_unless_managed()
class MP_NodeManager(models.Manager):
"""Custom manager for nodes."""
def get_query_set(self):
"""Sets the custom queryset as the default."""
return MP_NodeQuerySet(self.model).order_by('path')
class MP_Node(Node):
"""Abstract model to create your own Materialized Path Trees."""
steplen = 4
alphabet = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
node_order_by = []
path = models.CharField(max_length=255, unique=True)
depth = models.PositiveIntegerField()
numchild = models.PositiveIntegerField(default=0)
objects = MP_NodeManager()
numconv_obj_ = None
@classmethod
def _int2str(cls, num):
return cls.numconv_obj().int2str(num)
@classmethod
def _str2int(cls, num):
return cls.numconv_obj().str2int(num)
@classmethod
def numconv_obj(cls):
if cls.numconv_obj_ is None:
cls.numconv_obj_ = NumConv(len(cls.alphabet), cls.alphabet)
return cls.numconv_obj_
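# A brief sketch of the materialized-path encoding used by this class
# (illustrative values): with steplen=4 and the 0-9A-Z alphabet the first
# root node gets path '0001', its first child '00010001', that child's next
# sibling '00010002', and so on; a node's depth is len(path) // steplen.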
@classmethod
def add_root(cls, **kwargs):
"""
Adds a root node to the tree.
:raise PathOverflow: when no more root objects can be added
"""
# do we have a root node already?
last_root = cls.get_last_root_node()
if last_root and last_root.node_order_by:
# there are root nodes and node_order_by has been set
# delegate sorted insertion to add_sibling
return last_root.add_sibling('sorted-sibling', **kwargs)
if last_root:
# adding the new root node as the last one
newpath = cls._inc_path(last_root.path)
else:
# adding the first root node
newpath = cls._get_path(None, 1, 1)
# creating the new object
newobj = cls(**kwargs)
newobj.depth = 1
newobj.path = newpath
# saving the instance before returning it
newobj.save()
transaction.commit_unless_managed()
return newobj
@classmethod
def dump_bulk(cls, parent=None, keep_ids=True):
"""Dumps a tree branch to a python data structure."""
# Because of fix_tree, this method assumes that the depth
# and numchild properties in the nodes can be incorrect,
# so no helper methods are used
qset = cls._get_serializable_model().objects.all()
if parent:
qset = qset.filter(path__startswith=parent.path)
ret, lnk = [], {}
for pyobj in serializers.serialize('python', qset):
# django's serializer stores the attributes in 'fields'
fields = pyobj['fields']
path = fields['path']
depth = int(len(path) / cls.steplen)
# this will be useless in load_bulk
del fields['depth']
del fields['path']
del fields['numchild']
if 'id' in fields:
# this happens immediately after a load_bulk
del fields['id']
newobj = {'data': fields}
if keep_ids:
newobj['id'] = pyobj['pk']
if (not parent and depth == 1) or\
(parent and len(path) == len(parent.path)):
ret.append(newobj)
else:
parentpath = cls._get_basepath(path, depth - 1)
parentobj = lnk[parentpath]
if 'children' not in parentobj:
parentobj['children'] = []
parentobj['children'].append(newobj)
lnk[path] = newobj
return ret
@classmethod
def find_problems(cls):
"""
Checks for problems in the tree structure, problems can occur when:
1. your code breaks and you get incomplete transactions (always
use transactions!)
2. changing the ``steplen`` value in a model (you must
:meth:`dump_bulk` first, change ``steplen`` and then
:meth:`load_bulk`)
:returns: A tuple of five lists:
1. a list of ids of nodes with characters not found in the
``alphabet``
2. a list of ids of nodes with a wrong ``path`` length
according to ``steplen``
3. a list of ids of orphaned nodes
4. a list of ids of nodes with the wrong depth value for
their path
5. a list of ids of nodes that report a wrong number of children
"""
evil_chars, bad_steplen, orphans = [], [], []
wrong_depth, wrong_numchild = [], []
for node in cls.objects.all():
found_error = False
for char in node.path:
if char not in cls.alphabet:
evil_chars.append(node.pk)
found_error = True
break
if found_error:
continue
if len(node.path) % cls.steplen:
bad_steplen.append(node.pk)
continue
try:
node.get_parent(True)
except cls.DoesNotExist:
orphans.append(node.pk)
continue
if node.depth != int(len(node.path) / cls.steplen):
wrong_depth.append(node.pk)
continue
real_numchild = cls.objects.filter(
path__range=cls._get_children_path_interval(node.path)
).extra(
where=['LENGTH(path)/%d=%d' % (cls.steplen, node.depth + 1)]
).count()
if real_numchild != node.numchild:
wrong_numchild.append(node.pk)
continue
return evil_chars, bad_steplen, orphans, wrong_depth, wrong_numchild
@classmethod
def fix_tree(cls, destructive=False):
"""
Solves some problems that can appear when transactions are not used and
a piece of code breaks, leaving the tree in an inconsistent state.
The problems this method solves are:
1. Nodes with incorrect ``depth`` or ``numchild`` values due to
incorrect code and lack of database transactions.
2. "Holes" in the tree. This is normal if you move/delete nodes a
lot. Holes in a tree don't affect performance.
3. Incorrect ordering of nodes when ``node_order_by`` is enabled.
Ordering is enforced on *node insertion*, so if an attribute in
``node_order_by`` is modified after the node is inserted, the
tree ordering will be inconsistent.
:param destructive:
A boolean value. If True, a more aggressive fix_tree method will be
attempted. If False (the default), it will use a safe (and fast!)
fix approach, but it will only fix the ``depth`` and
``numchild`` fields; it won't fix the tree holes or broken path
ordering.
.. warning::
Currently what the ``destructive`` method does is:
1. Backup the tree with :meth:`dump_bulk`
2. Remove all nodes in the tree.
3. Restore the tree with :meth:`load_bulk`
So, even when the primary keys of your nodes will be preserved,
this method isn't foreign-key friendly. That needs complex
in-place tree reordering, not available at the moment (hint:
patches are welcome).
"""
if destructive:
dump = cls.dump_bulk(None, True)
cls.objects.all().delete()
cls.load_bulk(dump, None, True)
else:
cursor = cls._get_database_cursor('write')
# fix the depth field
# we need the WHERE to speed up postgres
sql = "UPDATE %s "\
"SET depth=LENGTH(path)/%%s "\
"WHERE depth!=LENGTH(path)/%%s" % (
connection.ops.quote_name(cls._meta.db_table), )
vals = [cls.steplen, cls.steplen]
cursor.execute(sql, vals)
# fix the numchild field
vals = ['_' * cls.steplen]
# the cake and sql portability are a lie
if cls.get_database_vendor('read') == 'mysql':
sql = "SELECT tbn1.path, tbn1.numchild, ("\
"SELECT COUNT(1) "\
"FROM %(table)s AS tbn2 "\
"WHERE tbn2.path LIKE "\
"CONCAT(tbn1.path, %%s)) AS real_numchild "\
"FROM %(table)s AS tbn1 "\
"HAVING tbn1.numchild != real_numchild" % {
'table': connection.ops.quote_name(
cls._meta.db_table)}
else:
subquery = "(SELECT COUNT(1) FROM %(table)s AS tbn2"\
" WHERE tbn2.path LIKE tbn1.path||%%s)"
sql = ("SELECT tbn1.path, tbn1.numchild, " + subquery +
" FROM %(table)s AS tbn1 WHERE tbn1.numchild != " +
subquery)
sql = sql % {
'table': connection.ops.quote_name(cls._meta.db_table)}
# we include the subquery twice
vals *= 2
cursor.execute(sql, vals)
sql = "UPDATE %(table)s "\
"SET numchild=%%s "\
"WHERE path=%%s" % {
'table': connection.ops.quote_name(cls._meta.db_table)}
for node_data in cursor.fetchall():
vals = [node_data[2], node_data[0]]
cursor.execute(sql, vals)
transaction.commit_unless_managed()
@classmethod
def get_tree(cls, parent=None):
"""
:returns:
A *queryset* of nodes ordered as DFS, including the parent.
If no parent is given, the entire tree is returned.
"""
if parent is None:
# return the entire tree
return cls.objects.all()
if not parent.is_leaf():
return cls.objects.filter(path__startswith=parent.path,
depth__gte=parent.depth)
return cls.objects.filter(pk=parent.pk)
@classmethod
def get_root_nodes(cls):
""":returns: A queryset containing the root nodes in the tree."""
return cls.objects.filter(depth=1)
@classmethod
def get_descendants_group_count(cls, parent=None):
"""
Helper for a very common case: get a group of siblings and the number
of *descendants* in every sibling.
"""
#~
# disclaimer: this is the FOURTH implementation I wrote for this
# function. I really tried to make it return a queryset, but doing so
# with a *single* query isn't trivial with Django's ORM.
# ok, I DID manage to make Django's ORM return a queryset here,
# defining two querysets, passing one subquery in the tables parameters
# of .extra() of the second queryset, using the undocumented order_by
# feature, and using a HORRIBLE hack to avoid django quoting the
# subquery as a table, BUT (and there is always a but) the hack didn't
# survive turning the QuerySet into a ValuesQuerySet, so I just used
# good old SQL.
# NOTE: in case there is interest, the hack to avoid django quoting the
# subquery as a table, was adding the subquery to the alias cache of
# the queryset's query object:
#
# qset.query.quote_cache[subquery] = subquery
#
# If there is a better way to do this in an UNMODIFIED django 1.0, let
# me know.
#~
if parent:
depth = parent.depth + 1
params = cls._get_children_path_interval(parent.path)
extrand = 'AND path BETWEEN %s AND %s'
else:
depth = 1
params = []
extrand = ''
sql = 'SELECT * FROM %(table)s AS t1 INNER JOIN '\
' (SELECT '\
' SUBSTR(path, 1, %(subpathlen)s) AS subpath, '\
' COUNT(1)-1 AS count '\
' FROM %(table)s '\
' WHERE depth >= %(depth)s %(extrand)s'\
' GROUP BY subpath) AS t2 '\
' ON t1.path=t2.subpath '\
' ORDER BY t1.path' % {
'table': connection.ops.quote_name(cls._meta.db_table),
'subpathlen': depth * cls.steplen,
'depth': depth,
'extrand': extrand}
cursor = cls._get_database_cursor('write')
cursor.execute(sql, params)
ret = []
field_names = [field[0] for field in cursor.description]
for node_data in cursor.fetchall():
node = cls(**dict(zip(field_names, node_data[:-2])))
node.descendants_count = node_data[-1]
ret.append(node)
transaction.commit_unless_managed()
return ret
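# Usage sketch (illustrative, with a hypothetical Category model built on this
# base): each returned object is a regular model instance with an extra
# descendants_count attribute attached by the raw SQL above.
#
#     for sibling in Category.get_descendants_group_count(parent=some_node):
#         # sibling.path, sibling.descendants_count
#         ...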
def get_depth(self):
""":returns: the depth (level) of the node"""
return self.depth
def get_siblings(self):
"""
:returns: A queryset of all the node's siblings, including the node
itself.
"""
qset = self.__class__.objects.filter(depth=self.depth)
if self.depth > 1:
# making sure the non-root nodes share a parent
parentpath = self._get_basepath(self.path, self.depth - 1)
qset = qset.filter(
path__range=self._get_children_path_interval(parentpath))
return qset
def get_children(self):
""":returns: A queryset of all the node's children"""
if self.is_leaf():
return self.__class__.objects.none()
return self.__class__.objects.filter(
depth=self.depth + 1,
path__range=self._get_children_path_interval(self.path)
)
def get_next_sibling(self):
"""
:returns: The node's next sibling, or None if the node is the rightmost
sibling.
"""
try:
return self.get_siblings().filter(path__gt=self.path)[0]
except IndexError:
return None
def get_descendants(self):
"""
:returns: A queryset of all the node's descendants in DFS order, not
including the node itself.
"""
return self.__class__.get_tree(self).exclude(pk=self.pk)
def get_prev_sibling(self):
"""
:returns: The node's previous sibling, or None if the node is the leftmost
sibling.
"""
try:
return self.get_siblings().filter(path__lt=self.path).reverse()[0]
except IndexError:
return None
def get_children_count(self):
"""
:returns: The number of the node's children, calculated in the most
efficient possible way.
"""
return self.numchild
def is_sibling_of(self, node):
"""
:returns: ``True`` if the node is a sibling of another node given as an
argument, else, returns ``False``
"""
aux = self.depth == node.depth
# Check non-root nodes share a parent only if they have the same depth
if aux and self.depth > 1:
# making sure the non-root nodes share a parent
parentpath = self._get_basepath(self.path, self.depth - 1)
return aux and node.path.startswith(parentpath)
return aux
def is_child_of(self, node):
"""
:returns: ``True`` if the node is a child of another node given as an
argument, else, returns ``False``
"""
return (self.path.startswith(node.path) and
self.depth == node.depth + 1)
def is_descendant_of(self, node):
"""
:returns: ``True`` if the node is a descendant of another node given
as an argument, else, returns ``False``
"""
return self.path.startswith(node.path) and self.depth > node.depth
def add_child(self, **kwargs):
"""
Adds a child to the node.
:raise PathOverflow: when no more child nodes can be added
"""
if not self.is_leaf() and self.node_order_by:
# there are child nodes and node_order_by has been set
# delegate sorted insertion to add_sibling
self.numchild += 1
return self.get_last_child().add_sibling('sorted-sibling',
**kwargs)
# creating a new object
newobj = self.__class__(**kwargs)
newobj.depth = self.depth + 1
if not self.is_leaf():
# adding the new child as the last one
newobj.path = self._inc_path(self.get_last_child().path)
else:
# the node had no children, adding the first child
newobj.path = self._get_path(self.path, newobj.depth, 1)
max_length = newobj.__class__._meta.get_field('path').max_length
if len(newobj.path) > max_length:
raise PathOverflow(
_('The new node is too deep in the tree, try'
' increasing the path.max_length property'
' and UPDATE your database'))
# saving the instance before returning it
newobj.save()
newobj._cached_parent_obj = self
self.__class__.objects.filter(path=self.path).update(numchild=F(
'numchild')+1)
# we increase the numchild value of the object in memory
self.numchild += 1
transaction.commit_unless_managed()
return newobj
def add_sibling(self, pos=None, **kwargs):
"""
Adds a new node as a sibling to the current node object.
:raise PathOverflow: when the library can't make room for the
node's new position
"""
pos = self._prepare_pos_var_for_add_sibling(pos)
# creating a new object
newobj = self.__class__(**kwargs)
newobj.depth = self.depth
if pos == 'sorted-sibling':
siblings = self.get_sorted_pos_queryset(
self.get_siblings(), newobj)
try:
newpos = self._get_lastpos_in_path(siblings.all()[0].path)
except IndexError:
newpos = None
if newpos is None:
pos = 'last-sibling'
else:
newpos, siblings = None, []
stmts = []
_, newpath = self._move_add_sibling_aux(pos, newpos,
self.depth, self, siblings,
stmts, None, False)
parentpath = self._get_basepath(newpath, self.depth - 1)
if parentpath:
stmts.append(self._get_sql_update_numchild(parentpath, 'inc'))
cursor = self._get_database_cursor('write')
for sql, vals in stmts:
cursor.execute(sql, vals)
# saving the instance before returning it
newobj.path = newpath
newobj.save()
transaction.commit_unless_managed()
return newobj
def get_root(self):
""":returns: the root node for the current node object."""
return self.__class__.objects.get(path=self.path[0:self.steplen])
def is_leaf(self):
""":returns: True if the node is a leaf node (else, returns False)"""
return self.numchild == 0
def get_ancestors(self):
"""
:returns: A queryset containing the current node object's ancestors,
starting by the root node and descending to the parent.
"""
paths = [self.path[0:pos]
for pos in range(0, len(self.path), self.steplen)[1:]]
return self.__class__.objects.filter(path__in=paths).order_by('depth')
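# e.g. assuming the default steplen of 4 and self.path == '000100020003', the
# ancestor paths are ['0001', '00010002']: the root first, then the parent,
# thanks to the order_by('depth').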
def get_parent(self, update=False):
"""
:returns: the parent node of the current node object.
Caches the result in the object itself to help in loops.
"""
depth = int(len(self.path) / self.steplen)
if depth <= 1:
return
try:
if update:
del self._cached_parent_obj
else:
return self._cached_parent_obj
except AttributeError:
pass
parentpath = self._get_basepath(self.path, depth - 1)
self._cached_parent_obj = self.__class__.objects.get(path=parentpath)
return self._cached_parent_obj
def move(self, target, pos=None):
"""
Moves the current node and all its descendants to a new position
relative to another node.
:raise PathOverflow: when the library can't make room for the
node's new position
"""
pos = self._prepare_pos_var_for_move(pos)
oldpath = self.path
# initialize variables and if moving to a child, updates "move to
# child" to become a "move to sibling" if possible (if it can't
# be done, it means that we are adding the first child)
(pos, target, newdepth, siblings, newpos) = (
self._fix_move_to_child(pos, target)
)
if target.is_descendant_of(self):
raise InvalidMoveToDescendant(
_("Can't move node to a descendant."))
if oldpath == target.path and (
(pos == 'left') or
(pos in ('right', 'last-sibling') and
target.path == target.get_last_sibling().path) or
(pos == 'first-sibling' and
target.path == target.get_first_sibling().path)):
# special cases, not actually moving the node so no need to UPDATE
return
if pos == 'sorted-sibling':
siblings = self.get_sorted_pos_queryset(
target.get_siblings(), self)
try:
newpos = self._get_lastpos_in_path(siblings.all()[0].path)
except IndexError:
newpos = None
if newpos is None:
pos = 'last-sibling'
stmts = []
# generate the sql that will do the actual moving of nodes
oldpath, newpath = self._move_add_sibling_aux(pos, newpos, newdepth,
target, siblings, stmts,
oldpath, True)
# updates needed for mysql and children count in parents
self._updates_after_move(oldpath, newpath, stmts)
cursor = self._get_database_cursor('write')
for sql, vals in stmts:
cursor.execute(sql, vals)
transaction.commit_unless_managed()
@classmethod
def _get_basepath(cls, path, depth):
""":returns: The base path of another path up to a given depth"""
if path:
return path[0:depth * cls.steplen]
return ''
@classmethod
def _get_path(cls, path, depth, newstep):
"""
Builds a path given some values
:param path: the base path
:param depth: the depth of the node
:param newstep: the value (integer) of the new step
"""
parentpath = cls._get_basepath(path, depth - 1)
key = cls._int2str(newstep)
return '%s%s%s' % (parentpath,
'0' * (cls.steplen - len(key)),
key)
@classmethod
def _inc_path(cls, path):
""":returns: The path of the next sibling of a given node path."""
newpos = cls._str2int(path[-cls.steplen:]) + 1
key = cls._int2str(newpos)
if len(key) > cls.steplen:
raise PathOverflow(_("Path Overflow from: '%s'" % (path, )))
return '%s%s%s' % (path[:-cls.steplen],
'0' * (cls.steplen - len(key)),
key)
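# Worked example of the path arithmetic above (assuming steplen = 4 and a step
# value small enough to encode as a single character of the default alphabet):
#
#     _get_path('0001', depth=2, newstep=3)  ->  '00010003'  (3rd child of '0001')
#     _inc_path('00010003')                  ->  '00010004'  (its next sibling)
#
# A node's depth is therefore always len(path) / steplen, which is exactly what
# the depth-fixing UPDATE at the top of this class relies on.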
@classmethod
def _get_lastpos_in_path(cls, path):
""":returns: The integer value of the last step in a path."""
return cls._str2int(path[-cls.steplen:])
@classmethod
def _get_parent_path_from_path(cls, path):
""":returns: The parent path for a given path"""
if path:
return path[0:len(path) - cls.steplen]
return ''
@classmethod
def _get_children_path_interval(cls, path):
""":returns: An interval of all possible children paths for a node."""
return (path + cls.alphabet[0] * cls.steplen,
path + cls.alphabet[-1] * cls.steplen)
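# Example (assuming steplen = 4 and an alphabet running from '0' to 'Z'): the
# children of a node with path '0001' all fall in the inclusive interval
# ('00010000', '0001ZZZZ'), which is what get_children() and get_siblings()
# pass to path__range.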
@classmethod
def _move_add_sibling_aux(cls, pos, newpos, newdepth, target, siblings,
stmts, oldpath=None, movebranch=False):
"""
Handles the reordering of nodes and branches when adding/moving
nodes.
:returns: A tuple containing the old path and the new path.
"""
if (
(pos == 'last-sibling') or
(pos == 'right' and target == target.get_last_sibling())
):
# easy, the last node
last = target.get_last_sibling()
newpath = cls._inc_path(last.path)
if movebranch:
stmts.append(cls._get_sql_newpath_in_branches(oldpath,
newpath))
else:
# do the UPDATE dance
if newpos is None:
siblings = target.get_siblings()
siblings = {'left': siblings.filter(path__gte=target.path),
'right': siblings.filter(path__gt=target.path),
'first-sibling': siblings}[pos]
basenum = cls._get_lastpos_in_path(target.path)
newpos = {'first-sibling': 1,
'left': basenum,
'right': basenum + 1}[pos]
newpath = cls._get_path(target.path, newdepth, newpos)
# If the move is amongst siblings and is to the left and there
# are siblings to the right of its new position then to be on
# the safe side we temporarily dump it on the end of the list
tempnewpath = None
if movebranch and len(oldpath) == len(newpath):
parentoldpath = cls._get_basepath(
oldpath,
int(len(oldpath) / cls.steplen) - 1
)
parentnewpath = cls._get_basepath(newpath, newdepth - 1)
if (
parentoldpath == parentnewpath and
siblings and
newpath < oldpath
):
last = target.get_last_sibling()
basenum = cls._get_lastpos_in_path(last.path)
tempnewpath = cls._get_path(newpath, newdepth, basenum + 2)
stmts.append(cls._get_sql_newpath_in_branches(oldpath,
tempnewpath))
# Optimisation to only move siblings which need moving
# (i.e. if we've got holes, allow them to compress)
movesiblings = []
priorpath = newpath
for node in siblings:
# If the path of the node is already greater than the path
# of the previous node it doesn't need shifting
if node.path > priorpath:
break
# It does need shifting, so add to the list
movesiblings.append(node)
# Calculate the path that it would be moved to, as that's
# the next "priorpath"
priorpath = cls._inc_path(node.path)
movesiblings.reverse()
for node in movesiblings:
# moving the siblings (and their branches) at the right of the
# related position one step to the right
sql, vals = cls._get_sql_newpath_in_branches(node.path,
cls._inc_path(
node.path))
stmts.append((sql, vals))
if movebranch:
if oldpath.startswith(node.path):
# if moving to a parent, update oldpath since we just
# increased the path of the entire branch
oldpath = vals[0] + oldpath[len(vals[0]):]
if target.path.startswith(node.path):
# and if we moved the target, update the object
# django made for us, since the update won't do it
# maybe useful in loops
target.path = vals[0] + target.path[len(vals[0]):]
if movebranch:
# node to move
if tempnewpath:
stmts.append(cls._get_sql_newpath_in_branches(tempnewpath,
newpath))
else:
stmts.append(cls._get_sql_newpath_in_branches(oldpath,
newpath))
return oldpath, newpath
def _fix_move_to_child(self, pos, target):
"""Update preliminar vars in :meth:`move` when moving to a child"""
newdepth = target.depth
newpos = None
siblings = []
if pos in ('first-child', 'last-child', 'sorted-child'):
# moving to a child
parent = target
newdepth += 1
if target.is_leaf():
# moving as a target's first child
newpos = 1
pos = 'first-sibling'
siblings = self.__class__.objects.none()
else:
target = target.get_last_child()
pos = {'first-child': 'first-sibling',
'last-child': 'last-sibling',
'sorted-child': 'sorted-sibling'}[pos]
# this is not for save(), since if needed, will be handled with a
# custom UPDATE, this is only here to update django's object,
# should be useful in loops
parent.numchild += 1
return pos, target, newdepth, siblings, newpos
@classmethod
def _updates_after_move(cls, oldpath, newpath, stmts):
"""
Updates the list of sql statements needed after moving nodes.
1. :attr:`depth` updates *ONLY* needed by mysql databases (*sigh*)
2. update the number of children of parent nodes
"""
if (
cls.get_database_vendor('write') == 'mysql' and
len(oldpath) != len(newpath)
):
# no words can describe how dumb mysql is
# we must update the depth of the branch in a different query
stmts.append(cls._get_sql_update_depth_in_branch(newpath))
oldparentpath = cls._get_parent_path_from_path(oldpath)
newparentpath = cls._get_parent_path_from_path(newpath)
if (not oldparentpath and newparentpath) or\
(oldparentpath and not newparentpath) or\
(oldparentpath != newparentpath):
# node changed parent, updating count
if oldparentpath:
stmts.append(cls._get_sql_update_numchild(oldparentpath,
'dec'))
if newparentpath:
stmts.append(cls._get_sql_update_numchild(newparentpath,
'inc'))
@classmethod
def _get_sql_newpath_in_branches(cls, oldpath, newpath):
"""
:returns" The sql needed to move a branch to another position.
.. note::
The generated sql will only update the depth values if needed.
"""
vendor = cls.get_database_vendor('write')
sql1 = "UPDATE %s SET" % (
connection.ops.quote_name(cls._meta.db_table), )
# <3 "standard" sql
if vendor == 'sqlite':
# I know that the third argument in SUBSTR (LENGTH(path)) is
# awful, but sqlite fails without it:
# OperationalError: wrong number of arguments to function substr()
# even when the documentation says that 2 arguments are valid:
# http://www.sqlite.org/lang_corefunc.html
sqlpath = "%s||SUBSTR(path, %s, LENGTH(path))"
elif vendor == 'mysql':
# hooray for mysql ignoring standards in their default
# configuration!
# to make || work as it should, enable ansi mode
# http://dev.mysql.com/doc/refman/5.0/en/ansi-mode.html
sqlpath = "CONCAT(%s, SUBSTR(path, %s))"
else:
sqlpath = "%s||SUBSTR(path, %s)"
sql2 = ["path=%s" % (sqlpath, )]
vals = [newpath, len(oldpath) + 1]
if len(oldpath) != len(newpath) and vendor != 'mysql':
# when using mysql, this won't update the depth and it has to be
# done in another query
# doesn't even work with sql_mode='ANSI,TRADITIONAL'
# TODO: FIND OUT WHY?!?? right now I'm just blaming mysql
sql2.append("depth=LENGTH(%s)/%%s" % (sqlpath, ))
vals.extend([newpath, len(oldpath) + 1, cls.steplen])
sql3 = "WHERE path LIKE %s"
vals.extend([oldpath + '%'])
sql = '%s %s %s' % (sql1, ', '.join(sql2), sql3)
return sql, vals
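# Illustration of what this builds for a postgres-style backend (steplen = 4)
# when a branch rooted at '0001' is re-rooted under '00020001':
#
#     UPDATE <table> SET path=%s||SUBSTR(path, %s),
#                        depth=LENGTH(%s||SUBSTR(path, %s))/%s
#     WHERE path LIKE %s
#     vals = ['00020001', 5, '00020001', 5, 4, '0001%']
#
# Every node in the branch keeps the tail of its old path, gets the new prefix,
# and has its depth recomputed from the new path length (the depth clause is
# only added when the old and new paths differ in length).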
@classmethod
def _get_sql_update_depth_in_branch(cls, path):
"""
:returns: The sql needed to update the depth of all the nodes in a
branch.
"""
# Right now this is only used by *sigh* mysql.
sql = "UPDATE %s SET depth=LENGTH(path)/%%s"\
" WHERE path LIKE %%s" % (
connection.ops.quote_name(cls._meta.db_table), )
vals = [cls.steplen, path + '%']
return sql, vals
@classmethod
def _get_sql_update_numchild(cls, path, incdec='inc'):
""":returns: The sql needed the numchild value of a node"""
sql = "UPDATE %s SET numchild=numchild%s1"\
" WHERE path=%%s" % (
connection.ops.quote_name(cls._meta.db_table),
{'inc': '+', 'dec': '-'}[incdec])
vals = [path]
return sql, vals
class Meta:
"""Abstract model."""
abstract = True
|
|
import os
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.views.generic import *
from corehq.apps.styleguide.example_forms import (
BasicCrispyForm,
CheckboxesForm,
)
def styleguide_default(request):
return HttpResponseRedirect(reverse(MainStyleGuideView.urlname))
class MainStyleGuideView(TemplateView):
template_name = 'styleguide/home.html'
urlname = 'styleguide_home'
class BaseStyleGuideArticleView(TemplateView):
template_name = 'styleguide/base_section.html'
@property
def sections(self):
"""This will be inserted into the page context's sections variable
as a list of strings following the format
'styleguide/_includes/<section>.html'
Make sure you create the corresponding template in the styleguide app.
:return: List of the sections in order. Usually organized by
<article>/<section_name>
"""
raise NotImplementedError("please implement 'sections'")
@property
def navigation_name(self):
"""This will be inserted into the page context under
styleguide/_includes/nav/<navigation_name>.html. Make sure
you create the corresponding template in the styleguide app
when you add this.
:return: a string that is the name of the navigation section
"""
raise NotImplementedError("please implement 'navigation_name'")
@property
def section_context(self):
return {
'sections': ['styleguide/_includes/%s.html' % s
for s in self.sections],
'navigation': ('styleguide/_includes/nav/%s.html'
% self.navigation_name),
}
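# For example, a subclass returning sections = ['atoms/intro'] and
# navigation_name = 'atoms' (as AtomsStyleGuideView does below) resolves, via
# section_context above, to the templates
# 'styleguide/_includes/atoms/intro.html' and
# 'styleguide/_includes/nav/atoms.html'.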
@property
def page_context(self):
"""It's intended that you override this method when necessary to provide
any additional content that's relevant to the view specifically.
:return: a dict
"""
return {}
def example(self, filename):
examples = os.path.join(os.path.dirname(__file__), '..', 'templates', 'styleguide', 'examples')
with open(os.path.join(examples, filename), 'r', encoding='utf-8') as content:
return content.read()
def render_to_response(self, context, **response_kwargs):
context.update(self.section_context)
context.update(self.page_context)
return super(BaseStyleGuideArticleView, self).render_to_response(
context, **response_kwargs)
class AtomsStyleGuideView(BaseStyleGuideArticleView):
urlname = 'styleguide_atoms'
navigation_name = 'atoms'
@property
def sections(self):
return [
'atoms/intro',
'atoms/accessibility',
'atoms/typography',
'atoms/colors',
'atoms/icons',
'atoms/css',
]
@property
def page_context(self):
return {
'common_icons': [
{
'name': 'Common FontAwesome primary icons',
'icons': [
'fa-plus', 'fa-trash', 'fa-remove', 'fa-search',
'fa-angle-double-right', 'fa-angle-double-down',
],
},
{
'name': 'Common FontAwesome secondary icons',
'icons': [
'fa-cloud-download', 'fa-cloud-upload',
'fa-warning', 'fa-info-circle', 'fa-question-circle', 'fa-check',
'fa-external-link',
],
}
],
'custom_icons': [
{
'name': 'Custom HQ icons',
'icons': [
'fcc-flower', 'fcc-applications', 'fcc-commtrack', 'fcc-reports', 'fcc-data', 'fcc-users',
'fcc-settings', 'fcc-help', 'fcc-exchange', 'fcc-messaging', 'fcc-chart-report',
'fcc-form-report', 'fcc-datatable-report', 'fcc-piegraph-report', 'fcc-survey',
'fcc-casemgt', 'fcc-blankapp', 'fcc-globe', 'fcc-app-createform', 'fcc-app-updateform',
'fcc-app-completeform',
],
},
{
'name': 'Custom HQ icons specific to form builder',
'icons': [
'fcc-fd-text', 'fcc-fd-numeric', 'fcc-fd-data', 'fcc-fd-variable', 'fcc-fd-single-select',
'fcc-fd-single-circle', 'fcc-fd-multi-select', 'fcc-fd-multi-box', 'fcc-fd-decimal',
'fcc-fd-long', 'fcc-fd-datetime', 'fcc-fd-audio-capture', 'fcc-fd-android-intent',
'fcc-fd-signature', 'fcc-fd-multi-box', 'fcc-fd-single-circle', 'fcc-fd-hash',
'fcc-fd-external-case', 'fcc-fd-external-case-data', 'fcc-fd-expand', 'fcc-fd-collapse',
'fcc-fd-case-property', 'fcc-fd-edit-form',
],
},
],
'swatches': {
'RED': {
'main': ('e73c27', 'cc-att-neg-mid'),
'shades': [
('fbeae6', 'cc-att-neg-extra-hi'),
('fead9a', 'cc-att-neg-hi'),
('bf0712', 'cc-att-neg-low'),
('340101', 'cc-att-neg-extra-low'),
],
'inverse': True,
'name': 'Error, Negative Attention',
'description': '''
Use to highlight an error, something negative or a critical risk.
Use as text, highlights, banners or destructive buttons. Often called
"danger", as in <code>.btn-danger</code>.
''',
},
'YELLOW': {
'main': ('eec200', 'cc-light-warm-accent-mid'),
'shades': [
('fcf2cd', 'cc-light-warm-accent-extra-hi'),
('ffea8a', 'cc-light-warm-accent-hi'),
('9c6f19', 'cc-light-warm-accent-low'),
('573b00', 'cc-light-warm-accent-extra-low'),
],
'name': 'Attention',
'description': '''
Use for warning-level information, less severe than an error but still in need of
attention. Often called "warning", as in <code>.alert-warning</code>.
''',
},
'GREEN': {
'main': ('4aba32', 'cc-att-pos-mid'),
'shades': [
('e3f1df', 'cc-att-pos-extra-hi'),
('bbe5b3', 'cc-att-pos-hi'),
('118043', 'cc-att-pos-low'),
('173630', 'cc-att-pos-extra-low'),
],
'inverse': True,
'name': 'Success',
'description': '''
Use when an action has been completed successfully, primarily for messaging.
Rarely used for interactive elements like buttons. Used in classes such as
<code>.alert-success</code>.
''',
},
'BLACK': {
'main': ('1c2126', 'cc-text'),
'inverse': True,
'name': 'Ink Black',
'description': "Default text color. Also used for footer.",
},
'BACKGROUND': {
'main': ('f2f2f1', 'cc-bg'),
'name': 'Background',
'description': '''
Used for backgrounds that are light but distinct from the default white background,
such as panel headers.
''',
},
'ACTION': {
'main': ('5c6ac5', 'call-to-action-mid'),
'shades': [
('f4f5fa', 'call-to-action-extra-hi'),
('b4bcf5', 'call-to-action-hi'),
('212f78', 'call-to-action-low'),
('000639', 'call-to-action-extra-low'),
],
'inverse': True,
'name': 'Call to Action',
'description': '''
Use for buttons, checkmarks, radio buttons or actionable primary icons.
Do not use for text links. Used for <code>.btn-primary</code>.
''',
},
'ACCENT_TEAL': {
'main': ('00bdc5', 'cc-light-cool-accent-mid'),
'shades': [
('ccf3f4', 'cc-light-cool-accent-hi'),
('00799a', 'cc-light-cool-accent-low'),
],
'inverse': True,
'name': 'Accent Teal',
'description': '''
Use for primary button on dark backgrounds.
Use sparingly for secondary buttons, typically buttons indicating a download or upload.
Corresponds with "info" classes like <code>.btn-info</code>.
''',
},
'SIGNUP_PURPLE': {
'main': ('43467F', 'color-purple-dark'),
'inverse': True,
'name': 'Signup Purple',
'description': "Use for banners or interactive elements in the signup and registration flow.",
},
'SIGNUP_PURPLE_INVERSE': {
'main': ('E3D0FF', 'color-purple-dark-inverse'),
'name': '',
'description': "Corresponds to signup purple."
},
'NEUTRAL': {
'main': ('685c53', 'cc-neutral-mid'),
'shades': [
('d6d6d4', 'cc-neutral-hi'),
('373534', 'cc-neutral-low'),
],
'inverse': True,
'name': 'Neutral',
'description': '''
Use for neutral visual indicators, typically borders or backgrounds.
''',
},
'BLUE': {
'main': ('004ebc', 'cc-brand-mid'),
'shades': [
('bcdeff', 'cc-brand-hi'),
('002c5f', 'cc-brand-low'),
],
'inverse': True,
'name': 'Link, Selection',
'description': '''
Use for text links or to indicate that something is selected. Used in <code>.active</code>.
''',
},
'ACCENT_PURPLE': {
'main': ('9060c8', 'cc-dark-cool-accent-mid'),
'shades': [
('d6c5ea', 'cc-dark-cool-accent-hi'),
('5d3f82', 'cc-dark-cool-accent-low'),
],
'inverse': True,
'name': 'Accent Purple',
'description': '''
Avoid. Used occasionally for billing, web apps, and other unusual cases.
''',
},
'ACCENT_ORANGE': {
'main': ('ff8400', 'cc-dark-warm-accent-mid'),
'shades': [
('ffe3c2', 'cc-dark-warm-accent-hi'),
('994f00', 'cc-dark-warm-accent-low'),
],
'inverse': True,
'name': 'Accent Orange',
'description': '''
Avoid. Used occasionally for billing, web apps, and other unusual cases.
''',
},
},
}
class MoleculesStyleGuideView(BaseStyleGuideArticleView):
urlname = 'styleguide_molecules'
navigation_name = 'molecules'
@property
def sections(self):
return [
'molecules/intro',
'molecules/buttons',
'molecules/selections',
'molecules/checkboxes',
'molecules/modals',
'molecules/pagination',
'molecules/search_box',
'molecules/inline_edit',
'molecules/feedback',
]
@property
def page_context(self):
return {
'checkboxes_form': CheckboxesForm(),
'examples': {
'selections': {
'button_group': self.example('button_group.html'),
'select2': self.example('select2.html'),
'multiselect': self.example('multiselect.html'),
},
'checkbox_in_form': self.example('checkbox_in_form.html'),
'lonely_checkbox': self.example('lonely_checkbox.html'),
'modals': self.example('modals.html'),
'pagination': self.example('pagination.html'),
'search_box': self.example('search_box.html'),
'inline_edit': self.example('inline_edit.html'),
'feedback': self.example('feedback.html'),
},
}
class OrganismsStyleGuideView(BaseStyleGuideArticleView):
urlname = 'styleguide_organisms'
navigation_name = 'organisms'
@property
def sections(self):
return [
'organisms/intro',
'organisms/forms',
'organisms/tables',
]
@property
def page_context(self):
return {
'basic_crispy_form': BasicCrispyForm(),
'examples': {
'html_form': self.example('html_form.html'),
'error_form': self.example('error_form.html'),
'basic_table': self.example('basic_table.html'),
'complex_table': self.example('complex_table.html'),
},
}
class PagesStyleGuideView(BaseStyleGuideArticleView):
urlname = 'styleguide_pages'
navigation_name = 'pages'
@property
def sections(self):
return [
'pages/intro',
'pages/navigation',
'pages/class_based',
'pages/functional',
]
@property
def page_context(self):
return {
'examples': {
'header': self.example('header.html'),
'panels': self.example('panels.html'),
'tabs': self.example('tabs.html'),
},
}
|
|
import re
import unittest
from urlparse import urlsplit, urlunsplit
from xml.dom.minidom import parseString, Node
from django.conf import settings
from django.core import mail
from django.core.management import call_command
from django.core.urlresolvers import clear_url_caches
from django.db import transaction, connection
from django.http import QueryDict
from django.test import _doctest as doctest
from django.test.client import Client
from django.utils import simplejson
normalize_long_ints = lambda s: re.sub(r'(?<![\w])(\d+)L(?![\w])', '\\1', s)
normalize_decimals = lambda s: re.sub(r"Decimal\('(\d+(\.\d*)?)'\)", lambda m: "Decimal(\"%s\")" % m.groups()[0], s)
def to_list(value):
"""
Puts value into a list if it's not already one.
Returns an empty list if value is None.
"""
if value is None:
value = []
elif not isinstance(value, list):
value = [value]
return value
real_commit = transaction.commit
real_rollback = transaction.rollback
real_enter_transaction_management = transaction.enter_transaction_management
real_leave_transaction_management = transaction.leave_transaction_management
real_savepoint_commit = transaction.savepoint_commit
real_savepoint_rollback = transaction.savepoint_rollback
def nop(x=None):
return
def disable_transaction_methods():
transaction.commit = nop
transaction.rollback = nop
transaction.savepoint_commit = nop
transaction.savepoint_rollback = nop
transaction.enter_transaction_management = nop
transaction.leave_transaction_management = nop
def restore_transaction_methods():
transaction.commit = real_commit
transaction.rollback = real_rollback
transaction.savepoint_commit = real_savepoint_commit
transaction.savepoint_rollback = real_savepoint_rollback
transaction.enter_transaction_management = real_enter_transaction_management
transaction.leave_transaction_management = real_leave_transaction_management
class OutputChecker(doctest.OutputChecker):
def check_output(self, want, got, optionflags):
"The entry method for doctest output checking. Defers to a sequence of child checkers"
checks = (self.check_output_default,
self.check_output_numeric,
self.check_output_xml,
self.check_output_json)
for check in checks:
if check(want, got, optionflags):
return True
return False
def check_output_default(self, want, got, optionflags):
"The default comparator provided by doctest - not perfect, but good for most purposes"
return doctest.OutputChecker.check_output(self, want, got, optionflags)
def check_output_numeric(self, want, got, optionflags):
"""Doctest does an exact string comparison of output, which means that
some numerically equivalent values aren't equal. This check normalizes
* long integers (22L) so that they equal normal integers. (22)
* Decimals so that they are comparable, regardless of the change
made to __repr__ in Python 2.6.
"""
return doctest.OutputChecker.check_output(self,
normalize_decimals(normalize_long_ints(want)),
normalize_decimals(normalize_long_ints(got)),
optionflags)
def check_output_xml(self, want, got, optionsflags):
"""Tries to do a 'xml-comparision' of want and got. Plain string
comparision doesn't always work because, for example, attribute
ordering should not be important.
Based on http://codespeak.net/svn/lxml/trunk/src/lxml/doctestcompare.py
"""
_norm_whitespace_re = re.compile(r'[ \t\n][ \t\n]+')
def norm_whitespace(v):
return _norm_whitespace_re.sub(' ', v)
def child_text(element):
return ''.join([c.data for c in element.childNodes
if c.nodeType == Node.TEXT_NODE])
def children(element):
return [c for c in element.childNodes
if c.nodeType == Node.ELEMENT_NODE]
def norm_child_text(element):
return norm_whitespace(child_text(element))
def attrs_dict(element):
return dict(element.attributes.items())
def check_element(want_element, got_element):
if want_element.tagName != got_element.tagName:
return False
if norm_child_text(want_element) != norm_child_text(got_element):
return False
if attrs_dict(want_element) != attrs_dict(got_element):
return False
want_children = children(want_element)
got_children = children(got_element)
if len(want_children) != len(got_children):
return False
for want, got in zip(want_children, got_children):
if not check_element(want, got):
return False
return True
want, got = self._strip_quotes(want, got)
want = want.replace('\\n','\n')
got = got.replace('\\n','\n')
# If the string is not a complete xml document, we may need to add a
# root element. This allows us to compare fragments, like "<foo/><bar/>"
if not want.startswith('<?xml'):
wrapper = '<root>%s</root>'
want = wrapper % want
got = wrapper % got
# Parse the want and got strings, and compare the parsings.
try:
want_root = parseString(want).firstChild
got_root = parseString(got).firstChild
except:
return False
return check_element(want_root, got_root)
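# For example, these two fragments compare equal under check_output_xml:
# attributes are compared as a dict (so their order is irrelevant) and runs of
# whitespace in text content are collapsed before comparison.
#
#     want: <a href="x" id="1">hi</a>
#     got:  <a id="1" href="x">hi</a>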
def check_output_json(self, want, got, optionsflags):
"Tries to compare want and got as if they were JSON-encoded data"
want, got = self._strip_quotes(want, got)
try:
want_json = simplejson.loads(want)
got_json = simplejson.loads(got)
except:
return False
return want_json == got_json
def _strip_quotes(self, want, got):
"""
Strip quotes of doctests output values:
>>> o = OutputChecker()
>>> o._strip_quotes("'foo'")
"foo"
>>> o._strip_quotes('"foo"')
"foo"
>>> o._strip_quotes("u'foo'")
"foo"
>>> o._strip_quotes('u"foo"')
"foo"
"""
def is_quoted_string(s):
s = s.strip()
return (len(s) >= 2
and s[0] == s[-1]
and s[0] in ('"', "'"))
def is_quoted_unicode(s):
s = s.strip()
return (len(s) >= 3
and s[0] == 'u'
and s[1] == s[-1]
and s[1] in ('"', "'"))
if is_quoted_string(want) and is_quoted_string(got):
want = want.strip()[1:-1]
got = got.strip()[1:-1]
elif is_quoted_unicode(want) and is_quoted_unicode(got):
want = want.strip()[2:-1]
got = got.strip()[2:-1]
return want, got
class DocTestRunner(doctest.DocTestRunner):
def __init__(self, *args, **kwargs):
doctest.DocTestRunner.__init__(self, *args, **kwargs)
self.optionflags = doctest.ELLIPSIS
def report_unexpected_exception(self, out, test, example, exc_info):
doctest.DocTestRunner.report_unexpected_exception(self, out, test,
example, exc_info)
# Rollback, in case of database errors. Otherwise they'd have
# side effects on other tests.
transaction.rollback_unless_managed()
class TransactionTestCase(unittest.TestCase):
def _pre_setup(self):
"""Performs any pre-test setup. This includes:
* Flushing the database.
* If the Test Case class has a 'fixtures' member, installing the
named fixtures.
* If the Test Case class has a 'urls' member, replace the
ROOT_URLCONF with it.
* Clearing the mail test outbox.
"""
self._fixture_setup()
self._urlconf_setup()
mail.outbox = []
def _fixture_setup(self):
call_command('flush', verbosity=0, interactive=False)
if hasattr(self, 'fixtures'):
# We have to use this slightly awkward syntax due to the fact
# that we're using *args and **kwargs together.
call_command('loaddata', *self.fixtures, **{'verbosity': 0})
def _urlconf_setup(self):
if hasattr(self, 'urls'):
self._old_root_urlconf = settings.ROOT_URLCONF
settings.ROOT_URLCONF = self.urls
clear_url_caches()
def __call__(self, result=None):
"""
Wrapper around default __call__ method to perform common Django test
set up. This means that user-defined Test Cases aren't required to
include a call to super().setUp().
"""
self.client = Client()
try:
self._pre_setup()
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
import sys
result.addError(self, sys.exc_info())
return
super(TransactionTestCase, self).__call__(result)
try:
self._post_teardown()
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
import sys
result.addError(self, sys.exc_info())
return
def _post_teardown(self):
""" Performs any post-test things. This includes:
* Putting back the original ROOT_URLCONF if it was changed.
"""
self._fixture_teardown()
self._urlconf_teardown()
def _fixture_teardown(self):
pass
def _urlconf_teardown(self):
if hasattr(self, '_old_root_urlconf'):
settings.ROOT_URLCONF = self._old_root_urlconf
clear_url_caches()
def assertRedirects(self, response, expected_url, status_code=302,
target_status_code=200, host=None):
"""Asserts that a response redirected to a specific URL, and that the
redirect URL can be loaded.
Note that assertRedirects won't work for external links since it uses
TestClient to do a request.
"""
self.assertEqual(response.status_code, status_code,
("Response didn't redirect as expected: Response code was %d"
" (expected %d)" % (response.status_code, status_code)))
url = response['Location']
scheme, netloc, path, query, fragment = urlsplit(url)
e_scheme, e_netloc, e_path, e_query, e_fragment = urlsplit(expected_url)
if not (e_scheme or e_netloc):
expected_url = urlunsplit(('http', host or 'testserver', e_path,
e_query, e_fragment))
self.assertEqual(url, expected_url,
"Response redirected to '%s', expected '%s'" % (url, expected_url))
# Get the redirection page, using the same client that was used
# to obtain the original response.
redirect_response = response.client.get(path, QueryDict(query))
self.assertEqual(redirect_response.status_code, target_status_code,
("Couldn't retrieve redirection page '%s': response code was %d"
" (expected %d)") %
(path, redirect_response.status_code, target_status_code))
def assertContains(self, response, text, count=None, status_code=200):
"""
Asserts that a response indicates that a page was retrieved
successfully, (i.e., the HTTP status code was as expected), and that
``text`` occurs ``count`` times in the content of the response.
If ``count`` is None, the count doesn't matter - the assertion is true
if the text occurs at least once in the response.
"""
self.assertEqual(response.status_code, status_code,
"Couldn't retrieve page: Response code was %d (expected %d)'" %
(response.status_code, status_code))
real_count = response.content.count(text)
if count is not None:
self.assertEqual(real_count, count,
"Found %d instances of '%s' in response (expected %d)" %
(real_count, text, count))
else:
self.failUnless(real_count != 0,
"Couldn't find '%s' in response" % text)
def assertNotContains(self, response, text, status_code=200):
"""
Asserts that a response indicates that a page was retrieved
successfully, (i.e., the HTTP status code was as expected), and that
``text`` doesn't occur in the content of the response.
"""
self.assertEqual(response.status_code, status_code,
"Couldn't retrieve page: Response code was %d (expected %d)'" %
(response.status_code, status_code))
self.assertEqual(response.content.count(text), 0,
"Response should not contain '%s'" % text)
def assertFormError(self, response, form, field, errors):
"""
Asserts that a form used to render the response has a specific field
error.
"""
# Put context(s) into a list to simplify processing.
contexts = to_list(response.context)
if not contexts:
self.fail('Response did not use any contexts to render the'
' response')
# Put error(s) into a list to simplify processing.
errors = to_list(errors)
# Search all contexts for the error.
found_form = False
for i,context in enumerate(contexts):
if form not in context:
continue
found_form = True
for err in errors:
if field:
if field in context[form].errors:
field_errors = context[form].errors[field]
self.failUnless(err in field_errors,
"The field '%s' on form '%s' in"
" context %d does not contain the"
" error '%s' (actual errors: %s)" %
(field, form, i, err,
repr(field_errors)))
elif field in context[form].fields:
self.fail("The field '%s' on form '%s' in context %d"
" contains no errors" % (field, form, i))
else:
self.fail("The form '%s' in context %d does not"
" contain the field '%s'" %
(form, i, field))
else:
non_field_errors = context[form].non_field_errors()
self.failUnless(err in non_field_errors,
"The form '%s' in context %d does not contain the"
" non-field error '%s' (actual errors: %s)" %
(form, i, err, non_field_errors))
if not found_form:
self.fail("The form '%s' was not used to render the response" %
form)
def assertTemplateUsed(self, response, template_name):
"""
Asserts that the template with the provided name was used in rendering
the response.
"""
template_names = [t.name for t in to_list(response.template)]
if not template_names:
self.fail('No templates used to render the response')
self.failUnless(template_name in template_names,
(u"Template '%s' was not a template used to render the response."
u" Actual template(s) used: %s") % (template_name,
u', '.join(template_names)))
def assertTemplateNotUsed(self, response, template_name):
"""
Asserts that the template with the provided name was NOT used in
rendering the response.
"""
template_names = [t.name for t in to_list(response.template)]
self.failIf(template_name in template_names,
(u"Template '%s' was used unexpectedly in rendering the"
u" response") % template_name)
class TestCase(TransactionTestCase):
"""
Does basically the same as TransactionTestCase, but surrounds every test
with a transaction, monkey-patches the real transaction management routines to
do nothing, and rolls back the test transaction at the end of the test. You have
to use TransactionTestCase if you need transaction management inside a test.
"""
def _fixture_setup(self):
if not settings.DATABASE_SUPPORTS_TRANSACTIONS:
return super(TestCase, self)._fixture_setup()
transaction.enter_transaction_management()
transaction.managed(True)
disable_transaction_methods()
from django.contrib.sites.models import Site
Site.objects.clear_cache()
if hasattr(self, 'fixtures'):
call_command('loaddata', *self.fixtures, **{
'verbosity': 0,
'commit': False
})
def _fixture_teardown(self):
if not settings.DATABASE_SUPPORTS_TRANSACTIONS:
return super(TestCase, self)._fixture_teardown()
restore_transaction_methods()
transaction.rollback()
transaction.leave_transaction_management()
connection.close()
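# Usage sketch (illustrative; the model, fixture and URL names are
# hypothetical): a test subclassing the transactional TestCase above. Fixtures
# are loaded inside the wrapping transaction and everything is rolled back in
# _fixture_teardown, so the database is left untouched between tests.
#
#     class AnimalTests(TestCase):
#         fixtures = ['animals.json']
#
#         def test_index(self):
#             response = self.client.get('/animals/')
#             self.assertContains(response, 'lion')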
|
|
##################################################################
# Code for testing the variational Multi-Stage Generative Model. #
##################################################################
from __future__ import print_function, division
# basic python
import cPickle as pickle
from PIL import Image
import numpy as np
import numpy.random as npr
from collections import OrderedDict
# theano business
import theano
import theano.tensor as T
# blocks stuff
from blocks.initialization import Constant, IsotropicGaussian, Orthogonal
from blocks.filter import VariableFilter
from blocks.graph import ComputationGraph
from blocks.roles import PARAMETER
from blocks.model import Model
from blocks.bricks import Tanh, Identity, Rectifier
from blocks.bricks.cost import BinaryCrossEntropy
from blocks.bricks.recurrent import SimpleRecurrent, LSTM
# phil's sweetness
import utils
from BlocksModels import *
from DKCode import get_adam_updates, get_adadelta_updates
from load_data import load_binarized_mnist
from HelperFuncs import construct_masked_data, shift_and_scale_into_01, \
row_shuffle, to_fX
###################################
###################################
## HELPER FUNCTIONS FOR SAMPLING ##
###################################
###################################
def scale_norm(arr):
arr = arr - arr.min()
scale = (arr.max() - arr.min())
# normalize into [0, 1]; img_grid() multiplies by 255 before the uint8 cast
return arr / scale
def img_grid(arr, global_scale=True):
N, height, width = arr.shape
rows = int(np.sqrt(N))
cols = int(np.sqrt(N))
if rows*cols < N:
cols = cols + 1
if rows*cols < N:
rows = rows + 1
total_height = rows * height
total_width = cols * width
if global_scale:
arr = scale_norm(arr)
I = np.zeros((total_height, total_width))
for i in xrange(N):
r = i // cols
c = i % cols
if global_scale:
this = arr[i]
else:
this = scale_norm(arr[i])
offset_y, offset_x = r*height, c*width
I[offset_y:(offset_y+height), offset_x:(offset_x+width)] = this
I = (255*I).astype(np.uint8)
return Image.fromarray(I)
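# Example of the grid arithmetic above: for N = 100 images of 28x28 pixels,
# rows = cols = 10 and the output canvas is 280x280; for N = 20, sqrt(20)
# truncates to 4, cols is bumped to 5, and the canvas is 4 rows by 5 columns.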
###########################
###########################
## TEST MNIST IMPUTATION ##
###########################
###########################
def test_sgm_mnist(step_type='add', occ_dim=14, drop_prob=0.0, attention=False):
##########################
# Get some training data #
##########################
rng = np.random.RandomState(1234)
Xtr, Xva, Xte = load_binarized_mnist(data_path='./data/')
Xtr = np.vstack((Xtr, Xva))
Xva = Xte
#del Xte
tr_samples = Xtr.shape[0]
va_samples = Xva.shape[0]
batch_size = 200
############################################################
# Setup some parameters for the Iterative Refinement Model #
############################################################
x_dim = Xtr.shape[1]
writer_dim = 250
reader_dim = 250
dyn_dim = 250
primary_dim = 500
guide_dim = 500
z_dim = 100
n_iter = 20
dp_int = int(100.0 * drop_prob)
rnninits = {
'weights_init': IsotropicGaussian(0.01),
'biases_init': Constant(0.),
}
inits = {
'weights_init': IsotropicGaussian(0.01),
'biases_init': Constant(0.),
}
att_tag = "NA" # attention not implemented yet
# reader MLP provides input to the dynamics LSTM update
reader_mlp = MLP([Rectifier(), Rectifier(), None], \
[(x_dim + z_dim), reader_dim, reader_dim, 4*dyn_dim], \
name="reader_mlp", **inits)
# writer MLP applies changes to the generation workspace
writer_mlp = MLP([Rectifier(), Rectifier(), None], \
[(dyn_dim + z_dim), writer_dim, writer_dim, x_dim], \
name="writer_mlp", **inits)
# MLPs for computing conditionals over z
primary_policy = CondNet([Rectifier(), Rectifier()], \
[(dyn_dim + x_dim), primary_dim, primary_dim, z_dim], \
name="primary_policy", **inits)
guide_policy = CondNet([Rectifier(), Rectifier()], \
[(dyn_dim + 2*x_dim), guide_dim, guide_dim, z_dim], \
name="guide_policy", **inits)
# LSTM providing the shared recurrent dynamics
shared_dynamics = BiasedLSTM(dim=dyn_dim, ig_bias=2.0, fg_bias=2.0, \
name="shared_dynamics", **rnninits)
model = SeqGenModel(
n_iter,
step_type=step_type, # step_type can be 'add' or 'jump'
reader_mlp=reader_mlp,
writer_mlp=writer_mlp,
primary_policy=primary_policy,
guide_policy=guide_policy,
shared_dynamics=shared_dynamics)
model.initialize()
# build the cost gradients, training function, samplers, etc.
model.build_model_funcs()
#model.load_model_params(f_name="TBSGM_IMP_MNIST_PARAMS_OD{}_DP{}_{}_{}.pkl".format(occ_dim, dp_int, step_type, att_tag))
################################################################
# Apply some updates, to check that they aren't totally broken #
################################################################
print("Beginning to train the model...")
out_file = open("TBSGM_IMP_MNIST_RESULTS_OD{}_DP{}_{}_{}.txt".format(occ_dim, dp_int, step_type, att_tag), 'wb')
out_file.flush()
costs = [0. for i in range(10)]
learn_rate = 0.0002
momentum = 0.5
batch_idx = np.arange(batch_size) + tr_samples
for i in range(250000):
scale = min(1.0, ((i+1) / 1000.0))
if (((i + 1) % 10000) == 0):
learn_rate = learn_rate * 0.95
if (i > 10000):
momentum = 0.90
else:
momentum = 0.50
# get the indices of training samples for this batch update
batch_idx += batch_size
if (np.max(batch_idx) >= tr_samples):
# we finished an "epoch", so we rejumble the training set
Xtr = row_shuffle(Xtr)
batch_idx = np.arange(batch_size)
# set sgd and objective function hyperparams for this update
zero_ary = np.zeros((1,))
model.lr.set_value(to_fX(zero_ary + learn_rate))
model.mom_1.set_value(to_fX(zero_ary + momentum))
model.mom_2.set_value(to_fX(zero_ary + 0.99))
# perform a minibatch update and record the cost for this batch
Xb = to_fX(Xtr.take(batch_idx, axis=0))
_, Xb, Mb = construct_masked_data(Xb, drop_prob=drop_prob, \
occ_dim=occ_dim, data_mean=None)
result = model.train_joint(Xb, Mb)
costs = [(costs[j] + result[j]) for j in range(len(result))]
if ((i % 200) == 0):
costs = [(v / 200.0) for v in costs]
str1 = "-- batch {0:d} --".format(i)
str2 = " total_cost: {0:.4f}".format(costs[0])
str3 = " nll_bound : {0:.4f}".format(costs[1])
str4 = " nll_term : {0:.4f}".format(costs[2])
str5 = " kld_q2p : {0:.4f}".format(costs[3])
str6 = " kld_p2q : {0:.4f}".format(costs[4])
str7 = " reg_term : {0:.4f}".format(costs[5])
joint_str = "\n".join([str1, str2, str3, str4, str5, str6, str7])
print(joint_str)
out_file.write(joint_str+"\n")
out_file.flush()
costs = [0.0 for v in costs]
if ((i % 1000) == 0):
model.save_model_params("TBSGM_IMP_MNIST_PARAMS_OD{}_DP{}_{}_{}.pkl".format(occ_dim, dp_int, step_type, att_tag))
# compute a small-sample estimate of NLL bound on validation set
Xva = row_shuffle(Xva)
Xb = to_fX(Xva[:5000])
_, Xb, Mb = construct_masked_data(Xb, drop_prob=drop_prob, \
occ_dim=occ_dim, data_mean=None)
va_costs = model.compute_nll_bound(Xb, Mb)
str1 = " va_nll_bound : {}".format(va_costs[1])
str2 = " va_nll_term : {}".format(va_costs[2])
str3 = " va_kld_q2p : {}".format(va_costs[3])
joint_str = "\n".join([str1, str2, str3])
print(joint_str)
out_file.write(joint_str+"\n")
out_file.flush()
# draw some independent samples from the model
Xb = to_fX(Xva[:100])
_, Xb, Mb = construct_masked_data(Xb, drop_prob=drop_prob, \
occ_dim=occ_dim, data_mean=None)
samples, _ = model.do_sample(Xb, Mb)
n_iter, N, D = samples.shape
samples = samples.reshape( (n_iter, N, 28, 28) )
for j in xrange(n_iter):
img = img_grid(samples[j,:,:,:])
img.save("TBSGM-IMP-MNIST-OD{0:d}-DP{1:d}-{2:s}-samples-{3:03d}.png".format(occ_dim, dp_int, step_type, j))
if __name__=="__main__":
test_sgm_mnist(step_type='add', occ_dim=0, drop_prob=0.85)
|
|
# Copyright (c) 2013 Dell Inc.
# Copyright 2013 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Volume driver for Dell EqualLogic Storage."""
import functools
import random
import eventlet
from eventlet import greenthread
import greenlet
from oslo.config import cfg
from cinder import exception
from cinder.i18n import _
from cinder.openstack.common import excutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils
from cinder import ssh_utils
from cinder import utils
from cinder.volume.drivers.san import SanISCSIDriver
LOG = logging.getLogger(__name__)
eqlx_opts = [
cfg.StrOpt('eqlx_group_name',
default='group-0',
help='Group name to use for creating volumes'),
cfg.IntOpt('eqlx_cli_timeout',
default=30,
help='Timeout for the Group Manager cli command execution'),
cfg.IntOpt('eqlx_cli_max_retries',
default=5,
help='Maximum retry count for reconnection'),
cfg.BoolOpt('eqlx_use_chap',
default=False,
help='Use CHAP authentication for targets?'),
cfg.StrOpt('eqlx_chap_login',
default='admin',
help='Existing CHAP account name'),
cfg.StrOpt('eqlx_chap_password',
default='password',
help='Password for specified CHAP account name',
secret=True),
cfg.StrOpt('eqlx_pool',
default='default',
help='Pool in which volumes will be created')
]
CONF = cfg.CONF
CONF.register_opts(eqlx_opts)
def with_timeout(f):
@functools.wraps(f)
def __inner(self, *args, **kwargs):
timeout = kwargs.pop('timeout', None)
gt = eventlet.spawn(f, self, *args, **kwargs)
if timeout is None:
return gt.wait()
else:
kill_thread = eventlet.spawn_after(timeout, gt.kill)
try:
res = gt.wait()
except greenlet.GreenletExit:
raise exception.VolumeBackendAPIException(
data="Command timed out")
else:
kill_thread.cancel()
return res
return __inner
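# Usage sketch: any method wrapped with @with_timeout accepts an extra
# 'timeout' keyword (seconds). The call runs in a spawned greenthread; if it
# doesn't finish in time the thread is killed and VolumeBackendAPIException is
# raised. _ssh_execute below is invoked this way with
# timeout=self.configuration.eqlx_cli_timeout.
#
#     @with_timeout
#     def _slow_op(self, arg):            # hypothetical example method
#         return do_something(arg)
#
#     driver._slow_op('x', timeout=30)    # killed if it takes longer than 30s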
class DellEQLSanISCSIDriver(SanISCSIDriver):
"""Implements commands for Dell EqualLogic SAN ISCSI management.
To enable the driver add the following line to the cinder configuration:
volume_driver=cinder.volume.drivers.eqlx.DellEQLSanISCSIDriver
Driver's prerequisites are:
- a separate volume group set up and running on the SAN
- SSH access to the SAN
- a special user must be created which must be able to
- create/delete volumes and snapshots;
- clone snapshots into volumes;
- modify volume access records;
The access credentials to the SAN are provided by means of the following
flags
san_ip=<ip_address>
san_login=<user name>
san_password=<user password>
san_private_key=<file containing SSH private key>
Thin provision of volumes is enabled by default, to disable it use:
san_thin_provision=false
In order to use target CHAP authentication (which is disabled by default)
SAN administrator must create a local CHAP user and specify the following
flags for the driver:
eqlx_use_chap=true
eqlx_chap_login=<chap_login>
eqlx_chap_password=<chap_password>
The eqlx_group_name parameter represents the CLI prompt text without the
trailing '>'. E.g. if the prompt looks like 'group-0>', then the
parameter must be set to 'group-0'
Also, the default CLI command execution timeout is 30 secs. Adjustable by
eqlx_cli_timeout=<seconds>
"""
VERSION = "1.0.0"
def __init__(self, *args, **kwargs):
super(DellEQLSanISCSIDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(eqlx_opts)
self._group_ip = None
self.sshpool = None
def _get_output(self, chan):
out = ''
ending = '%s> ' % self.configuration.eqlx_group_name
while not out.endswith(ending):
out += chan.recv(102400)
LOG.debug("CLI output\n%s", out)
return out.splitlines()
def _get_prefixed_value(self, lines, prefix):
for line in lines:
if line.startswith(prefix):
return line[len(prefix):]
return
@with_timeout
def _ssh_execute(self, ssh, command, *arg, **kwargs):
transport = ssh.get_transport()
chan = transport.open_session()
chan.invoke_shell()
LOG.debug("Reading CLI MOTD")
self._get_output(chan)
cmd = 'stty columns 255'
LOG.debug("Setting CLI terminal width: '%s'", cmd)
chan.send(cmd + '\r')
out = self._get_output(chan)
LOG.debug("Sending CLI command: '%s'", command)
chan.send(command + '\r')
out = self._get_output(chan)
chan.close()
if any(line.startswith(('% Error', 'Error:')) for line in out):
desc = _("Error executing EQL command")
cmdout = '\n'.join(out)
LOG.error(cmdout)
raise processutils.ProcessExecutionError(
stdout=cmdout, cmd=command, description=desc)
return out
def _run_ssh(self, cmd_list, attempts=1):
utils.check_ssh_injection(cmd_list)
command = ' '.join(cmd_list)
if not self.sshpool:
password = self.configuration.san_password
privatekey = self.configuration.san_private_key
min_size = self.configuration.ssh_min_pool_conn
max_size = self.configuration.ssh_max_pool_conn
self.sshpool = ssh_utils.SSHPool(
self.configuration.san_ip,
self.configuration.san_ssh_port,
self.configuration.ssh_conn_timeout,
self.configuration.san_login,
password=password,
privatekey=privatekey,
min_size=min_size,
max_size=max_size)
try:
total_attempts = attempts
with self.sshpool.item() as ssh:
while attempts > 0:
attempts -= 1
try:
LOG.info(_('EQL-driver: executing "%s"') % command)
return self._ssh_execute(
ssh, command,
timeout=self.configuration.eqlx_cli_timeout)
except processutils.ProcessExecutionError:
raise
except Exception as e:
LOG.exception(e)
greenthread.sleep(random.randint(20, 500) / 100.0)
msg = (_("SSH Command failed after '%(total_attempts)r' "
"attempts : '%(command)s'") %
{'total_attempts': total_attempts, 'command': command})
raise exception.VolumeBackendAPIException(data=msg)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_("Error running SSH command: %s") % command)
def _eql_execute(self, *args, **kwargs):
return self._run_ssh(
args, attempts=self.configuration.eqlx_cli_max_retries)
def _get_volume_data(self, lines):
prefix = 'iSCSI target name is '
target_name = self._get_prefixed_value(lines, prefix)[:-1]
lun_id = "%s:%s,1 %s 0" % (self._group_ip, '3260', target_name)
model_update = {}
model_update['provider_location'] = lun_id
if self.configuration.eqlx_use_chap:
model_update['provider_auth'] = 'CHAP %s %s' % \
(self.configuration.eqlx_chap_login,
self.configuration.eqlx_chap_password)
return model_update
def _get_space_in_gb(self, val):
scale = 1.0
part = 'GB'
if val.endswith('MB'):
scale = 1.0 / 1024
part = 'MB'
elif val.endswith('TB'):
scale = 1.0 * 1024
part = 'TB'
return scale * float(val.partition(part)[0])
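# Worked examples of the conversion above (output is always GB):
#     _get_space_in_gb('512MB')  ->  0.5     (512 * 1/1024)
#     _get_space_in_gb('7.5GB')  ->  7.5
#     _get_space_in_gb('2TB')    ->  2048.0  (2 * 1024)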
def _update_volume_stats(self):
"""Retrieve stats info from eqlx group."""
LOG.debug("Updating volume stats")
data = {}
backend_name = "eqlx"
if self.configuration:
backend_name = self.configuration.safe_get('volume_backend_name')
data["volume_backend_name"] = backend_name or 'eqlx'
data["vendor_name"] = 'Dell'
data["driver_version"] = self.VERSION
data["storage_protocol"] = 'iSCSI'
data['reserved_percentage'] = 0
data['QoS_support'] = False
data['total_capacity_gb'] = 'infinite'
data['free_capacity_gb'] = 'infinite'
for line in self._eql_execute('pool', 'select',
self.configuration.eqlx_pool, 'show'):
if line.startswith('TotalCapacity:'):
out_tup = line.rstrip().partition(' ')
data['total_capacity_gb'] = self._get_space_in_gb(out_tup[-1])
if line.startswith('FreeSpace:'):
out_tup = line.rstrip().partition(' ')
data['free_capacity_gb'] = self._get_space_in_gb(out_tup[-1])
self._stats = data
def _check_volume(self, volume):
"""Check if the volume exists on the Array."""
command = ['volume', 'select', volume['name'], 'show']
try:
self._eql_execute(*command)
except processutils.ProcessExecutionError as err:
with excutils.save_and_reraise_exception():
if err.stdout.find('does not exist.\n') > -1:
LOG.debug('Volume %s does not exist, '
'it may have already been deleted',
volume['name'])
raise exception.VolumeNotFound(volume_id=volume['id'])
def _parse_connection(self, connector, out):
"""Returns the correct connection id for the initiator.
This parses the cli output from the command
'volume select <volumename> access show'
and returns the correct connection id.
"""
lines = [line for line in out if line != '']
#Every record has 2 lines
for i in xrange(0, len(lines), 2):
try:
int(lines[i][0])
#sanity check
if len(lines[i + 1].split()) == 1:
check = lines[i].split()[1] + lines[i + 1].strip()
if connector['initiator'] == check:
return lines[i].split()[0]
except (IndexError, ValueError):
pass # skip the line that is not a valid access record
return None
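# Illustrative sketch of the two-line access records this parser expects; the
# exact CLI formatting shown here is an assumption, not taken from the
# original source:
#
#     1 iqn.1993-08.org.debian:
#       01:abcdef0123456789
#
# The record id is lines[i].split()[0], and the initiator name is reassembled
# from the second token of the first line plus the stripped continuation line.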
def do_setup(self, context):
"""Disable cli confirmation and tune output format."""
try:
disabled_cli_features = ('confirmation', 'paging', 'events',
'formatoutput')
for feature in disabled_cli_features:
self._eql_execute('cli-settings', feature, 'off')
for line in self._eql_execute('grpparams', 'show'):
if line.startswith('Group-Ipaddress:'):
out_tup = line.rstrip().partition(' ')
self._group_ip = out_tup[-1]
LOG.info(_("EQL-driver: Setup is complete, group IP is %s"),
self._group_ip)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_('Failed to setup the Dell EqualLogic driver'))
def create_volume(self, volume):
"""Create a volume."""
try:
cmd = ['volume', 'create',
volume['name'], "%sG" % (volume['size'])]
if self.configuration.eqlx_pool != 'default':
cmd.append('pool')
cmd.append(self.configuration.eqlx_pool)
if self.configuration.san_thin_provision:
cmd.append('thin-provision')
out = self._eql_execute(*cmd)
self.add_multihost_access(volume)
return self._get_volume_data(out)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_('Failed to create volume %s'), volume['name'])
def add_multihost_access(self, volume):
"""Add multihost-access to a volume. Needed for live migration."""
try:
cmd = ['volume', 'select',
volume['name'], 'multihost-access', 'enable']
self._eql_execute(*cmd)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_('Failed to add multi-host access for volume %s'),
volume['name'])
def delete_volume(self, volume):
"""Delete a volume."""
try:
self._check_volume(volume)
self._eql_execute('volume', 'select', volume['name'], 'offline')
self._eql_execute('volume', 'delete', volume['name'])
except exception.VolumeNotFound:
LOG.warn(_('Volume %s was not found while trying to delete it'),
volume['name'])
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_('Failed to delete volume %s'), volume['name'])
def create_snapshot(self, snapshot):
""""Create snapshot of existing volume on appliance."""
try:
out = self._eql_execute('volume', 'select',
snapshot['volume_name'],
'snapshot', 'create-now')
prefix = 'Snapshot name is '
snap_name = self._get_prefixed_value(out, prefix)
self._eql_execute('volume', 'select', snapshot['volume_name'],
'snapshot', 'rename', snap_name,
snapshot['name'])
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_('Failed to create snapshot of volume %s'),
snapshot['volume_name'])
def create_volume_from_snapshot(self, volume, snapshot):
"""Create new volume from other volume's snapshot on appliance."""
try:
out = self._eql_execute('volume', 'select',
snapshot['volume_name'], 'snapshot',
'select', snapshot['name'],
'clone', volume['name'])
self.add_multihost_access(volume)
return self._get_volume_data(out)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_('Failed to create volume from snapshot %s'),
snapshot['name'])
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
try:
src_volume_name = self.configuration.\
volume_name_template % src_vref['id']
out = self._eql_execute('volume', 'select', src_volume_name,
'clone', volume['name'])
self.add_multihost_access(volume)
return self._get_volume_data(out)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_('Failed to create clone of volume %s'),
volume['name'])
def delete_snapshot(self, snapshot):
"""Delete volume's snapshot."""
try:
self._eql_execute('volume', 'select', snapshot['volume_name'],
'snapshot', 'delete', snapshot['name'])
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_('Failed to delete snapshot %(snap)s of '
'volume %(vol)s'),
{'snap': snapshot['name'],
'vol': snapshot['volume_name']})
def initialize_connection(self, volume, connector):
"""Restrict access to a volume."""
try:
cmd = ['volume', 'select', volume['name'], 'access', 'create',
'initiator', connector['initiator']]
if self.configuration.eqlx_use_chap:
cmd.extend(['authmethod', 'chap', 'username',
self.configuration.eqlx_chap_login])
self._eql_execute(*cmd)
iscsi_properties = self._get_iscsi_properties(volume)
return {
'driver_volume_type': 'iscsi',
'data': iscsi_properties
}
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_('Failed to initialize connection to volume %s'),
volume['name'])
def terminate_connection(self, volume, connector, force=False, **kwargs):
"""Remove access restrictions from a volume."""
try:
out = self._eql_execute('volume', 'select', volume['name'],
'access', 'show')
connection_id = self._parse_connection(connector, out)
if connection_id is not None:
self._eql_execute('volume', 'select', volume['name'],
'access', 'delete', connection_id)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_('Failed to terminate connection to volume %s'),
volume['name'])
def create_export(self, context, volume):
"""Create an export of a volume.
Driver has nothing to do here because the volume has already been
exported by the SAN right after its creation.
"""
pass
def ensure_export(self, context, volume):
"""Ensure an export of a volume.
Driver has nothing to do here because the volume has already been
exported by the SAN right after its creation. We will just make
sure that the volume exists on the array and issue a warning if not.
"""
try:
self._check_volume(volume)
except exception.VolumeNotFound:
LOG.warn(_('Volume %s was not found; it may have been deleted'),
volume['name'])
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_('Failed to ensure export of volume %s'),
volume['name'])
def remove_export(self, context, volume):
"""Remove an export of a volume.
Driver has nothing to do here because the volume has already been
exported by the SAN right after its creation.
Nothing to remove since there's nothing exported.
"""
pass
def extend_volume(self, volume, new_size):
"""Extend the size of the volume."""
try:
self._eql_execute('volume', 'select', volume['name'],
'size', "%sG" % new_size)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_('Failed to extend_volume %(name)s from '
'%(current_size)sGB to %(new_size)sGB'),
{'name': volume['name'],
'current_size': volume['size'],
'new_size': new_size})
def local_path(self, volume):
raise NotImplementedError()
|
|
"""
sentry.tasks.deletion
~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from celery.utils.log import get_task_logger
from sentry.utils.query import bulk_delete_objects
from sentry.tasks.base import instrumented_task, retry
logger = get_task_logger(__name__)
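# Each deletion task below removes dependent rows in small batches; when more
# work remains (has_more) and continuous=True, the task re-enqueues itself so
# a single invocation never has to delete an unbounded amount of data.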
@instrumented_task(name='sentry.tasks.deletion.delete_organization', queue='cleanup',
default_retry_delay=60 * 5, max_retries=None)
@retry
def delete_organization(object_id, continuous=True, **kwargs):
from sentry.models import (
Organization, OrganizationMember, OrganizationStatus, Team, TeamStatus
)
try:
o = Organization.objects.get(id=object_id)
except Organization.DoesNotExist:
return
if o.status == OrganizationStatus.VISIBLE:
raise ValueError('Aborting organization deletion as status is invalid')
if o.status != OrganizationStatus.DELETION_IN_PROGRESS:
o.update(status=OrganizationStatus.DELETION_IN_PROGRESS)
for team in Team.objects.filter(organization=o).order_by('id')[:1]:
logger.info('Removing Team id=%s where organization=%s', team.id, o.id)
team.update(status=TeamStatus.DELETION_IN_PROGRESS)
delete_team(team.id, continuous=False)
if continuous:
delete_organization.delay(object_id=object_id, countdown=15)
return
model_list = (OrganizationMember,)
has_more = delete_objects(model_list, relation={'organization': o}, logger=logger)
if has_more:
if continuous:
delete_organization.delay(object_id=object_id, countdown=15)
return
o.delete()
@instrumented_task(name='sentry.tasks.deletion.delete_team', queue='cleanup',
default_retry_delay=60 * 5, max_retries=None)
@retry
def delete_team(object_id, continuous=True, **kwargs):
from sentry.models import Team, TeamStatus, Project, ProjectStatus
try:
t = Team.objects.get(id=object_id)
except Team.DoesNotExist:
return
if t.status == TeamStatus.VISIBLE:
raise ValueError('Aborting team deletion as status is invalid')
if t.status != TeamStatus.DELETION_IN_PROGRESS:
t.update(status=TeamStatus.DELETION_IN_PROGRESS)
# Delete 1 project at a time since this is expensive by itself
for project in Project.objects.filter(team=t).order_by('id')[:1]:
logger.info('Removing Project id=%s where team=%s', project.id, t.id)
project.update(status=ProjectStatus.DELETION_IN_PROGRESS)
delete_project(project.id, continuous=False)
if continuous:
delete_team.delay(object_id=object_id, countdown=15)
return
t.delete()
@instrumented_task(name='sentry.tasks.deletion.delete_project', queue='cleanup',
default_retry_delay=60 * 5, max_retries=None)
@retry
def delete_project(object_id, continuous=True, **kwargs):
from sentry.models import (
Project, ProjectKey, ProjectStatus, TagKey, TagValue, GroupTagKey,
GroupTagValue, Activity, EventMapping, Group, GroupRuleStatus,
GroupHash, GroupSeen,
)
try:
p = Project.objects.get(id=object_id)
except Project.DoesNotExist:
return
if p.status == ProjectStatus.VISIBLE:
raise ValueError('Aborting project deletion as status is invalid')
if p.status != ProjectStatus.DELETION_IN_PROGRESS:
p.update(status=ProjectStatus.DELETION_IN_PROGRESS)
# XXX: remove keys first to prevent additional data from flowing in
model_list = (
ProjectKey, TagKey, TagValue, GroupTagKey, GroupTagValue, EventMapping,
Activity, GroupRuleStatus, GroupHash, GroupSeen,
)
for model in model_list:
has_more = bulk_delete_objects(model, project_id=p.id, logger=logger)
if has_more:
if continuous:
delete_project.delay(object_id=object_id, countdown=15)
return
has_more = delete_events(relation={'project_id': p.id}, logger=logger)
if has_more:
if continuous:
delete_project.delay(object_id=object_id, countdown=15)
return
model_list = (Group,)
for model in model_list:
has_more = bulk_delete_objects(model, project_id=p.id, logger=logger)
if has_more:
if continuous:
delete_project.delay(object_id=object_id, countdown=15)
return
p.delete()
@instrumented_task(name='sentry.tasks.deletion.delete_group', queue='cleanup',
default_retry_delay=60 * 5, max_retries=None)
@retry
def delete_group(object_id, continuous=True, **kwargs):
from sentry.models import (
Group, GroupHash, GroupRuleStatus, GroupTagKey, GroupTagValue,
EventMapping, GroupEmailThread,
)
try:
group = Group.objects.get(id=object_id)
except Group.DoesNotExist:
return
bulk_model_list = (
GroupHash, GroupRuleStatus, GroupTagValue, GroupTagKey,
EventMapping, GroupEmailThread,
)
for model in bulk_model_list:
has_more = bulk_delete_objects(model, group_id=object_id, logger=logger)
if has_more:
if continuous:
delete_group.delay(object_id=object_id, countdown=15)
return
has_more = delete_events(relation={'group_id': object_id}, logger=logger)
if has_more:
if continuous:
delete_group.delay(object_id=object_id, countdown=15)
return
group.delete()
@instrumented_task(name='sentry.tasks.deletion.delete_tag_key', queue='cleanup',
default_retry_delay=60 * 5, max_retries=None)
@retry
def delete_tag_key(object_id, continuous=True, **kwargs):
from sentry.models import (
GroupTagKey, GroupTagValue, TagKey, TagKeyStatus, TagValue
)
try:
tagkey = TagKey.objects.get(id=object_id)
except TagKey.DoesNotExist:
return
if tagkey.status != TagKeyStatus.DELETION_IN_PROGRESS:
tagkey.update(status=TagKeyStatus.DELETION_IN_PROGRESS)
bulk_model_list = (
GroupTagValue, GroupTagKey, TagValue
)
for model in bulk_model_list:
has_more = bulk_delete_objects(model, project_id=tagkey.project_id,
key=tagkey.key, logger=logger)
if has_more:
if continuous:
delete_tag_key.delay(object_id=object_id, countdown=15)
return
tagkey.delete()
def delete_events(relation, limit=100, logger=None):
from sentry.app import nodestore
from sentry.models import Event
has_more = False
if logger is not None:
logger.info('Removing %r objects where %r', Event, relation)
result_set = list(Event.objects.filter(**relation)[:limit])
has_more = bool(result_set)
if has_more:
# delete objects from nodestore first
node_ids = set(r.data.id for r in result_set)
nodestore.delete_multi(node_ids)
# bulk delete by id
Event.objects.filter(id__in=[r.id for r in result_set]).delete()
return has_more
def delete_objects(models, relation, limit=100, logger=None):
# This handles cascades properly
has_more = False
for model in models:
if logger is not None:
logger.info('Removing %r objects where %r', model, relation)
for obj in model.objects.filter(**relation)[:limit]:
obj.delete()
has_more = True
if has_more:
return True
return has_more
|
|
from __future__ import absolute_import, unicode_literals
import logging
import urlparse
from mopidy import models
from mopidy.audio import PlaybackState
from mopidy.core import listener
from mopidy.internal import deprecation, validation
logger = logging.getLogger(__name__)
class PlaybackController(object):
pykka_traversable = True
def __init__(self, audio, backends, core):
# TODO: these should be internal
self.backends = backends
self.core = core
self._audio = audio
self._stream_title = None
self._state = PlaybackState.STOPPED
self._current_tl_track = None
self._pending_tl_track = None
if self._audio:
self._audio.set_about_to_finish_callback(
self._on_about_to_finish_callback)
def _get_backend(self, tl_track):
if tl_track is None:
return None
uri_scheme = urlparse.urlparse(tl_track.track.uri).scheme
return self.backends.with_playback.get(uri_scheme, None)
# Properties
def get_current_tl_track(self):
"""Get the currently playing or selected track.
Returns a :class:`mopidy.models.TlTrack` or :class:`None`.
"""
return self._current_tl_track
def _set_current_tl_track(self, value):
"""Set the currently playing or selected track.
*Internal:* This is only for use by Mopidy's test suite.
"""
self._current_tl_track = value
current_tl_track = deprecation.deprecated_property(get_current_tl_track)
"""
.. deprecated:: 1.0
Use :meth:`get_current_tl_track` instead.
"""
def get_current_track(self):
"""
Get the currently playing or selected track.
Extracted from :meth:`get_current_tl_track` for convenience.
Returns a :class:`mopidy.models.Track` or :class:`None`.
"""
return getattr(self.get_current_tl_track(), 'track', None)
current_track = deprecation.deprecated_property(get_current_track)
"""
.. deprecated:: 1.0
Use :meth:`get_current_track` instead.
"""
def get_current_tlid(self):
"""
Get the currently playing or selected TLID.
Extracted from :meth:`get_current_tl_track` for convenience.
Returns a :class:`int` or :class:`None`.
.. versionadded:: 1.1
"""
return getattr(self.get_current_tl_track(), 'tlid', None)
def get_stream_title(self):
"""Get the current stream title or :class:`None`."""
return self._stream_title
def get_state(self):
"""Get The playback state."""
return self._state
def set_state(self, new_state):
"""Set the playback state.
Must be :attr:`PLAYING`, :attr:`PAUSED`, or :attr:`STOPPED`.
Possible states and transitions:
.. digraph:: state_transitions
"STOPPED" -> "PLAYING" [ label="play" ]
"STOPPED" -> "PAUSED" [ label="pause" ]
"PLAYING" -> "STOPPED" [ label="stop" ]
"PLAYING" -> "PAUSED" [ label="pause" ]
"PLAYING" -> "PLAYING" [ label="play" ]
"PAUSED" -> "PLAYING" [ label="resume" ]
"PAUSED" -> "STOPPED" [ label="stop" ]
"""
validation.check_choice(new_state, validation.PLAYBACK_STATES)
(old_state, self._state) = (self.get_state(), new_state)
logger.debug('Changing state: %s -> %s', old_state, new_state)
self._trigger_playback_state_changed(old_state, new_state)
state = deprecation.deprecated_property(get_state, set_state)
"""
.. deprecated:: 1.0
Use :meth:`get_state` and :meth:`set_state` instead.
"""
def get_time_position(self):
"""Get time position in milliseconds."""
backend = self._get_backend(self.get_current_tl_track())
if backend:
return backend.playback.get_time_position().get()
else:
return 0
time_position = deprecation.deprecated_property(get_time_position)
"""
.. deprecated:: 1.0
Use :meth:`get_time_position` instead.
"""
def get_volume(self):
"""
.. deprecated:: 1.0
Use :meth:`core.mixer.get_volume()
<mopidy.core.MixerController.get_volume>` instead.
"""
deprecation.warn('core.playback.get_volume')
return self.core.mixer.get_volume()
def set_volume(self, volume):
"""
.. deprecated:: 1.0
Use :meth:`core.mixer.set_volume()
<mopidy.core.MixerController.set_volume>` instead.
"""
deprecation.warn('core.playback.set_volume')
return self.core.mixer.set_volume(volume)
volume = deprecation.deprecated_property(get_volume, set_volume)
"""
.. deprecated:: 1.0
Use :meth:`core.mixer.get_volume()
<mopidy.core.MixerController.get_volume>` and
:meth:`core.mixer.set_volume()
<mopidy.core.MixerController.set_volume>` instead.
"""
def get_mute(self):
"""
.. deprecated:: 1.0
Use :meth:`core.mixer.get_mute()
<mopidy.core.MixerController.get_mute>` instead.
"""
deprecation.warn('core.playback.get_mute')
return self.core.mixer.get_mute()
def set_mute(self, mute):
"""
.. deprecated:: 1.0
Use :meth:`core.mixer.set_mute()
<mopidy.core.MixerController.set_mute>` instead.
"""
deprecation.warn('core.playback.set_mute')
return self.core.mixer.set_mute(mute)
mute = deprecation.deprecated_property(get_mute, set_mute)
"""
.. deprecated:: 1.0
Use :meth:`core.mixer.get_mute()
<mopidy.core.MixerController.get_mute>` and
:meth:`core.mixer.set_mute()
<mopidy.core.MixerController.set_mute>` instead.
"""
# Methods
def _on_end_of_stream(self):
self.set_state(PlaybackState.STOPPED)
self._set_current_tl_track(None)
# TODO: self._trigger_track_playback_ended?
def _on_stream_changed(self, uri):
self._stream_title = None
if self._pending_tl_track:
self._set_current_tl_track(self._pending_tl_track)
self._pending_tl_track = None
self._trigger_track_playback_started()
def _on_about_to_finish_callback(self):
"""Callback that performs a blocking actor call to the real callback.
This is passed to audio, which is allowed to call this code from the
audio thread. We pass execution into the core actor to ensure that
there is no unsafe access of state in core. This must block until
we get a response.
"""
self.core.actor_ref.ask({
'command': 'pykka_call', 'args': tuple(), 'kwargs': {},
'attr_path': ('playback', '_on_about_to_finish'),
})
def _on_about_to_finish(self):
self._trigger_track_playback_ended(self.get_time_position())
# TODO: check that we always have a current track
original_tl_track = self.get_current_tl_track()
next_tl_track = self.core.tracklist.eot_track(original_tl_track)
# TODO: only set pending if we have a backend that can play it?
# TODO: skip tracks that don't have a backend?
self._pending_tl_track = next_tl_track
backend = self._get_backend(next_tl_track)
if backend:
backend.playback.change_track(next_tl_track.track).get()
self.core.tracklist._mark_played(original_tl_track)
def _on_tracklist_change(self):
"""
Tell the playback controller that the current playlist has changed.
Used by :class:`mopidy.core.TracklistController`.
"""
if not self.core.tracklist.tl_tracks:
self.stop()
self._set_current_tl_track(None)
elif self.get_current_tl_track() not in self.core.tracklist.tl_tracks:
self._set_current_tl_track(None)
def next(self):
"""
Change to the next track.
The current playback state will be kept. If it was playing, playing
will continue. If it was paused, it will still be paused, etc.
"""
state = self.get_state()
current = self._pending_tl_track or self._current_tl_track
# TODO: move to pending track?
self._trigger_track_playback_ended(self.get_time_position())
self.core.tracklist._mark_played(self._current_tl_track)
while current:
pending = self.core.tracklist.next_track(current)
if self._change(pending, state):
break
else:
self.core.tracklist._mark_unplayable(pending)
# TODO: this could be needed to prevent a loop in rare cases
# if current == pending:
# break
current = pending
# TODO return result?
def pause(self):
"""Pause playback."""
backend = self._get_backend(self.get_current_tl_track())
if not backend or backend.playback.pause().get():
# TODO: switch to:
# backend.track(pause)
# wait for state change?
self.set_state(PlaybackState.PAUSED)
self._trigger_track_playback_paused()
def play(self, tl_track=None, tlid=None):
"""
Play the given track, or if the given tl_track and tlid are
:class:`None`, play the currently active track.
Note that the track **must** already be in the tracklist.
:param tl_track: track to play
:type tl_track: :class:`mopidy.models.TlTrack` or :class:`None`
:param tlid: TLID of the track to play
:type tlid: :class:`int` or :class:`None`
"""
if sum(o is not None for o in [tl_track, tlid]) > 1:
raise ValueError('At most one of "tl_track" and "tlid" may be set')
tl_track is None or validation.check_instance(tl_track, models.TlTrack)
tlid is None or validation.check_integer(tlid, min=1)
if tl_track:
deprecation.warn('core.playback.play:tl_track_kwarg', pending=True)
if tl_track is None and tlid is not None:
for tl_track in self.core.tracklist.get_tl_tracks():
if tl_track.tlid == tlid:
break
else:
tl_track = None
if tl_track is not None:
# TODO: allow from outside tracklist, would make sense given refs?
assert tl_track in self.core.tracklist.get_tl_tracks()
elif tl_track is None and self.get_state() == PlaybackState.PAUSED:
self.resume()
return
original = self._current_tl_track
current = self._pending_tl_track or self._current_tl_track
pending = tl_track or current or self.core.tracklist.next_track(None)
if original != pending and self.get_state() != PlaybackState.STOPPED:
self._trigger_track_playback_ended(self.get_time_position())
if pending:
# TODO: remove?
self.set_state(PlaybackState.PLAYING)
while pending:
# TODO: should we consume unplayable tracks in this loop?
if self._change(pending, PlaybackState.PLAYING):
break
else:
self.core.tracklist._mark_unplayable(pending)
current = pending
pending = self.core.tracklist.next_track(current)
# TODO: move to top and get rid of original?
self.core.tracklist._mark_played(original)
# TODO return result?
def _change(self, pending_tl_track, state):
self._pending_tl_track = pending_tl_track
if not pending_tl_track:
self.stop()
self._on_end_of_stream() # pretend an EOS happened for cleanup
return True
backend = self._get_backend(pending_tl_track)
if not backend:
return False
backend.playback.prepare_change()
if not backend.playback.change_track(pending_tl_track.track).get():
return False # TODO: test for this path
if state == PlaybackState.PLAYING:
try:
return backend.playback.play().get()
except TypeError:
# TODO: check by binding against underlying play method using
# inspect and otherwise re-raise?
logger.error('%s needs to be updated to work with this '
'version of Mopidy.', backend)
return False
elif state == PlaybackState.PAUSED:
return backend.playback.pause().get()
elif state == PlaybackState.STOPPED:
# TODO: emit some event now?
self._current_tl_track = self._pending_tl_track
self._pending_tl_track = None
return True
raise Exception('Unknown state: %s' % state)
def previous(self):
"""
Change to the previous track.
The current playback state will be kept. If it was playing, playing
will continue. If it was paused, it will still be paused, etc.
"""
self._trigger_track_playback_ended(self.get_time_position())
state = self.get_state()
current = self._pending_tl_track or self._current_tl_track
while current:
pending = self.core.tracklist.previous_track(current)
if self._change(pending, state):
break
else:
self.core.tracklist._mark_unplayable(pending)
# TODO: this could be needed to prevent a loop in rare cases
# if current == pending:
# break
current = pending
# TODO: no return value?
def resume(self):
"""If paused, resume playing the current track."""
if self.get_state() != PlaybackState.PAUSED:
return
backend = self._get_backend(self.get_current_tl_track())
if backend and backend.playback.resume().get():
self.set_state(PlaybackState.PLAYING)
# TODO: trigger via gst messages
self._trigger_track_playback_resumed()
# TODO: switch to:
# backend.resume()
# wait for state change?
def seek(self, time_position):
"""
Seeks to time position given in milliseconds.
:param time_position: time position in milliseconds
:type time_position: int
:rtype: :class:`True` if successful, else :class:`False`
"""
# TODO: seek needs to take pending tracks into account :(
validation.check_integer(time_position)
if time_position < 0:
logger.debug(
'Client seeked to negative position. Seeking to zero.')
time_position = 0
if not self.core.tracklist.tracks:
return False
if self.get_state() == PlaybackState.STOPPED:
self.play()
# TODO: uncomment once we have tests for this. Should fix seek after
# about to finish doing wrong track.
# if self._current_tl_track and self._pending_tl_track:
# self.play(self._current_tl_track)
# We need to prefer the still playing track, but if nothing is playing
# we fall back to the pending one.
tl_track = self._current_tl_track or self._pending_tl_track
if tl_track and tl_track.track.length is None:
return False
if time_position < 0:
time_position = 0
elif time_position > tl_track.track.length:
# TODO: gstreamer will trigger an about-to-finish for us, use that?
self.next()
return True
backend = self._get_backend(self.get_current_tl_track())
if not backend:
return False
success = backend.playback.seek(time_position).get()
if success:
self._trigger_seeked(time_position)
return success
def stop(self):
"""Stop playing."""
if self.get_state() != PlaybackState.STOPPED:
backend = self._get_backend(self.get_current_tl_track())
time_position_before_stop = self.get_time_position()
if not backend or backend.playback.stop().get():
self.set_state(PlaybackState.STOPPED)
self._trigger_track_playback_ended(time_position_before_stop)
def _trigger_track_playback_paused(self):
logger.debug('Triggering track playback paused event')
if self.current_track is None:
return
listener.CoreListener.send(
'track_playback_paused',
tl_track=self.get_current_tl_track(),
time_position=self.get_time_position())
def _trigger_track_playback_resumed(self):
logger.debug('Triggering track playback resumed event')
if self.current_track is None:
return
listener.CoreListener.send(
'track_playback_resumed',
tl_track=self.get_current_tl_track(),
time_position=self.get_time_position())
def _trigger_track_playback_started(self):
# TODO: replace with stream-changed
logger.debug('Triggering track playback started event')
if self.get_current_tl_track() is None:
return
tl_track = self.get_current_tl_track()
self.core.tracklist._mark_playing(tl_track)
self.core.history._add_track(tl_track.track)
listener.CoreListener.send('track_playback_started', tl_track=tl_track)
def _trigger_track_playback_ended(self, time_position_before_stop):
logger.debug('Triggering track playback ended event')
if self.get_current_tl_track() is None:
return
listener.CoreListener.send(
'track_playback_ended',
tl_track=self.get_current_tl_track(),
time_position=time_position_before_stop)
def _trigger_playback_state_changed(self, old_state, new_state):
logger.debug('Triggering playback state change event')
listener.CoreListener.send(
'playback_state_changed',
old_state=old_state, new_state=new_state)
def _trigger_seeked(self, time_position):
logger.debug('Triggering seeked event')
listener.CoreListener.send('seeked', time_position=time_position)
|
|
# Natural Language Toolkit: Tree Transformations
#
# Copyright (C) 2005-2007 Oregon Graduate Institute
# Author: Nathan Bodenstab <[email protected]>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
"""
A collection of methods for tree (grammar) transformations used
in parsing natural language.
Although many of these methods are technically grammar transformations
(ie. Chomsky Normal Form), when working with treebanks it is much more
natural to visualize these modifications in a tree structure. Hence,
we will do all transformations directly on the tree itself.
Transforming the tree directly also allows us to do parent annotation.
A grammar can then be simply induced from the modified tree.
The following is a short tutorial on the available transformations.
1. Chomsky Normal Form (binarization)
It is well known that any grammar has a Chomsky Normal Form (CNF)
equivalent grammar where CNF is defined by every production having
either two non-terminals or one terminal on its right hand side.
When we have hierarchically structured data (ie. a treebank), it is
natural to view this in terms of productions where the root of every
subtree is the head (left hand side) of the production and all of
its children are the right hand side constituents. In order to
convert a tree into CNF, we simply need to ensure that every subtree
has either two subtrees as children (binarization), or one leaf node
(a terminal). In order to binarize a subtree with more than two
children, we must introduce artificial nodes.
There are two popular methods to convert a tree into CNF: left
factoring and right factoring. The following example demonstrates
the difference between them. Example::
Original Right-Factored Left-Factored
A A A
/ | \ / \ / \
B C D ==> B A|<C-D> OR A|<B-C> D
/ \ / \
C D B C
2. Parent Annotation
In addition to binarizing the tree, there are two standard
modifications to node labels we can do in the same traversal: parent
annotation and Markov order-N smoothing (or sibling smoothing).
The purpose of parent annotation is to refine the probabilities of
productions by adding a small amount of context. With this simple
addition, a CYK (inside-outside, dynamic programming chart parse)
can improve from 74% to 79% accuracy. A natural generalization from
parent annotation is to grandparent annotation and beyond. The
tradeoff becomes accuracy gain vs. computational complexity. We
must also keep in mind data sparsity issues. Example::
Original Parent Annotation
A A^<?>
/ | \ / \
B C D ==> B^<A> A|<C-D>^<?> where ? is the
/ \ parent of A
C^<A> D^<A>
3. Markov order-N smoothing
Markov smoothing combats data sparsity issues as well as decreasing
computational requirements by limiting the number of children
included in artificial nodes. In practice, most people use an order
2 grammar. Example::
Original No Smoothing Markov order 1 Markov order 2 etc.
__A__ A A A
/ /|\ \ / \ / \ / \
B C D E F ==> B A|<C-D-E-F> ==> B A|<C> ==> B A|<C-D>
/ \ / \ / \
C ... C ... C ...
Annotation decisions can be thought about in the vertical direction
(parent, grandparent, etc) and the horizontal direction (number of
siblings to keep). Parameters to the following functions specify
these values. For more information see:
Dan Klein and Chris Manning (2003) "Accurate Unlexicalized
Parsing", ACL-03. http://www.aclweb.org/anthology/P03-1054
4. Unary Collapsing
Collapse unary productions (ie. subtrees with a single child) into a
new non-terminal (Tree node). This is useful when working with
algorithms that do not allow unary productions, yet you do not wish
to lose the parent information. Example::
A
|
B ==> A+B
/ \ / \
C D C D
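As a rough usage sketch (the toy tree below is purely illustrative, and the
functions are assumed to be imported from this module), the in-place
transformations above can be combined like this::
from nltk.tree import Tree
t = Tree('A', [Tree('B', ['b']), Tree('C', ['c']), Tree('D', ['d'])])
collapse_unary(t)                     # fold unary chains into single nodes
chomsky_normal_form(t, horzMarkov=2)  # binarize with Markov order-2 smoothing
un_chomsky_normal_form(t)             # undo the binarization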
"""
from nltk.tree import Tree
def chomsky_normal_form(tree, factor = "right", horzMarkov = None, vertMarkov = 0, childChar = "|", parentChar = "^"):
# assume all subtrees have homogeneous children
# assume all terminals have no siblings
# A semi-hack to have elegant looking code below. As a result,
# any subtree with a branching factor greater than 999 will be incorrectly truncated.
if horzMarkov is None: horzMarkov = 999
# Traverse the tree depth-first keeping a list of ancestor nodes to the root.
# I chose not to use the tree.treepositions() method since it requires
# two traversals of the tree (one to get the positions, one to iterate
# over them) and node access time is proportional to the height of the node.
# This method is 7x faster which helps when parsing 40,000 sentences.
nodeList = [(tree, [tree.node])]
while nodeList != []:
node, parent = nodeList.pop()
if isinstance(node,Tree):
# parent annotation
parentString = ""
originalNode = node.node
if vertMarkov != 0 and node != tree and isinstance(node[0],Tree):
parentString = "%s<%s>" % (parentChar, "-".join(parent))
node.node += parentString
parent = [originalNode] + parent[:vertMarkov - 1]
# add children to the agenda before we mess with them
for child in node:
nodeList.append((child, parent))
# chomsky normal form factorization
if len(node) > 2:
childNodes = [child.node for child in node]
nodeCopy = node.copy()
node[0:] = [] # delete the children
curNode = node
numChildren = len(nodeCopy)
for i in range(1,numChildren - 1):
if factor == "right":
newHead = "%s%s<%s>%s" % (originalNode, childChar, "-".join(childNodes[i:min([i+horzMarkov,numChildren])]),parentString) # create new head
newNode = Tree(newHead, [])
curNode[0:] = [nodeCopy.pop(0), newNode]
else:
newHead = "%s%s<%s>%s" % (originalNode, childChar, "-".join(childNodes[max([numChildren-i-horzMarkov,0]):-i]),parentString)
newNode = Tree(newHead, [])
curNode[0:] = [newNode, nodeCopy.pop()]
curNode = newNode
curNode[0:] = [child for child in nodeCopy]
def un_chomsky_normal_form(tree, expandUnary = True, childChar = "|", parentChar = "^", unaryChar = "+"):
# Traverse the tree depth-first, keeping a pointer to the parent for modification purposes.
nodeList = [(tree,[])]
while nodeList != []:
node,parent = nodeList.pop()
if isinstance(node,Tree):
# if the node contains the 'childChar' character it means that
# it is an artificial node and can be removed, although we still need
# to move its children to its parent
childIndex = node.node.find(childChar)
if childIndex != -1:
nodeIndex = parent.index(node)
parent.remove(parent[nodeIndex])
# Generated node was on the left if the nodeIndex is 0 which
# means the grammar was left factored. We must insert the children
# at the beginning of the parent's children
if nodeIndex == 0:
parent.insert(0,node[0])
parent.insert(1,node[1])
else:
parent.extend([node[0],node[1]])
# parent is now the current node so the children of parent will be added to the agenda
node = parent
else:
parentIndex = node.node.find(parentChar)
if parentIndex != -1:
# strip the node name of the parent annotation
node.node = node.node[:parentIndex]
# expand collapsed unary productions
if expandUnary == True:
unaryIndex = node.node.find(unaryChar)
if unaryIndex != -1:
newNode = Tree(node.node[unaryIndex + 1:], [i for i in node])
node.node = node.node[:unaryIndex]
node[0:] = [newNode]
for child in node:
nodeList.append((child,node))
def collapse_unary(tree, collapsePOS = False, collapseRoot = False, joinChar = "+"):
"""
Collapse subtrees with a single child (ie. unary productions)
into a new non-terminal (Tree node) joined by 'joinChar'.
This is useful when working with algorithms that do not allow
unary productions, and completely removing the unary productions
would require loss of useful information. The Tree is modified
directly (since it is passed by reference) and no value is returned.
@param tree: The Tree to be collapsed
@type tree: C{Tree}
@param collapsePOS: 'False' (default) will not collapse the parent of leaf nodes (ie.
Part-of-Speech tags) since they are always unary productions
@type collapsePOS: C{boolean}
@param collapseRoot: 'False' (default) will not modify the root production
if it is unary. For the Penn WSJ treebank corpus, this corresponds
to the TOP -> productions.
@type collapseRoot: C{boolean}
@param joinChar: A string used to connect collapsed node values (default = "+")
@type joinChar: C{string}
"""
if collapseRoot == False and isinstance(tree, Tree) and len(tree) == 1:
nodeList = [tree[0]]
else:
nodeList = [tree]
# depth-first traversal of tree
while nodeList != []:
node = nodeList.pop()
if isinstance(node,Tree):
if len(node) == 1 and isinstance(node[0], Tree) and (collapsePOS == True or isinstance(node[0,0], Tree)):
node.node += joinChar + node[0].node
node[0:] = [child for child in node[0]]
# since we assigned the child's children to the current node,
# evaluate the current node again
nodeList.append(node)
else:
for child in node:
nodeList.append(child)
#################################################################
# Demonstration
#################################################################
def demo():
"""
A demonstration showing how each tree transform can be used.
"""
from nltk.draw.tree import draw_trees
from nltk import treetransforms, bracket_parse
from copy import deepcopy
# original tree from WSJ bracketed text
sentence = """(TOP
(S
(S
(VP
(VBN Turned)
(ADVP (RB loose))
(PP
(IN in)
(NP
(NP (NNP Shane) (NNP Longman) (POS 's))
(NN trading)
(NN room)))))
(, ,)
(NP (DT the) (NN yuppie) (NNS dealers))
(VP (AUX do) (NP (NP (RB little)) (ADJP (RB right))))
(. .)))"""
tree = bracket_parse(sentence)
# collapse subtrees with only one child
collapsedTree = deepcopy(tree)
treetransforms.collapse_unary(collapsedTree)
# convert the tree to CNF
cnfTree = deepcopy(collapsedTree)
treetransforms.chomsky_normal_form(cnfTree)
# convert the tree to CNF with parent annotation (one level) and horizontal smoothing of order two
parentTree = deepcopy(collapsedTree)
treetransforms.chomsky_normal_form(parentTree, horzMarkov=2, vertMarkov=1)
# convert the tree back to its original form (used to make CYK results comparable)
original = deepcopy(parentTree)
treetransforms.un_chomsky_normal_form(original)
# convert tree back to bracketed text
sentence2 = original.pprint()
print sentence
print sentence2
print "Sentences the same? ", sentence == sentence2
draw_trees(tree, collapsedTree, cnfTree, parentTree, original)
if __name__ == '__main__':
demo()
__all__ = ["chomsky_normal_form", "un_chomsky_normal_form", "collapse_unary"]
|
|
# Copyright (c) 2012 Cloudera, Inc. All rights reserved.
#
# Talk to an impalad through beeswax.
# Usage:
# * impalad is a string with the host and port of the impalad
# with which the connection should be established.
# The format is "<hostname>:<port>"
# * query_string is the query to be executed, as a string.
# client = ImpalaBeeswaxClient(impalad)
# client.connect()
# result = client.execute(query_string)
# where result is an object of the class ImpalaBeeswaxResult.
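# A rough sketch of optional per-query configuration (the option name used
# below is only illustrative):
#   client.set_query_option('NUM_NODES', '1')
#   client.clear_query_options()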
import time
import sys
import shlex
import traceback
import getpass
import re
from beeswaxd import BeeswaxService
from beeswaxd.BeeswaxService import QueryState
from datetime import datetime
try:
# If Exec Summary is not implemented in Impala, this cannot be imported
from ExecStats.ttypes import TExecStats
except ImportError:
pass
from ImpalaService import ImpalaService
from ImpalaService.ImpalaService import TImpalaQueryOptions, TResetTableReq
from tests.util.thrift_util import create_transport
from thrift.transport.TSocket import TSocket
from thrift.transport.TTransport import TBufferedTransport, TTransportException
from thrift.protocol import TBinaryProtocol
from thrift.Thrift import TApplicationException
# Custom exception wrapper.
# All exceptions coming from thrift/beeswax etc. go through this wrapper.
# __str__ preserves the exception type.
# TODO: Add the ability to print some of the stack.
class ImpalaBeeswaxException(Exception):
__name__ = "ImpalaBeeswaxException"
def __init__(self, message, inner_exception):
self.__message = message
self.inner_exception = inner_exception
def __str__(self):
return "%s:\n %s" % (self.__name__, self.__message)
class ImpalaBeeswaxResult(object):
def __init__(self, **kwargs):
self.query = kwargs.get('query', None)
self.success = kwargs.get('success', False)
# Insert returns an int, convert into list to have a uniform data type.
# TODO: We should revisit this if we have more datatypes to deal with.
self.data = kwargs.get('data', None)
if not isinstance(self.data, list):
self.data = str(self.data)
self.data = [self.data]
self.log = None
self.time_taken = kwargs.get('time_taken', 0)
self.summary = kwargs.get('summary', str())
self.schema = kwargs.get('schema', None)
self.runtime_profile = kwargs.get('runtime_profile', str())
self.exec_summary = kwargs.get('exec_summary', None)
def get_data(self):
return self.__format_data()
def __format_data(self):
if self.data:
return '\n'.join(self.data)
return ''
def __str__(self):
message = ('Summary: %s\n'
'Success: %s\n'
'Took: %s(s)\n'
'Data:\n%s\n'
% (self.summary, self.success, self.time_taken,
self.__format_data())
)
return message
# Interface to beeswax. Responsible for executing queries, fetching results.
class ImpalaBeeswaxClient(object):
# Regex applied to all tokens of a query to detect the query type.
INSERT_REGEX = re.compile("^insert$", re.I)
def __init__(self, impalad, use_kerberos=False, user=None, password=None,
use_ssl=False):
self.connected = False
self.impalad = impalad
self.imp_service = None
self.transport = None
self.use_kerberos = use_kerberos
self.use_ssl = use_ssl
self.user, self.password = user, password
self.use_ldap = (self.user is not None)
self.__query_options = {}
self.query_states = QueryState._NAMES_TO_VALUES
def __options_to_string_list(self):
return ["%s=%s" % (k,v) for (k,v) in self.__query_options.iteritems()]
def get_query_options(self):
return self.__query_options
def set_query_option(self, name, value):
self.__query_options[name.upper()] = value
def set_query_options(self, query_option_dict):
if query_option_dict is None:
raise ValueError, 'Cannot pass None value for query options'
self.clear_query_options()
for name, value in query_option_dict.iteritems():
self.set_query_option(name, value)
def get_query_option(self, name):
return self.__query_options.get(name.upper())
def clear_query_options(self):
self.__query_options.clear()
def connect(self):
"""Connect to impalad specified in intializing this object
Raises an exception if the connection is unsuccesful.
"""
try:
self.impalad = self.impalad.split(':')
self.transport = self.__get_transport()
self.transport.open()
protocol = TBinaryProtocol.TBinaryProtocol(self.transport)
self.imp_service = ImpalaService.Client(protocol)
self.connected = True
except Exception, e:
raise ImpalaBeeswaxException(self.__build_error_message(e), e)
def close_connection(self):
"""Close the transport if it's still open"""
if self.transport:
self.transport.close()
def __get_transport(self):
"""Creates the proper transport type based environment (secure vs unsecure)"""
trans_type = 'buffered'
if self.use_kerberos:
trans_type = 'kerberos'
elif self.use_ldap:
trans_type = 'plain_sasl'
return create_transport(host=self.impalad[0], port=int(self.impalad[1]),
service='impala', transport_type=trans_type, user=self.user,
password=self.password, use_ssl=self.use_ssl)
def execute(self, query_string, user=None):
"""Re-directs the query to its appropriate handler, returns ImpalaBeeswaxResult"""
# Take care of leading/trailing whitespaces.
query_string = query_string.strip()
start = time.time()
start_time = datetime.now()
handle = self.__execute_query(query_string.strip(), user=user)
if self.__get_query_type(query_string) == 'insert':
# DML queries are finished by this point.
time_taken = time.time() - start
# fetch_results() will close the query after which there is no guarantee that
# profile and log will be available so fetch them first.
runtime_profile = self.get_runtime_profile(handle)
exec_summary = self.get_exec_summary(handle)
log = self.get_log(handle.log_context)
result = self.fetch_results(query_string, handle)
result.time_taken, result.start_time, result.runtime_profile, result.log = \
time_taken, start_time, runtime_profile, log
result.exec_summary = exec_summary
else:
# For SELECT queries, execution might still be ongoing. fetch_results() will block
# until the query is completed.
result = self.fetch_results(query_string, handle)
result.time_taken = time.time() - start
result.start_time = start_time
result.exec_summary = self.get_exec_summary(handle)
result.log = self.get_log(handle.log_context)
result.runtime_profile = self.get_runtime_profile(handle)
self.close_query(handle)
return result
def get_exec_summary(self, handle):
"""Calls GetExecSummary() for the last query handle"""
try:
summary = self.__do_rpc(lambda: self.imp_service.GetExecSummary(handle))
except ImpalaBeeswaxException:
summary = None
if summary is None or summary.nodes is None:
return None
# If exec summary is not implemented in Impala, this function has already returned
# above, so we never reach __build_summary_table, which requires TExecStats to be imported.
output = []
self.__build_summary_table(summary, 0, False, 0, False, output)
return output
def __build_summary_table(self, summary, idx, is_fragment_root, indent_level,
new_indent_level, output):
"""NOTE: This was taken impala_shell.py. This method will be a placed in a library
that is shared between impala_shell and this file.
Direct translation of Coordinator::PrintExecSummary() to recursively build a list
of rows of summary statistics, one per exec node
summary: the TExecSummary object that contains all the summary data
idx: the index of the node to print
is_fragment_root: true if the node to print is the root of a fragment (and therefore
feeds into an exchange)
indent_level: the number of spaces to print before writing the node's label, to give
the appearance of a tree. The 0th child of a node has the same indent_level as its
parent. All other children have an indent_level of one greater than their parent.
new_indent_level: If true, this indent level is different from the previous row's.
output: the list of rows into which to append the rows produced for this node and its
children.
Returns the index of the next exec node in summary.nodes that should be
processed; used internally by this method only.
"""
attrs = ["latency_ns", "cpu_time_ns", "cardinality", "memory_used"]
# Initialise aggregate and maximum stats
agg_stats, max_stats = TExecStats(), TExecStats()
for attr in attrs:
setattr(agg_stats, attr, 0)
setattr(max_stats, attr, 0)
node = summary.nodes[idx]
for stats in node.exec_stats:
for attr in attrs:
val = getattr(stats, attr)
if val is not None:
setattr(agg_stats, attr, getattr(agg_stats, attr) + val)
setattr(max_stats, attr, max(getattr(max_stats, attr), val))
if len(node.exec_stats) > 0:
avg_time = agg_stats.latency_ns / len(node.exec_stats)
else:
avg_time = 0
# If the node is a broadcast-receiving exchange node, the cardinality of rows produced
# is the max over all instances (which should all have received the same number of
# rows). Otherwise, the cardinality is the sum over all instances which process
# disjoint partitions.
if node.is_broadcast and is_fragment_root:
cardinality = max_stats.cardinality
else:
cardinality = agg_stats.cardinality
est_stats = node.estimated_stats
label_prefix = ""
if indent_level > 0:
label_prefix = "|"
label_prefix += " |" * (indent_level - 1)
if new_indent_level:
label_prefix += "--"
else:
label_prefix += " "
row = {}
row["prefix"] = label_prefix
row["operator"] = node.label
row["num_hosts"] = len(node.exec_stats)
row["avg_time"] = avg_time
row["max_time"] = max_stats.latency_ns
row["num_rows"] = cardinality
row["est_num_rows"] = est_stats.cardinality
row["peak_mem"] = max_stats.memory_used
row["est_peak_mem"] = est_stats.memory_used
row["detail"] = node.label_detail
output.append(row)
try:
sender_idx = summary.exch_to_sender_map[idx]
# This is an exchange node, so the sender is a fragment root, and should be printed
# next.
self.__build_summary_table(summary, sender_idx, True, indent_level, False, output)
except (KeyError, TypeError):
# Fall through if idx not in map, or if exch_to_sender_map itself is not set
pass
idx += 1
if node.num_children > 0:
first_child_output = []
idx = \
self.__build_summary_table(
summary, idx, False, indent_level, False, first_child_output)
for child_idx in xrange(1, node.num_children):
# All other children are indented (we only have 0, 1 or 2 children for every exec
# node at the moment)
idx = self.__build_summary_table(
summary, idx, False, indent_level + 1, True, output)
output += first_child_output
return idx
def get_runtime_profile(self, handle):
return self.__do_rpc(lambda: self.imp_service.GetRuntimeProfile(handle))
def execute_query_async(self, query_string, user=None):
"""
Executes a query asynchronously
Issues a query and returns the query handle to the caller for processing.
"""
query = BeeswaxService.Query()
query.query = query_string
query.hadoop_user = user if user is not None else getpass.getuser()
query.configuration = self.__options_to_string_list()
return self.__do_rpc(lambda: self.imp_service.query(query,))
def __execute_query(self, query_string, user=None):
"""Executes a query and waits for completion"""
handle = self.execute_query_async(query_string, user=user)
# Wait for the query to finish execution.
self.wait_for_completion(handle)
return handle
def cancel_query(self, query_id):
return self.__do_rpc(lambda: self.imp_service.Cancel(query_id))
def close_query(self, handle):
self.__do_rpc(lambda: self.imp_service.close(handle))
def wait_for_completion(self, query_handle):
"""Given a query handle, polls the coordinator waiting for the query to complete"""
while True:
query_state = self.get_state(query_handle)
# if the rpc succeeded, the output is the query state
if query_state == self.query_states["FINISHED"]:
break
elif query_state == self.query_states["EXCEPTION"]:
try:
error_log = self.__do_rpc(
lambda: self.imp_service.get_log(query_handle.log_context))
raise ImpalaBeeswaxException("Query aborted:" + error_log, None)
finally:
self.close_query(query_handle)
time.sleep(0.05)
def get_default_configuration(self):
return self.__do_rpc(lambda: self.imp_service.get_default_configuration(False))
def get_state(self, query_handle):
return self.__do_rpc(lambda: self.imp_service.get_state(query_handle))
def get_log(self, query_handle):
return self.__do_rpc(lambda: self.imp_service.get_log(query_handle))
def refresh(self):
"""Invalidate the Impalad catalog"""
return self.execute("invalidate metadata")
def refresh_table(self, db_name, table_name):
"""Refresh a specific table from the catalog"""
return self.execute("refresh %s.%s" % (db_name, table_name))
def fetch_results(self, query_string, query_handle, max_rows = -1):
"""Fetches query results given a handle and query type (insert, use, other)"""
query_type = self.__get_query_type(query_string)
if query_type == 'use':
# TODO: "use <database>" does not currently throw an error. Need to update this
# to handle the error case once that behavior has been changed.
return ImpalaBeeswaxResult(query=query_string, success=True, data=[''])
# Result fetching for insert is different from other queries.
exec_result = None
if query_type == 'insert':
exec_result = self.__fetch_insert_results(query_handle)
else:
exec_result = self.__fetch_results(query_handle, max_rows)
exec_result.query = query_string
return exec_result
def __fetch_results(self, handle, max_rows = -1):
"""Handles query results, returns a ImpalaBeeswaxResult object"""
schema = self.__do_rpc(lambda: self.imp_service.get_results_metadata(handle)).schema
# The query has finished, we can fetch the results
result_rows = []
while len(result_rows) < max_rows or max_rows < 0:
fetch_rows = -1 if max_rows < 0 else max_rows - len(result_rows)
results = self.__do_rpc(lambda: self.imp_service.fetch(handle, False, fetch_rows))
result_rows.extend(results.data)
if not results.has_more:
break
# The query executed successfully and all the data was fetched.
exec_result = ImpalaBeeswaxResult(success=True, data=result_rows, schema=schema)
exec_result.summary = 'Returned %d rows' % (len(result_rows))
return exec_result
def __fetch_insert_results(self, handle):
"""Executes an insert query"""
result = self.__do_rpc(lambda: self.imp_service.CloseInsert(handle))
# The insert was successful
num_rows = sum(map(int, result.rows_appended.values()))
data = ["%s: %s" % row for row in result.rows_appended.iteritems()]
exec_result = ImpalaBeeswaxResult(success=True, data=data)
exec_result.summary = "Inserted %d rows" % (num_rows,)
return exec_result
def __get_query_type(self, query_string):
# Set posix=True and add "'" to escaped quotes
# to deal with escaped quotes in string literals
lexer = shlex.shlex(query_string.lstrip(), posix=True)
lexer.escapedquotes += "'"
tokens = list(lexer)
# Do not classify explain queries as 'insert'
if (tokens[0].lower() == "explain"):
return tokens[0].lower()
# Because the WITH clause may precede INSERT or SELECT queries,
# just checking the first token is insufficient.
if filter(self.INSERT_REGEX.match, tokens):
return "insert"
return tokens[0].lower()
def __build_error_message(self, exception):
"""Construct a meaningful exception string"""
message = str(exception)
if isinstance(exception, BeeswaxService.BeeswaxException):
message = exception.message
return 'INNER EXCEPTION: %s\n MESSAGE: %s' % (exception.__class__, message)
def __do_rpc(self, rpc):
"""Executes the RPC lambda provided with some error checking.
Catches all the relevant exceptions and re-throws them wrapped
in a custom exception [ImpalaBeeswaxException].
"""
if not self.connected:
raise ImpalaBeeswaxException("Not connected", None)
try:
return rpc()
except BeeswaxService.BeeswaxException, b:
raise ImpalaBeeswaxException(self.__build_error_message(b), b)
except TTransportException, e:
self.connected = False
raise ImpalaBeeswaxException(self.__build_error_message(e), e)
except TApplicationException, t:
raise ImpalaBeeswaxException(self.__build_error_message(t), t)
except Exception, u:
raise ImpalaBeeswaxException(self.__build_error_message(u), u)
|
|
# Copyright (c) 2014 Alex Meade. All rights reserved.
# Copyright (c) 2014 Clinton Knight. All rights reserved.
# Copyright (c) 2015 Tom Barron. All rights reserved.
# Copyright (c) 2015 Goutham Pacha Ravi. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Mock unit tests for the NetApp block storage 7-mode library
"""
import ddt
from lxml import etree
import mock
from cinder import exception
from cinder import test
import cinder.tests.unit.volume.drivers.netapp.dataontap.client.fakes \
as client_fakes
import cinder.tests.unit.volume.drivers.netapp.dataontap.fakes as fake
import cinder.tests.unit.volume.drivers.netapp.fakes as na_fakes
from cinder.volume.drivers.netapp.dataontap import block_7mode
from cinder.volume.drivers.netapp.dataontap import block_base
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp.dataontap.client import client_base
from cinder.volume.drivers.netapp import utils as na_utils
@ddt.ddt
class NetAppBlockStorage7modeLibraryTestCase(test.TestCase):
"""Test case for NetApp's 7-Mode iSCSI library."""
def setUp(self):
super(NetAppBlockStorage7modeLibraryTestCase, self).setUp()
kwargs = {'configuration': self.get_config_7mode()}
self.library = block_7mode.NetAppBlockStorage7modeLibrary(
'driver', 'protocol', **kwargs)
self.library.zapi_client = mock.Mock()
self.zapi_client = self.library.zapi_client
self.library.vfiler = mock.Mock()
# Deprecated option
self.library.configuration.netapp_volume_list = None
def tearDown(self):
super(NetAppBlockStorage7modeLibraryTestCase, self).tearDown()
def get_config_7mode(self):
config = na_fakes.create_configuration_7mode()
config.netapp_storage_protocol = 'iscsi'
config.netapp_login = 'admin'
config.netapp_password = 'pass'
config.netapp_server_hostname = '127.0.0.1'
config.netapp_transport_type = 'http'
config.netapp_server_port = '80'
return config
@mock.patch.object(client_base.Client, 'get_ontapi_version',
mock.MagicMock(return_value=(1, 20)))
@mock.patch.object(block_7mode.NetAppBlockStorage7modeLibrary,
'_get_root_volume_name')
@mock.patch.object(block_7mode.NetAppBlockStorage7modeLibrary,
'_do_partner_setup')
@mock.patch.object(block_base.NetAppBlockStorageLibrary, 'do_setup')
def test_do_setup(self, super_do_setup, mock_do_partner_setup,
mock_get_root_volume_name):
mock_get_root_volume_name.return_value = 'vol0'
context = mock.Mock()
self.library.do_setup(context)
super_do_setup.assert_called_once_with(context)
mock_do_partner_setup.assert_called_once_with()
mock_get_root_volume_name.assert_called_once_with()
@mock.patch.object(client_base.Client, 'get_ontapi_version',
mock.MagicMock(return_value=(1, 20)))
def test_do_partner_setup(self):
self.library.configuration.netapp_partner_backend_name = 'partner'
self.library._do_partner_setup()
self.assertIsNotNone(self.library.partner_zapi_client)
@mock.patch.object(client_base.Client, 'get_ontapi_version',
mock.MagicMock(return_value=(1, 20)))
def test_do_partner_setup_no_partner(self):
self.library._do_partner_setup()
self.assertFalse(hasattr(self.library, 'partner_zapi_client'))
@mock.patch.object(
block_base.NetAppBlockStorageLibrary, 'check_for_setup_error')
def test_check_for_setup_error(self, super_check_for_setup_error):
self.zapi_client.get_ontapi_version.return_value = (1, 9)
self.mock_object(self.library, '_refresh_volume_info')
self.library.volume_list = ['open1', 'open2']
self.library.check_for_setup_error()
super_check_for_setup_error.assert_called_once_with()
def test_check_for_setup_error_no_filtered_pools(self):
self.zapi_client.get_ontapi_version.return_value = (1, 9)
self.mock_object(self.library, '_refresh_volume_info')
self.library.volume_list = []
self.assertRaises(exception.NetAppDriverException,
self.library.check_for_setup_error)
def test_check_for_setup_error_too_old(self):
self.zapi_client.get_ontapi_version.return_value = (1, 8)
self.assertRaises(exception.VolumeBackendAPIException,
self.library.check_for_setup_error)
def test_find_mapped_lun_igroup(self):
response = netapp_api.NaElement(etree.XML("""
<results status="passed">
<initiator-groups>
<initiator-group-info>
<initiator-group-name>%(initiator-group-name)s</initiator-group-name>
<initiator-group-type>%(initiator-group-type)s</initiator-group-type>
<initiator-group-uuid>1477ee47-0e1f-4b35-a82c-dcca0b76fc44
</initiator-group-uuid>
<initiator-group-os-type>linux</initiator-group-os-type>
<initiator-group-throttle-reserve>0</initiator-group-throttle-reserve>
<initiator-group-throttle-borrow>false
</initiator-group-throttle-borrow>
<initiator-group-vsa-enabled>false</initiator-group-vsa-enabled>
<initiator-group-alua-enabled>true</initiator-group-alua-enabled>
<initiator-group-report-scsi-name-enabled>true
</initiator-group-report-scsi-name-enabled>
<initiator-group-use-partner>true</initiator-group-use-partner>
<initiators>
<initiator-info>
<initiator-name>21:00:00:24:ff:40:6c:c3</initiator-name>
</initiator-info>
<initiator-info>
<initiator-name>21:00:00:24:ff:40:6c:c2</initiator-name>
<initiator-alias-info>
<initiator-alias>Centos</initiator-alias>
</initiator-alias-info>
</initiator-info>
</initiators>
<lun-id>2</lun-id>
</initiator-group-info>
</initiator-groups>
</results>""" % fake.IGROUP1))
initiators = fake.FC_FORMATTED_INITIATORS
self.zapi_client.get_lun_map.return_value = response
(igroup, lun_id) = self.library._find_mapped_lun_igroup('path',
initiators)
self.assertEqual(fake.IGROUP1_NAME, igroup)
self.assertEqual('2', lun_id)
def test_find_mapped_lun_igroup_initiator_mismatch(self):
response = netapp_api.NaElement(etree.XML("""
<results status="passed">
<initiator-groups>
<initiator-group-info>
<initiator-group-name>openstack-igroup1</initiator-group-name>
<initiator-group-type>fcp</initiator-group-type>
<initiator-group-uuid>1477ee47-0e1f-4b35-a82c-dcca0b76fc44
</initiator-group-uuid>
<initiator-group-os-type>linux</initiator-group-os-type>
<initiator-group-throttle-reserve>0</initiator-group-throttle-reserve>
<initiator-group-throttle-borrow>false
</initiator-group-throttle-borrow>
<initiator-group-vsa-enabled>false</initiator-group-vsa-enabled>
<initiator-group-alua-enabled>true</initiator-group-alua-enabled>
<initiator-group-report-scsi-name-enabled>true
</initiator-group-report-scsi-name-enabled>
<initiator-group-use-partner>true</initiator-group-use-partner>
<initiators>
<initiator-info>
<initiator-name>21:00:00:24:ff:40:6c:c3</initiator-name>
</initiator-info>
</initiators>
<lun-id>2</lun-id>
</initiator-group-info>
</initiator-groups>
</results>"""))
initiators = fake.FC_FORMATTED_INITIATORS
self.zapi_client.get_lun_map.return_value = response
(igroup, lun_id) = self.library._find_mapped_lun_igroup('path',
initiators)
self.assertIsNone(igroup)
self.assertIsNone(lun_id)
def test_find_mapped_lun_igroup_no_igroups(self):
response = netapp_api.NaElement(etree.XML("""
<results status="passed">
<initiator-groups />
</results>"""))
initiators = fake.FC_FORMATTED_INITIATORS
self.zapi_client.get_lun_map.return_value = response
(igroup, lun_id) = self.library._find_mapped_lun_igroup('path',
initiators)
self.assertIsNone(igroup)
self.assertIsNone(lun_id)
def test_find_mapped_lun_igroup_raises(self):
self.zapi_client.get_lun_map.side_effect = netapp_api.NaApiError
initiators = fake.FC_FORMATTED_INITIATORS
self.assertRaises(netapp_api.NaApiError,
self.library._find_mapped_lun_igroup,
'path',
initiators)
def test_has_luns_mapped_to_initiators_local_map(self):
initiator_list = fake.FC_FORMATTED_INITIATORS
self.zapi_client.has_luns_mapped_to_initiators.return_value = True
self.library.partner_zapi_client = mock.Mock()
result = self.library._has_luns_mapped_to_initiators(initiator_list)
self.assertTrue(result)
self.zapi_client.has_luns_mapped_to_initiators.assert_called_once_with(
initiator_list)
self.assertEqual(0, self.library.partner_zapi_client.
has_luns_mapped_to_initiators.call_count)
def test_has_luns_mapped_to_initiators_partner_map(self):
initiator_list = fake.FC_FORMATTED_INITIATORS
self.zapi_client.has_luns_mapped_to_initiators.return_value = False
self.library.partner_zapi_client = mock.Mock()
self.library.partner_zapi_client.has_luns_mapped_to_initiators.\
return_value = True
result = self.library._has_luns_mapped_to_initiators(initiator_list)
self.assertTrue(result)
self.zapi_client.has_luns_mapped_to_initiators.assert_called_once_with(
initiator_list)
self.library.partner_zapi_client.has_luns_mapped_to_initiators.\
assert_called_with(initiator_list)
def test_has_luns_mapped_to_initiators_no_maps(self):
initiator_list = fake.FC_FORMATTED_INITIATORS
self.zapi_client.has_luns_mapped_to_initiators.return_value = False
self.library.partner_zapi_client = mock.Mock()
self.library.partner_zapi_client.has_luns_mapped_to_initiators.\
return_value = False
result = self.library._has_luns_mapped_to_initiators(initiator_list)
self.assertFalse(result)
self.zapi_client.has_luns_mapped_to_initiators.assert_called_once_with(
initiator_list)
self.library.partner_zapi_client.has_luns_mapped_to_initiators.\
assert_called_with(initiator_list)
def test_has_luns_mapped_to_initiators_no_partner(self):
initiator_list = fake.FC_FORMATTED_INITIATORS
self.zapi_client.has_luns_mapped_to_initiators.return_value = False
self.library.partner_zapi_client = mock.Mock()
self.library.partner_zapi_client.has_luns_mapped_to_initiators.\
return_value = True
result = self.library._has_luns_mapped_to_initiators(
initiator_list, include_partner=False)
self.assertFalse(result)
self.zapi_client.has_luns_mapped_to_initiators.assert_called_once_with(
initiator_list)
self.assertEqual(0, self.library.partner_zapi_client.
has_luns_mapped_to_initiators.call_count)
def test_clone_lun_zero_block_count(self):
"""Test for when clone lun is not passed a block count."""
self.library._get_lun_attr = mock.Mock(return_value={
'Volume': 'fakeLUN', 'Path': '/vol/fake/fakeLUN'})
self.library.zapi_client = mock.Mock()
self.library.zapi_client.get_lun_by_args.return_value = [fake.FAKE_LUN]
self.library._add_lun_to_table = mock.Mock()
self.library._clone_lun('fakeLUN', 'newFakeLUN', 'false')
self.library.zapi_client.clone_lun.assert_called_once_with(
'/vol/fake/fakeLUN', '/vol/fake/newFakeLUN', 'fakeLUN',
'newFakeLUN', 'false', block_count=0, dest_block=0, src_block=0)
def test_clone_lun_no_space_reservation(self):
"""Test for when space_reservation is not passed."""
self.library._get_lun_attr = mock.Mock(return_value={
'Volume': 'fakeLUN', 'Path': '/vol/fake/fakeLUN'})
self.library.lun_space_reservation = 'false'
self.library.zapi_client = mock.Mock()
self.library.zapi_client.get_lun_by_args.return_value = [fake.FAKE_LUN]
self.library._add_lun_to_table = mock.Mock()
self.library._clone_lun('fakeLUN', 'newFakeLUN')
self.library.zapi_client.clone_lun.assert_called_once_with(
'/vol/fake/fakeLUN', '/vol/fake/newFakeLUN', 'fakeLUN',
'newFakeLUN', 'false', block_count=0, dest_block=0, src_block=0)
def test_clone_lun_qos_supplied(self):
"""Test for qos supplied in clone lun invocation."""
self.assertRaises(exception.VolumeDriverException,
self.library._clone_lun,
'fakeLUN',
'newFakeLUN',
qos_policy_group_name=fake.QOS_POLICY_GROUP_NAME)
def test_get_fc_target_wwpns(self):
ports1 = [fake.FC_FORMATTED_TARGET_WWPNS[0],
fake.FC_FORMATTED_TARGET_WWPNS[1]]
ports2 = [fake.FC_FORMATTED_TARGET_WWPNS[2],
fake.FC_FORMATTED_TARGET_WWPNS[3]]
self.zapi_client.get_fc_target_wwpns.return_value = ports1
self.library.partner_zapi_client = mock.Mock()
self.library.partner_zapi_client.get_fc_target_wwpns.return_value = \
ports2
result = self.library._get_fc_target_wwpns()
self.assertSetEqual(set(fake.FC_FORMATTED_TARGET_WWPNS), set(result))
def test_get_fc_target_wwpns_no_partner(self):
ports1 = [fake.FC_FORMATTED_TARGET_WWPNS[0],
fake.FC_FORMATTED_TARGET_WWPNS[1]]
ports2 = [fake.FC_FORMATTED_TARGET_WWPNS[2],
fake.FC_FORMATTED_TARGET_WWPNS[3]]
self.zapi_client.get_fc_target_wwpns.return_value = ports1
self.library.partner_zapi_client = mock.Mock()
self.library.partner_zapi_client.get_fc_target_wwpns.return_value = \
ports2
result = self.library._get_fc_target_wwpns(include_partner=False)
self.assertSetEqual(set(ports1), set(result))
@mock.patch.object(block_7mode.NetAppBlockStorage7modeLibrary,
'_refresh_volume_info', mock.Mock())
@mock.patch.object(block_7mode.NetAppBlockStorage7modeLibrary,
'_get_pool_stats', mock.Mock())
def test_vol_stats_calls_provide_ems(self):
self.library.zapi_client.provide_ems = mock.Mock()
self.library.get_volume_stats(refresh=True)
self.assertEqual(1, self.library.zapi_client.provide_ems.call_count)
def test_create_lun(self):
self.library.vol_refresh_voluntary = False
self.library._create_lun(fake.VOLUME_ID, fake.LUN_ID,
fake.LUN_SIZE, fake.LUN_METADATA)
self.library.zapi_client.create_lun.assert_called_once_with(
fake.VOLUME_ID, fake.LUN_ID, fake.LUN_SIZE, fake.LUN_METADATA,
None)
self.assertTrue(self.library.vol_refresh_voluntary)
def test_create_lun_with_qos_policy_group(self):
self.assertRaises(exception.VolumeDriverException,
self.library._create_lun, fake.VOLUME_ID,
fake.LUN_ID, fake.LUN_SIZE, fake.LUN_METADATA,
qos_policy_group_name=fake.QOS_POLICY_GROUP_NAME)
def test_check_volume_type_for_lun_legacy_qos_not_supported(self):
mock_get_volume_type = self.mock_object(na_utils,
'get_volume_type_from_volume')
self.assertRaises(exception.ManageExistingVolumeTypeMismatch,
self.library._check_volume_type_for_lun,
na_fakes.VOLUME, {}, {}, na_fakes.LEGACY_EXTRA_SPECS)
self.assertEqual(0, mock_get_volume_type.call_count)
def test_check_volume_type_for_lun_no_volume_type(self):
mock_get_volume_type = self.mock_object(na_utils,
'get_volume_type_from_volume')
mock_get_volume_type.return_value = None
mock_get_backend_spec = self.mock_object(
na_utils, 'get_backend_qos_spec_from_volume_type')
self.library._check_volume_type_for_lun(na_fakes.VOLUME, {}, {}, None)
self.assertEqual(0, mock_get_backend_spec.call_count)
def test_check_volume_type_for_lun_qos_spec_not_supported(self):
mock_get_volume_type = self.mock_object(na_utils,
'get_volume_type_from_volume')
mock_get_volume_type.return_value = na_fakes.VOLUME_TYPE
mock_get_backend_spec = self.mock_object(
na_utils, 'get_backend_qos_spec_from_volume_type')
mock_get_backend_spec.return_value = na_fakes.QOS_SPEC
self.assertRaises(exception.ManageExistingVolumeTypeMismatch,
self.library._check_volume_type_for_lun,
na_fakes.VOLUME, {}, {}, na_fakes.EXTRA_SPECS)
def test_get_preferred_target_from_list(self):
result = self.library._get_preferred_target_from_list(
fake.ISCSI_TARGET_DETAILS_LIST)
self.assertEqual(fake.ISCSI_TARGET_DETAILS_LIST[0], result)
def test_mark_qos_policy_group_for_deletion(self):
result = self.library._mark_qos_policy_group_for_deletion(
fake.QOS_POLICY_GROUP_INFO)
self.assertIsNone(result)
def test_setup_qos_for_volume(self):
result = self.library._setup_qos_for_volume(fake.VOLUME,
fake.EXTRA_SPECS)
self.assertIsNone(result)
def test_manage_existing_lun_same_name(self):
mock_lun = block_base.NetAppLun('handle', 'name', '1',
{'Path': '/vol/FAKE_CMODE_VOL1/name'})
self.library._get_existing_vol_with_manage_ref = mock.Mock(
return_value=mock_lun)
self.mock_object(na_utils, 'get_volume_extra_specs')
self.mock_object(na_utils, 'log_extra_spec_warnings')
self.library._check_volume_type_for_lun = mock.Mock()
self.library._add_lun_to_table = mock.Mock()
self.zapi_client.move_lun = mock.Mock()
self.library.manage_existing({'name': 'name'}, {'ref': 'ref'})
self.library._get_existing_vol_with_manage_ref.assert_called_once_with(
{'ref': 'ref'})
self.assertEqual(1, self.library._check_volume_type_for_lun.call_count)
self.assertEqual(1, self.library._add_lun_to_table.call_count)
self.assertEqual(0, self.zapi_client.move_lun.call_count)
def test_manage_existing_lun_new_path(self):
mock_lun = block_base.NetAppLun(
'handle', 'name', '1', {'Path': '/vol/FAKE_CMODE_VOL1/name'})
self.library._get_existing_vol_with_manage_ref = mock.Mock(
return_value=mock_lun)
self.mock_object(na_utils, 'get_volume_extra_specs')
self.mock_object(na_utils, 'log_extra_spec_warnings')
self.library._check_volume_type_for_lun = mock.Mock()
self.library._add_lun_to_table = mock.Mock()
self.zapi_client.move_lun = mock.Mock()
self.library.manage_existing({'name': 'volume'}, {'ref': 'ref'})
self.assertEqual(
2, self.library._get_existing_vol_with_manage_ref.call_count)
self.assertEqual(1, self.library._check_volume_type_for_lun.call_count)
self.assertEqual(1, self.library._add_lun_to_table.call_count)
self.zapi_client.move_lun.assert_called_once_with(
'/vol/FAKE_CMODE_VOL1/name', '/vol/FAKE_CMODE_VOL1/volume')
def test_get_pool_stats_no_volumes(self):
self.library.vols = []
result = self.library._get_pool_stats()
self.assertListEqual([], result)
@ddt.data({'netapp_lun_space_reservation': 'enabled'},
{'netapp_lun_space_reservation': 'disabled'})
@ddt.unpack
def test_get_pool_stats(self, netapp_lun_space_reservation):
self.library.volume_list = ['vol0', 'vol1', 'vol2']
self.library.root_volume_name = 'vol0'
self.library.reserved_percentage = 5
self.library.max_over_subscription_ratio = 10.0
self.library.configuration.netapp_lun_space_reservation = (
netapp_lun_space_reservation)
self.library.vols = netapp_api.NaElement(
client_fakes.VOLUME_LIST_INFO_RESPONSE).get_child_by_name(
'volumes').get_children()
thick = netapp_lun_space_reservation == 'enabled'
result = self.library._get_pool_stats()
expected = [{
'pool_name': 'vol1',
'QoS_support': False,
'thin_provisioned_support': not thick,
'thick_provisioned_support': thick,
'provisioned_capacity_gb': 2.94,
'free_capacity_gb': 1339.27,
'total_capacity_gb': 1342.21,
'reserved_percentage': 5,
'max_over_subscription_ratio': 10.0
}]
self.assertEqual(expected, result)
def test_get_filtered_pools_invalid_conf(self):
"""Verify an exception is raised if the regex pattern is invalid."""
self.library.configuration.netapp_pool_name_search_pattern = '(.+'
self.assertRaises(exception.InvalidConfigurationValue,
self.library._get_filtered_pools)
@ddt.data('.*?3$|mix.+', '(.+?[0-9]+) ', '^.+3$', '^[a-z].*?[^4]$')
def test_get_filtered_pools_match_select_pools(self, patterns):
self.library.vols = fake.FAKE_7MODE_VOLUME['all']
self.library.configuration.netapp_pool_name_search_pattern = patterns
filtered_pools = self.library._get_filtered_pools()
self.assertEqual(
fake.FAKE_7MODE_VOLUME['all'][0].get_child_content('name'),
filtered_pools[0]
)
self.assertEqual(
fake.FAKE_7MODE_VOLUME['all'][1].get_child_content('name'),
filtered_pools[1]
)
@ddt.data('', 'mix.+|open.+', '.+', 'open123, mixed3, open1234', '.+')
def test_get_filtered_pools_match_all_pools(self, patterns):
self.library.vols = fake.FAKE_7MODE_VOLUME['all']
self.library.configuration.netapp_pool_name_search_pattern = patterns
filtered_pools = self.library._get_filtered_pools()
self.assertEqual(
fake.FAKE_7MODE_VOLUME['all'][0].get_child_content('name'),
filtered_pools[0]
)
self.assertEqual(
fake.FAKE_7MODE_VOLUME['all'][1].get_child_content('name'),
filtered_pools[1]
)
self.assertEqual(
fake.FAKE_7MODE_VOLUME['all'][2].get_child_content('name'),
filtered_pools[2]
)
@ddt.data('abc|stackopen|openstack|abc.*', 'abc',
'stackopen, openstack, open', '^$')
def test_get_filtered_pools_non_matching_patterns(self, patterns):
self.library.vols = fake.FAKE_7MODE_VOLUME['all']
self.library.configuration.netapp_pool_name_search_pattern = patterns
filtered_pools = self.library._get_filtered_pools()
self.assertListEqual([], filtered_pools)
def test_get_pool_stats_no_ssc_vols(self):
self.library.vols = {}
pools = self.library._get_pool_stats()
self.assertListEqual([], pools)
def test_get_pool_stats_with_filtered_pools(self):
self.library.vols = fake.FAKE_7MODE_VOL1
self.library.volume_list = [
fake.FAKE_7MODE_VOL1[0].get_child_content('name')
]
self.library.root_volume_name = ''
pools = self.library._get_pool_stats()
self.assertListEqual(fake.FAKE_7MODE_POOLS, pools)
def test_get_pool_stats_no_filtered_pools(self):
self.library.vols = fake.FAKE_7MODE_VOL1
self.library.volume_list = ['open1', 'open2']
self.library.root_volume_name = ''
pools = self.library._get_pool_stats()
self.assertListEqual([], pools)
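# Illustrative sketch (separate from the driver tests above): how the ddt
# decorators used throughout this module expand one test method into a test
# case per datum. The class name and values below are hypothetical.
@ddt.ddt
class _ExampleDdtUsage(test.TestCase):
    @ddt.data({'value': 1}, {'value': 2})
    @ddt.unpack
    def test_value_is_positive(self, value):
        # Each dict supplied to @ddt.data is unpacked into keyword
        # arguments, mirroring the pool-stats tests above.
        self.assertGreater(value, 0)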
|
|
"""
mflgr module. Contains the LgrChild and ModflowLgr classes.
"""
import os
import sys
from ..mbase import BaseModel
from ..modflow import Modflow
from ..version import __version__
class LgrChild():
def __init__(self, ishflg=1, ibflg=59, iucbhsv=0, iucbfsv=0,
mxlgriter=20, ioutlgr=1, relaxh=0.4, relaxf=0.4,
hcloselgr=5e-3, fcloselgr=5e-2,
nplbeg=0, nprbeg=0, npcbeg=0,
nplend=0, nprend=1, npcend=1,
ncpp=2, ncppl=1):
self.ishflg = ishflg
self.ibflg = ibflg
self.iucbhsv = iucbhsv
self.iucbfsv = iucbfsv
self.mxlgriter = mxlgriter
self.ioutlgr = ioutlgr
self.relaxh = relaxh
self.relaxf = relaxf
self.hcloselgr = hcloselgr
self.fcloselgr = fcloselgr
self.nplbeg = nplbeg
self.nprbeg = nprbeg
self.npcbeg = npcbeg
self.nplend = nplend
self.nprend = nprend
self.npcend = npcend
self.ncpp = ncpp
if isinstance(ncppl, int):
self.ncppl = [ncppl]
else:
self.ncppl = ncppl
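# Illustrative sketch (not part of the original module): build an LgrChild
# describing a refined region. The grid indices and refinement values below
# are hypothetical; note that ncppl may be given as a single int or as a
# per-layer list, as handled in LgrChild.__init__ above.
def _example_lgr_child():
    return LgrChild(nplbeg=0, nplend=0,
                    nprbeg=5, nprend=10,
                    npcbeg=5, npcend=10,
                    ncpp=3, ncppl=[3])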
class ModflowLgr(BaseModel):
"""
MODFLOW-LGR Model Class.
Parameters
----------
modelname : string, optional
        Name of model. This string will be used to name the MODFLOW input
        files that are created with write_input (the default is
        'modflowlgrtest').
namefile_ext : string, optional
        Extension for the namefile (the default is 'lgr').
version : string, optional
        Version of MODFLOW-LGR to use (the default is 'mflgr').
exe_name : string, optional
        The name of the executable to use (the default is 'mflgr.exe').
    iupbhsv : integer, optional
        Unit number for the parent boundary head save file (default is 0).
    iupbfsv : integer, optional
        Unit number for the parent boundary flow save file (default is 0).
model_ws : string, optional
model workspace. Directory name to create model data sets.
(default is the present working directory).
external_path : string
Location for external files (default is None).
verbose : boolean, optional
Print additional information to the screen (default is False).
    parent : Modflow object, optional
        Parent model of the LGR simulation (default is None).
    children : Modflow object or list of Modflow objects, optional
        Child model(s) of the LGR simulation (default is None).
    children_data : LgrChild object or list of LgrChild objects, optional
        Child data, one entry per child model (default is None).
Attributes
----------
Methods
-------
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> lgr = flopy.modflowlgr.ModflowLgr(parent=parent, children=children,
    ...                                    children_data=children_data)
"""
def __init__(self, modelname='modflowlgrtest', namefile_ext='lgr',
version='mflgr', exe_name='mflgr.exe',
iupbhsv=0, iupbfsv=0,
parent=None, children=None, children_data=None, model_ws='.',
external_path=None,
verbose=False, **kwargs):
BaseModel.__init__(self, modelname, namefile_ext, exe_name, model_ws,
structured=True, **kwargs)
self.version_types = {'mflgr': 'MODFLOW-LGR'}
self.set_version(version)
# external option stuff
self.array_free_format = True
self.array_format = 'modflow'
self.verbose = verbose
self.iupbhsv = iupbhsv
self.iupbfsv = iupbfsv
self.parent = parent
if children is not None:
if not isinstance(children, list):
children = [children]
self.children_models = children
if children_data is not None:
if not isinstance(children_data, list):
children_data = [children_data]
self.children_data = children_data
# set the number of grids
self.children = 0
if children is not None:
self.children += len(children)
self.load_fail = False
# the starting external data unit number
self._next_ext_unit = 2000
        # convert iupbhsv, iupbfsv, iucbhsv, and iucbfsv units from
        # external_files to output_files
        ibhsv = self.iupbhsv
        ibfsv = self.iupbfsv
if ibhsv > 0:
self.parent.add_output_file(ibhsv)
if ibfsv > 0:
self.parent.add_output_file(ibfsv)
for child, child_data in zip(self.children_models, self.children_data):
ibhsv = child_data.iucbhsv
ibfsv = child_data.iucbfsv
if ibhsv > 0:
child.add_output_file(ibhsv)
if ibfsv > 0:
child.add_output_file(ibfsv)
if external_path is not None:
if os.path.exists(os.path.join(model_ws, external_path)):
print("Note: external_path " + str(external_path) +
" already exists")
else:
os.makedirs(os.path.join(model_ws, external_path))
self.external_path = external_path
self.verbose = verbose
return
def __repr__(self):
return 'MODFLOW-LGR model with {} grids'.format(self.ngrids)
@property
def ngrids(self):
try:
return 1 + self.children
except:
return None
def write_input(self, SelPackList=False, check=False):
"""
        Write the input. Overrides BaseModel's write_input.
Parameters
----------
SelPackList : False or list of packages
"""
if check:
# run check prior to writing input
pass
if self.verbose:
print('\nWriting packages:')
# write lgr file
self.write_name_file()
# write MODFLOW files for parent model
self.parent.write_input(SelPackList=SelPackList, check=check)
# write MODFLOW files for the children models
for child in self.children_models:
child.write_input(SelPackList=SelPackList, check=check)
def _padline(self, line, comment=None, line_len=79):
if len(line) < line_len:
fmt = '{:' + '{}'.format(line_len) + 's}'
line = fmt.format(line)
if comment is not None:
line += ' # {}\n'.format(comment)
return line
def _get_path(self, bpth, pth, fpth=''):
lpth = os.path.abspath(bpth)
mpth = os.path.abspath(pth)
rpth = os.path.relpath(mpth, lpth)
if rpth == '.':
rpth = fpth
else:
rpth = os.path.join(rpth, fpth)
msg = 'namefiles must be in the same directory as ' + \
'the lgr control file\n'
msg += 'Control file path: {}\n'.format(lpth)
msg += 'Namefile path: {}\n'.format(mpth)
msg += 'Relative path: {}\n'.format(rpth)
raise ValueError(msg)
return rpth
def get_namefiles(self):
'''
Get the namefiles (with path) of the parent and children models
Returns
-------
namefiles : list
Examples
--------
>>> import flopy
>>> lgr = flopy.modflowlgr.ModflowLgr.load(f)
>>> namefiles = lgr.get_namefiles()
'''
pth = os.path.join(self.parent._model_ws, self.parent.namefile)
namefiles = [pth]
for child in self.children_models:
pth = os.path.join(child._model_ws, child.namefile)
namefiles.append(pth)
return namefiles
def write_name_file(self):
"""
Write the modflow-lgr control file.
"""
fn_path = os.path.join(self.model_ws, self.namefile)
f = open(fn_path, 'w')
f.write('{}\n'.format(self.heading))
# dataset 1
line = self._padline('LGR', comment='data set 1')
f.write(line)
# dataset 2
line = '{}'.format(self.ngrids)
        line = self._padline(line, comment='data set 2 - ngrids')
f.write(line)
# dataset 3
pth = self._get_path(self._model_ws, self.parent._model_ws,
fpth=self.parent.namefile)
line = self._padline(pth, comment='data set 3 - parent namefile')
f.write(line)
# dataset 4
line = self._padline('PARENTONLY', comment='data set 4 - gridstatus')
f.write(line)
# dataset 5
line = '{} {}'.format(self.iupbhsv, self.iupbfsv)
line = self._padline(line, comment='data set 5 - iupbhsv, iupbfsv')
f.write(line)
# dataset 6 to 15 for each child
for idx, (child, child_data) in enumerate(zip(self.children_models,
self.children_data)):
# dataset 6
pth = self._get_path(self._model_ws, child._model_ws,
fpth=child.namefile)
comment = 'data set 6 - child {} namefile'.format(idx + 1)
line = self._padline(pth, comment=comment)
f.write(line)
# dataset 7
comment = 'data set 7 - child {} gridstatus'.format(idx + 1)
line = self._padline('CHILDONLY',
comment=comment)
f.write(line)
# dataset 8
line = '{} {} {} {}'.format(child_data.ishflg, child_data.ibflg,
child_data.iucbhsv, child_data.iucbfsv)
comment = 'data set 8 - child {} '.format(idx + 1) + \
'ishflg, ibflg, iucbhsv, iucbfsv'
line = self._padline(line, comment=comment)
f.write(line)
# dataset 9
line = '{} {}'.format(child_data.mxlgriter, child_data.ioutlgr)
comment = 'data set 9 - child {} '.format(idx + 1) + \
'mxlgriter, ioutlgr'
line = self._padline(line, comment=comment)
f.write(line)
# dataset 10
line = '{} {}'.format(child_data.relaxh, child_data.relaxf)
comment = 'data set 10 - child {} '.format(idx + 1) + \
'relaxh, relaxf'
line = self._padline(line, comment=comment)
f.write(line)
# dataset 11
line = '{} {}'.format(child_data.hcloselgr, child_data.fcloselgr)
comment = 'data set 11 - child {} '.format(idx + 1) + \
'hcloselgr, fcloselgr'
line = self._padline(line, comment=comment)
f.write(line)
# dataset 12
line = '{} {} {}'.format(child_data.nplbeg + 1,
child_data.nprbeg + 1,
child_data.npcbeg + 1)
comment = 'data set 12 - child {} '.format(idx + 1) + \
'nplbeg, nprbeg, npcbeg'
line = self._padline(line, comment=comment)
f.write(line)
# dataset 13
line = '{} {} {}'.format(child_data.nplend + 1,
child_data.nprend + 1,
child_data.npcend + 1)
comment = 'data set 13 - child {} '.format(idx + 1) + \
'nplend, nprend, npcend'
line = self._padline(line, comment=comment)
f.write(line)
# dataset 14
line = '{}'.format(child_data.ncpp)
comment = 'data set 14 - child {} '.format(idx + 1) + \
'ncpp'
line = self._padline(line, comment=comment)
f.write(line)
# dataset 15
line = ''
for ndx in child_data.ncppl:
line += '{} '.format(ndx)
comment = 'data set 15 - child {} '.format(idx + 1) + \
'ncppl'
line = self._padline(line, comment=comment)
f.write(line)
# close the lgr control file
f.close()
def change_model_ws(self, new_pth=None, reset_external=False):
"""
Change the model work space.
Parameters
----------
new_pth : str
Location of new model workspace. If this path does not exist,
it will be created. (default is None, which will be assigned to
the present working directory).
Returns
-------
val : list of strings
Can be used to see what packages are in the model, and can then
be used with get_package to pull out individual packages.
"""
if new_pth is None:
new_pth = os.getcwd()
if not os.path.exists(new_pth):
try:
sys.stdout.write(
'\ncreating model workspace...\n {}\n'.format(new_pth))
os.makedirs(new_pth)
except:
line = '\n{} not valid, workspace-folder '.format(new_pth) + \
'was changed to {}\n'.format(os.getcwd())
print(line)
new_pth = os.getcwd()
# --reset the model workspace
old_pth = self._model_ws
self._model_ws = new_pth
line = '\nchanging model workspace...\n {}\n'.format(new_pth)
sys.stdout.write(line)
# reset model_ws for the parent
lpth = os.path.abspath(old_pth)
mpth = os.path.abspath(self.parent._model_ws)
rpth = os.path.relpath(mpth, lpth)
if rpth == '.':
npth = new_pth
else:
npth = os.path.join(new_pth, rpth)
self.parent.change_model_ws(new_pth=npth,
reset_external=reset_external)
# reset model_ws for the children
for child in self.children_models:
lpth = os.path.abspath(old_pth)
mpth = os.path.abspath(child._model_ws)
rpth = os.path.relpath(mpth, lpth)
if rpth == '.':
npth = new_pth
else:
npth = os.path.join(new_pth, rpth)
child.change_model_ws(new_pth=npth,
reset_external=reset_external)
@staticmethod
def load(f, version='mflgr', exe_name='mflgr.exe', verbose=False,
model_ws='.', load_only=None, forgive=True, check=True):
"""
Load an existing model.
Parameters
----------
        f : str
            Name of the MODFLOW-LGR control file to load.
model_ws : model workspace path
load_only : (optional) filetype(s) to load (e.g. ["bas6", "lpf"])
        forgive : flag to continue loading and skip packages that fail to
            load rather than raising - good for debugging
check : boolean
Check model input for common errors. (default True)
Returns
-------
        ml : ModflowLgr object
Examples
--------
>>> import flopy
        >>> ml = flopy.modflowlgr.ModflowLgr.load(f)
"""
# test if name file is passed with extension (i.e., is a valid file)
if os.path.isfile(os.path.join(model_ws, f)):
modelname = f.rpartition('.')[0]
else:
modelname = f
if not hasattr(f, 'read'):
filename = os.path.join(model_ws, f)
f = open(filename, 'r')
# dataset 0 -- header
header = ''
while True:
line = f.readline()
if line[0] != '#':
break
header += line.strip()
# dataset 1
ds1 = line.split()[0].lower()
msg = 'LGR must be entered as the first item in dataset 1\n'
msg += ' {}\n'.format(header)
assert ds1 == 'lgr', msg
# dataset 2
line = f.readline()
t = line.split()
ngrids = int(t[0])
nchildren = ngrids - 1
# dataset 3
line = f.readline()
t = line.split()
namefile = t[0]
pws = os.path.join(model_ws, os.path.dirname(namefile))
pn = os.path.basename(namefile)
# dataset 4
line = f.readline()
t = line.split()
gridstatus = t[0].lower()
msg = "GRIDSTATUS for the parent must be 'PARENTONLY'"
assert gridstatus == 'parentonly', msg
# dataset 5
line = f.readline()
t = line.split()
try:
iupbhsv, iupbfsv = int(t[0]), int(t[1])
except:
msg = 'could not read dataset 5 - IUPBHSV and IUPBFSV.'
raise ValueError(msg)
# non-zero values for IUPBHSV and IUPBFSV in dataset 5 are not
# supported
if iupbhsv + iupbfsv > 0:
            msg = 'nonzero values for IUPBHSV ({}) '.format(iupbhsv) + \
'and IUPBFSV ({}) '.format(iupbfsv) + \
'are not supported.'
raise ValueError(msg)
# load the parent model
parent = Modflow.load(pn, verbose=verbose, model_ws=pws,
load_only=load_only, forgive=forgive,
check=check)
children_data = []
children = []
for child in range(nchildren):
# dataset 6
line = f.readline()
t = line.split()
namefile = t[0]
cws = os.path.join(model_ws, os.path.dirname(namefile))
cn = os.path.basename(namefile)
# dataset 7
line = f.readline()
t = line.split()
gridstatus = t[0].lower()
msg = "GRIDSTATUS for the parent must be 'CHILDONLY'"
assert gridstatus == 'childonly', msg
# dataset 8
line = f.readline()
t = line.split()
ishflg, ibflg, iucbhsv, iucbfsv = int(t[0]), int(t[1]), int(
t[2]), int(t[3])
# dataset 9
line = f.readline()
t = line.split()
mxlgriter, ioutlgr = int(t[0]), int(t[1])
# dataset 10
line = f.readline()
t = line.split()
relaxh, relaxf = float(t[0]), float(t[1])
# dataset 11
line = f.readline()
t = line.split()
hcloselgr, fcloselgr = float(t[0]), float(t[1])
# dataset 12
line = f.readline()
t = line.split()
nplbeg, nprbeg, npcbeg = int(t[0]) - 1, int(t[1]) - 1, int(
t[2]) - 1
# dataset 13
line = f.readline()
t = line.split()
nplend, nprend, npcend = int(t[0]) - 1, int(t[1]) - 1, int(
t[2]) - 1
# dataset 14
line = f.readline()
t = line.split()
ncpp = int(t[0])
# dataset 15
line = f.readline()
t = line.split()
ncppl = []
for idx in range(nplend + 1 - nplbeg):
ncppl.append(int(t[idx]))
# build child data object
children_data.append(LgrChild(ishflg=ishflg, ibflg=ibflg,
iucbhsv=iucbhsv, iucbfsv=iucbfsv,
mxlgriter=mxlgriter, ioutlgr=ioutlgr,
relaxh=relaxh, relaxf=relaxf,
hcloselgr=hcloselgr,
fcloselgr=fcloselgr,
nplbeg=nplbeg, nprbeg=nprbeg,
npcbeg=npcbeg,
nplend=nplend, nprend=nprend,
npcend=npcend,
ncpp=ncpp, ncppl=ncppl))
# load child model
children.append(Modflow.load(cn, verbose=verbose, model_ws=cws,
load_only=load_only, forgive=forgive,
check=check))
lgr = ModflowLgr(version=version, exe_name=exe_name,
modelname=modelname, model_ws=model_ws,
verbose=verbose,
iupbhsv=iupbhsv, iupbfsv=iupbfsv,
parent=parent,
children=children, children_data=children_data)
# return model object
return lgr
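# Illustrative usage sketch (not part of the original module): load an
# existing MODFLOW-LGR control file, move the parent and child models to a
# new workspace, and rewrite the input files. The file and directory names
# are hypothetical placeholders.
def _example_load_and_rewrite(control_file='ex.lgr', ws='lgr_model',
                              new_ws='lgr_model_copy'):
    lgr = ModflowLgr.load(control_file, model_ws=ws)
    lgr.change_model_ws(new_pth=new_ws)
    lgr.write_input()
    return lgr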
|
|
"""
Sliding-window-based job/task queue class (& example of use.)
May use ``multiprocessing.Process`` or ``threading.Thread`` objects as queue
items, though within Fabric itself only ``Process`` objects are used/supported.
"""
import time
try:
import Queue
except ImportError:
import queue as Queue
from multiprocessing import Process
from fabric.network import ssh
from fabric.context_managers import settings
class JobQueue(object):
"""
The goal of this class is to make a queue of processes to run, and go
through them running X number at any given time.
    So if the bubble is 5, start with 5 running and move the bubble of
    running procs along the queue, looking something like this:
Start
...........................
[~~~~~]....................
___[~~~~~].................
_________[~~~~~]...........
__________________[~~~~~]..
____________________[~~~~~]
___________________________
End
"""
def __init__(self, max_running, comms_queue):
"""
        Set up the class with reasonable defaults.
"""
self._queued = []
self._running = []
self._completed = []
self._num_of_jobs = 0
self._max = max_running
self._comms_queue = comms_queue
self._finished = False
self._closed = False
self._debug = False
def _all_alive(self):
"""
Simply states if all procs are alive or not. Needed to determine when
to stop looping, and pop dead procs off and add live ones.
"""
if self._running:
return all([x.is_alive() for x in self._running])
else:
return False
def __len__(self):
"""
Just going to use number of jobs as the JobQueue length.
"""
return self._num_of_jobs
def close(self):
"""
        A sanity check, so that we need not worry about new jobs being added
        during the last throes of the job_queue's run.
"""
if self._debug:
print("job queue closed.")
self._closed = True
def append(self, process):
"""
        Add the Process() to the queue, so that later it can be checked up
        on. That is, if the JobQueue is still open.
        If the queue is closed, this will just silently do nothing.
        To get data back out of this process, give ``process`` access to the
        ``multiprocessing.Queue`` instance that was handed to the JobQueue
        constructor as ``comms_queue``. ``JobQueue.run`` will then include
        the queue's contents in its return value.
"""
if not self._closed:
self._queued.append(process)
self._num_of_jobs += 1
if self._debug:
print("job queue appended %s." % process.name)
def run(self):
"""
        This is the workhorse. It will take the initial jobs from _queued,
        start them, add them to _running, and then go into the main running
        loop.
        This loop will check for done procs and, if found, move them out of
        _running into _completed. It also checks for a _running queue with
        open spots, which it will then fill as discovered.
        To end the loop, there have to be no running procs and no more procs
        to be run in the queue.
        This function returns a dict keyed by job name; each value holds the
        job's exit code and any result pulled off the comms queue.
"""
def _advance_the_queue():
"""
            Helper function to do the job of popping a new proc off the
            queue, starting it, then adding it to the running queue. This
            will eventually deplete _queued, which is a condition of
            stopping the running while loop.
It also sets the env.host_string from the job.name, so that fabric
knows that this is the host to be making connections on.
"""
job = self._queued.pop()
if self._debug:
print("Popping '%s' off the queue and starting it" % job.name)
with settings(clean_revert=True, host_string=job.name, host=job.name):
job.start()
self._running.append(job)
# Prep return value so we can start filling it during main loop
results = {}
for job in self._queued:
results[job.name] = dict.fromkeys(('exit_code', 'results'))
if not self._closed:
raise Exception("Need to close() before starting.")
if self._debug:
print("Job queue starting.")
while len(self._running) < self._max:
_advance_the_queue()
# Main loop!
while not self._finished:
while len(self._running) < self._max and self._queued:
_advance_the_queue()
if not self._all_alive():
for id, job in enumerate(self._running):
if not job.is_alive():
if self._debug:
print("Job queue found finished proc: %s." %
job.name)
done = self._running.pop(id)
self._completed.append(done)
if self._debug:
print("Job queue has %d running." % len(self._running))
if not (self._queued or self._running):
if self._debug:
print("Job queue finished.")
for job in self._completed:
job.join()
self._finished = True
# Each loop pass, try pulling results off the queue to keep its
# size down. At this point, we don't actually care if any results
# have arrived yet; they will be picked up after the main loop.
self._fill_results(results)
time.sleep(ssh.io_sleep)
# Consume anything left in the results queue. Note that there is no
# need to block here, as the main loop ensures that all workers will
# already have finished.
self._fill_results(results)
# Attach exit codes now that we're all done & have joined all jobs
for job in self._completed:
if isinstance(job, Process):
results[job.name]['exit_code'] = job.exitcode
return results
def _fill_results(self, results):
"""
Attempt to pull data off self._comms_queue and add to 'results' dict.
If no data is available (i.e. the queue is empty), bail immediately.
"""
while True:
try:
datum = self._comms_queue.get_nowait()
results[datum['name']]['results'] = datum['result']
except Queue.Empty:
break
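# Illustrative sketch (in addition to the sample below): run two
# multiprocessing jobs whose names double as fabric host strings and collect
# their exit codes and results from JobQueue.run(). The host names and the
# uppercasing "work" are hypothetical placeholders.
def _example_worker(name, comms):
    # Workers report data by putting a dict keyed by their job name onto the
    # shared comms queue; JobQueue._fill_results picks it up.
    comms.put({'name': name, 'result': name.upper()})
def _example_collect_results():
    from multiprocessing import Queue as MPQueue
    comms = MPQueue()
    jobs = JobQueue(2, comms)
    for host in ('host1', 'host2'):
        proc = Process(target=_example_worker, args=(host, comms))
        proc.name = host
        jobs.append(proc)
    jobs.close()
    # Returns e.g. {'host1': {'exit_code': 0, 'results': 'HOST1'}, ...}
    return jobs.run()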
#### Sample
def try_using(parallel_type):
"""
    This will run the queue through its paces, and show a simple way of using
the job queue.
"""
def print_number(number):
"""
Simple function to give a simple task to execute.
"""
print(number)
if parallel_type == "multiprocessing":
from multiprocessing import Process as Bucket
elif parallel_type == "threading":
from threading import Thread as Bucket
# Make a job_queue with a bubble of len 5, and have it print verbosely
queue = Queue.Queue()
jobs = JobQueue(5, queue)
jobs._debug = True
# Add 20 procs onto the stack
for x in range(20):
jobs.append(Bucket(
target=print_number,
args=[x],
kwargs={},
))
    # Close up the queue and then start its execution
jobs.close()
jobs.run()
if __name__ == '__main__':
try_using("multiprocessing")
try_using("threading")
|
|
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import collections
import json
import os
import re
import shutil
from datetime import datetime
from pathlib import Path
from pystache import Renderer
from pants.backend.docgen.tasks.generate_pants_reference import GeneratePantsReference
from pants.backend.docgen.tasks.markdown_to_html import MarkdownToHtml
from pants.base.exceptions import TaskError
from pants.task.task import Task
"""Static Site Generator for the Pants Build documentation site.
Suggested use:
cd pants
./build-support/bin/publish_docs.sh # invokes sitegen.py
"""
def beautiful_soup(*args, **kwargs):
"""Indirection function so we can lazy-import bs4.
It's an expensive import that invokes re.compile a lot, so we don't want to incur that cost
unless we must.
"""
import bs4
return bs4.BeautifulSoup(*args, **kwargs)
class SiteGen(Task):
"""Generate the Pants static web site."""
@classmethod
def register_options(cls, register):
super().register_options(register)
register("--config-path", type=list, help="Path to .json file describing site structure.")
# TODO: requiring these products ensures that the markdown and reference tasks run before this
# one, but we don't use those products.
@classmethod
def prepare(cls, options, round_manager):
round_manager.require(MarkdownToHtml.MARKDOWN_HTML_PRODUCT)
round_manager.require_data(GeneratePantsReference.PANTS_REFERENCE_PRODUCT)
round_manager.require_data(GeneratePantsReference.BUILD_DICTIONARY_PRODUCT)
def execute(self):
if not self.get_options().config_path:
raise TaskError(
"The config_path option must be specified, e.g., with the --config-path flag"
)
for config_path in self.get_options().config_path:
config = load_config(config_path)
soups = load_soups(config)
precomputed = precompute(config, soups)
transform_soups(config, soups, precomputed)
template = load_template(config)
write_en_pages(config, soups, precomputed, template)
copy_extras(config)
def load_config(json_path):
"""Load config info from a .json file and return it."""
with Path(json_path).open() as f:
config = json.load(f)
# sanity-test the config:
assert config["tree"][0]["page"] == "index"
return config
def load_soups(config):
"""Generate BeautifulSoup AST for each page listed in config."""
return {
page: beautiful_soup(Path(path).read_text(), features="html.parser")
for page, path in config["sources"].items()
}
class Precomputed:
"""Info we compute (and preserve) before we mutate things."""
def __init__(self, page, pantsref):
"""
:param page: dictionary of per-page precomputed info
:param pantsref: dictionary of pantsrefs {'foo': 'path/to/page.html#fooref', ...}
"""
self.page = page
self.pantsref = pantsref
class PrecomputedPageInfo:
"""Info we compute (and preserve) for each page before we mutate things."""
def __init__(self, title, show_toc):
"""
:param title: Page title
:param show_toc: True iff we should show a toc for this page.
"""
self.title = title
self.show_toc = show_toc
self.toc = []
def precompute_pantsrefs(soups):
"""Return links for <a pantsmark="foo"> tags. Mutates soups to give needed ids.
If we see <a pantsref="foo">something</a>, that's a link whose destination is
a <a pantsmark="foo"> </a> tag, perhaps on some other tag. To stitch these
together, we scan the docset to find all the pantsmarks. If an pantsmark does not
yet have an id to anchor, we give it one.
Return value dictionary maps pantsrefs to locations:
{ "foo": "path/to/foo.html#fooref", "bar": "other/page.html#barref", ...}
"""
accumulator = {}
for (page, soup) in soups.items():
existing_anchors = find_existing_anchors(soup)
count = 100
for tag in soup.find_all("a"):
if not tag.has_attr("pantsmark"):
continue
pantsmark = tag["pantsmark"]
if pantsmark in accumulator:
raise TaskError(
f'pantsmarks are unique but "{pantsmark}" appears in {page} and {accumulator[pantsmark]}'
)
# To link to a place "mid-page", we need an HTML anchor.
# If this tag already has such an anchor, use it.
# Else, make one up.
anchor = tag.get("id") or tag.get("name")
if not anchor:
anchor = pantsmark
while anchor in existing_anchors:
count += 1
anchor = f"{pantsmark}_{count}"
tag["id"] = anchor
existing_anchors = find_existing_anchors(soup)
link = f"{page}.html#{anchor}"
accumulator[pantsmark] = link
return accumulator
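# Illustrative sketch (not part of the original task): a pantsmark on one
# page becomes an HTML anchor plus an entry mapping the mark to its link.
# The page name and markup below are hypothetical.
def _example_pantsref_precompute():
    soups = {
        "docs/install": beautiful_soup(
            '<p><a pantsmark="install-jvm"></a>Installing a JVM</p>',
            features="html.parser",
        ),
    }
    # Returns {"install-jvm": "docs/install.html#install-jvm"} and mutates
    # the soup so the <a> tag carries id="install-jvm".
    return precompute_pantsrefs(soups)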
def precompute(config, soups):
"""Return info we want to compute (and preserve) before we mutate things."""
show_toc = config.get("show_toc", {})
page = {}
pantsrefs = precompute_pantsrefs(soups)
for p, soup in soups.items():
title = get_title(soup) or p
page[p] = PrecomputedPageInfo(title=title, show_toc=show_toc.get(p, True))
return Precomputed(page=page, pantsref=pantsrefs)
def fixup_internal_links(config, soups):
"""Find href="..." links that link to pages in our docset; fix them up.
We don't preserve relative paths between files as we copy-transform them from source to dest. So
adjust the paths to work with new locations.
"""
# Pages can come from different dirs; they can go to different dirs.
# Thus, there's some relative-path-computing here.
reverse_directory = {}
for d, s in config["sources"].items():
reverse_directory[s] = d
for name, soup in soups.items():
old_src_dir = os.path.dirname(config["sources"][name])
for tag in soup.find_all(True):
if "href" not in tag.attrs:
continue
old_rel_path = tag["href"].split("#")[0]
old_dst = os.path.normpath(os.path.join(old_src_dir, old_rel_path))
if old_dst not in reverse_directory:
continue
new_dst = reverse_directory[old_dst] + ".html"
new_rel_path = rel_href(name, new_dst)
            # string replace instead of assign so as not to lose the anchor in foo.html#anchor
tag["href"] = tag["href"].replace(old_rel_path, new_rel_path, 1)
_heading_re = re.compile(r"^h[1-6]$") # match heading tag names h1,h2,h3,...
def rel_href(src: str, dst: str) -> str:
"""For src='foo/bar.html', dst='garply.html#frotz' return relative link
'../garply.html#frotz'."""
src_dir = Path(src).parent
return os.path.relpath(dst, src_dir)
def find_existing_anchors(soup):
"""Return existing ids (and names) from a soup."""
existing_anchors = set()
for tag in soup.find_all(True):
for attr in ["id", "name"]:
if tag.has_attr(attr):
existing_anchors.add(tag.get(attr))
return existing_anchors
def ensure_headings_linkable(soups):
"""foreach soup, foreach h1,h2,etc, if no id=... or name=..., give it one.
Enables tables of contents.
"""
for soup in soups.values():
ensure_page_headings_linkable(soup)
def ensure_page_headings_linkable(soup):
# To avoid re-assigning an existing id, note 'em down.
    # Case-insensitive because distinguishing links #Foo and #foo would be weird.
existing_anchors = find_existing_anchors(soup)
count = 100
for tag in soup.find_all(_heading_re):
if not (tag.has_attr("id") or tag.has_attr("name")):
snippet = "".join([c for c in tag.text if c.isalpha()])[:20]
while True:
count += 1
candidate_id = f"heading_{snippet}_{count}".lower()
if candidate_id not in existing_anchors:
existing_anchors.add(candidate_id)
tag["id"] = candidate_id
break
def link_pantsrefs(soups, precomputed):
"""Transorm soups: <a pantsref="foo"> becomes <a href="../foo_page.html#foo">"""
for (page, soup) in soups.items():
for a in soup.find_all("a"):
if not a.has_attr("pantsref"):
continue
pantsref = a["pantsref"]
if pantsref not in precomputed.pantsref:
raise TaskError(
f'Page {page} has pantsref "{pantsref}" and I cannot find pantsmark for it'
)
a["href"] = rel_href(page, precomputed.pantsref[pantsref])
def transform_soups(config, soups, precomputed):
"""Mutate our soups to be better when we write them out later."""
fixup_internal_links(config, soups)
ensure_headings_linkable(soups)
# Do this after ensure_headings_linkable so that there will be links.
generate_page_tocs(soups, precomputed)
link_pantsrefs(soups, precomputed)
def get_title(soup):
"""Given a soup, pick out a title."""
if soup.title:
return soup.title.string
if soup.h1:
return soup.h1.string
return ""
def generate_site_toc(config, precomputed, here):
site_toc = []
def recurse(tree, depth_so_far):
for node in tree:
if "collapsible_heading" in node and "pages" in node:
heading = node["collapsible_heading"]
pages = node["pages"]
links = []
collapse_open = False
for cur_page in pages:
html_filename = f"{cur_page}.html"
page_is_here = cur_page == here
if page_is_here:
link = html_filename
collapse_open = True
else:
link = os.path.relpath(html_filename, os.path.dirname(here))
links.append(
dict(link=link, text=precomputed.page[cur_page].title, here=page_is_here)
)
site_toc.append(
dict(
depth=depth_so_far,
links=links,
dropdown=True,
heading=heading,
id=heading.replace(" ", "-"),
open=collapse_open,
)
)
if "heading" in node:
heading = node["heading"]
site_toc.append(
dict(
depth=depth_so_far,
links=None,
dropdown=False,
heading=heading,
id=heading.replace(" ", "-"),
)
)
if "pages" in node and "collapsible_heading" not in node:
pages = node["pages"]
links = []
for cur_page in pages:
html_filename = f"{cur_page}.html"
page_is_here = cur_page == here
if page_is_here:
link = html_filename
else:
link = os.path.relpath(html_filename, os.path.dirname(here))
links.append(
dict(link=link, text=precomputed.page[cur_page].title, here=page_is_here)
)
site_toc.append(
dict(
depth=depth_so_far,
links=links,
dropdown=False,
heading=None,
id=heading.replace(" ", "-"),
)
)
if "children" in node:
recurse(node["children"], depth_so_far + 1)
if "tree" in config:
recurse(config["tree"], 0)
return site_toc
def hdepth(tag):
"""Compute an h tag's "outline depth".
E.g., h1 at top level is 1, h1 in a section is 2, h2 at top level is 2.
"""
if not _heading_re.search(tag.name):
raise TaskError(f"Can't compute heading depth of non-heading {tag}")
depth = int(tag.name[1], 10) # get the 2 from 'h2'
cursor = tag
while cursor:
if cursor.name == "section":
depth += 1
cursor = cursor.parent
return depth
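# Illustrative sketch (not part of the original task): an h2 nested inside a
# single <section> has outline depth 3, per the rule described in hdepth.
# The markup below is hypothetical.
def _example_hdepth():
    soup = beautiful_soup(
        "<section><h2>Usage</h2></section>", features="html.parser"
    )
    return hdepth(soup.h2)  # == 3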
def generate_page_tocs(soups, precomputed):
for name, soup in soups.items():
if precomputed.page[name].show_toc:
precomputed.page[name].toc = generate_page_toc(soup)
def generate_page_toc(soup):
"""Return page-level (~list of headings) TOC template data for soup."""
# Maybe we don't want to show all the headings. E.g., it's common for a page
# to have just one H1, a title at the top. Our heuristic: if a page has just
# one heading of some outline level, don't show it.
found_depth_counts = collections.defaultdict(int)
for tag in soup.find_all(_heading_re):
if tag.get("id") or tag.get("name"):
found_depth_counts[hdepth(tag)] += 1
depth_list = [i for i in range(100) if 1 < found_depth_counts[i]]
depth_list = depth_list[:4]
toc = []
for tag in soup.find_all(_heading_re):
depth = hdepth(tag)
if depth in depth_list:
toc.append(
dict(
depth=depth_list.index(depth) + 1,
link=tag.get("id") or tag.get("name"),
text=tag.text,
)
)
return toc
def generate_generated(config, here):
return f"{config['sources'][here]} {datetime.now().isoformat()}"
def render_html(dst, config, soups, precomputed, template):
soup = soups[dst]
renderer = Renderer()
title = precomputed.page[dst].title
topdots = "../" * dst.count("/")
body_html = f"{soup.body if soup.body else soup}"
html = renderer.render(
template,
body_html=body_html,
generated=generate_generated(config, dst),
site_toc=generate_site_toc(config, precomputed, dst),
has_page_toc=bool(precomputed.page[dst].toc),
page_path=dst,
page_toc=precomputed.page[dst].toc,
title=title,
topdots=topdots,
)
return html
def write_en_pages(config, soups, precomputed, template):
outdir = config["outdir"]
for dst in soups:
dst_path = Path(outdir, f"{dst}.html")
dst_path.parent.mkdir(parents=True, exist_ok=True)
dst_path.write_text(data=render_html(dst, config, soups, precomputed, template))
def copy_extras(config):
"""copy over "extra" files named in config json: stylesheets, logos, ..."""
outdir = config["outdir"]
for dst, src in config["extras"].items():
dst_path = Path(outdir, dst)
dst_path.parent.mkdir(parents=True, exist_ok=True)
shutil.copy(src, dst_path)
def load_template(config):
"""Return text of template file specified in config."""
return Path(config["template"]).read_text()
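# Illustrative sketch (not part of the original task): the minimal shape of
# the .json config that SiteGen consumes, satisfying the sanity check in
# load_config (tree[0]["page"] == "index"). All paths here are hypothetical.
def _example_minimal_config():
    return {
        "sources": {"index": "dist/markdown/html/src/docs/index.html"},
        "extras": {},
        "template": "src/docs/doctemplate.mustache",
        "outdir": "dist/docsite",
        "tree": [{"page": "index"}],
        "show_toc": {"index": False},
    }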
|
|
# Copyright (c) 2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import binascii
import json
import sys
import time
from testtools import testcase
from barbican.plugin.util import translations
from barbican.tests import keys
from barbican.tests import utils
from functionaltests.api import base
from functionaltests.api.v1.behaviors import secret_behaviors
from functionaltests.api.v1.models import secret_models
def get_pem_content(pem):
b64_content = translations.get_pem_components(pem)[1]
return base64.b64decode(b64_content)
def get_private_key_req():
return {'name': 'myprivatekey',
'payload_content_type': 'application/octet-stream',
'payload_content_encoding': 'base64',
'algorithm': 'rsa',
'bit_length': 2048,
'secret_type': 'private',
'payload': base64.b64encode(keys.get_private_key_pem())}
def get_public_key_req():
return {'name': 'mypublickey',
'payload_content_type': 'application/octet-stream',
'payload_content_encoding': 'base64',
'algorithm': 'rsa',
'bit_length': 2048,
'secret_type': 'public',
'payload': base64.b64encode(keys.get_public_key_pem())}
def get_certificate_req():
return {'name': 'mycertificate',
'payload_content_type': 'application/octet-stream',
'payload_content_encoding': 'base64',
'algorithm': 'rsa',
'bit_length': 2048,
'secret_type': 'certificate',
'payload': base64.b64encode(keys.get_certificate_pem())}
def get_passphrase_req():
return {'name': 'mypassphrase',
'payload_content_type': 'text/plain',
'secret_type': 'passphrase',
'payload': 'mysecretpassphrase'}
def get_default_data():
return {
"name": "AES key",
"expiration": "2018-02-28T19:14:44.180394",
"algorithm": "aes",
"bit_length": 256,
"mode": "cbc",
"payload": get_default_payload(),
"payload_content_type": "application/octet-stream",
"payload_content_encoding": "base64",
}
def get_default_payload():
return "gF6+lLoF3ohA9aPRpt+6bQ=="
@utils.parameterized_test_case
class SecretsTestCase(base.TestCase):
def setUp(self):
super(SecretsTestCase, self).setUp()
self.behaviors = secret_behaviors.SecretBehaviors(self.client)
        # make local mutable copies of the default data to prevent
# possible data contamination if (when?) the data contains
# any nested dicts.
# TODO(tdink) Move to a config file
self.default_secret_create_data = get_default_data()
self.default_secret_create_all_none_data = {
"name": None,
"expiration": None,
"algorithm": None,
"bit_length": None,
"mode": None,
"payload": None,
"payload_content_type": None,
"payload_content_encoding": None,
}
self.default_secret_create_emptystrings_data = {
"name": '',
"expiration": '',
"algorithm": '',
"bit_length": '',
"mode": '',
"payload": '',
"payload_content_type": '',
"payload_content_encoding": '',
}
self.default_secret_create_two_phase_data = {
"name": "AES key",
"expiration": "2018-02-28T19:14:44.180394",
"algorithm": "aes",
"bit_length": 256,
"mode": "cbc",
}
def tearDown(self):
self.behaviors.delete_all_created_secrets()
super(SecretsTestCase, self).tearDown()
@testcase.attr('negative')
def test_secret_create_with_only_content_type_no_payload(self):
"""Create secret with valid content type but no payload."""
test_model = secret_models.SecretModel(
**self.default_secret_create_all_none_data)
test_model.payload_content_type = 'application/octet-stream'
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(resp.status_code, 400)
@testcase.attr('positive')
def test_secret_create_then_check_content_types(self):
"""Check that set content-type attribute is retained in metadata."""
test_model = secret_models.SecretModel(
**self.default_secret_create_data)
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(resp.status_code, 201)
resp = self.behaviors.get_secret_metadata(secret_ref)
self.assertEqual(resp.status_code, 200)
content_types = resp.model.content_types
self.assertIsNotNone(content_types)
self.assertIn('default', content_types)
self.assertEqual(content_types['default'],
test_model.payload_content_type)
@testcase.attr('positive')
def test_secret_create_all_none(self):
"""Covers case of a POST request with no JSON data."""
test_model = secret_models.SecretModel(
**self.default_secret_create_all_none_data)
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(resp.status_code, 201)
@testcase.attr('negative')
def test_secret_get_secret_doesnt_exist(self):
"""GET a non-existent secret.
Should return a 404.
"""
resp = self.behaviors.get_secret_metadata('not_a_uuid')
self.assertEqual(resp.status_code, 404)
@testcase.attr('positive')
def test_secret_get_payload_no_accept_header(self):
"""GET a secret payload, do not pass in accept header.
Should return a 200.
"""
test_model = secret_models.SecretModel(
**self.default_secret_create_data)
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(resp.status_code, 201)
get_resp = self.behaviors.get_secret(
secret_ref,
payload_content_type='')
self.assertEqual(get_resp.status_code, 200)
self.assertIn(test_model.payload,
binascii.b2a_base64(get_resp.content))
@testcase.attr('negative')
def test_secret_delete_doesnt_exist(self):
"""DELETE a non-existent secret.
Should return a 404.
"""
resp = self.behaviors.delete_secret('not_a_uuid', expected_fail=True)
self.assertEqual(resp.status_code, 404)
@testcase.attr('negative')
def test_secret_get_invalid_mime_type(self):
"""Covers getting a secret with an invalid mime type."""
test_model = secret_models.SecretModel(
**self.default_secret_create_data)
resp, secret_ref = self.behaviors.create_secret(test_model)
resp = self.behaviors.get_secret(secret_ref,
payload_content_type="i/m")
self.assertEqual(resp.status_code, 406)
@testcase.attr('negative')
def test_secret_create_with_expiration_passed(self):
"""Create a secret with an expiration that has already passed.
Should return a 400.
"""
test_model = secret_models.SecretModel(
**self.default_secret_create_data)
test_model.expiration = '2000-01-10T14:58:52.546795'
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(resp.status_code, 400)
@testcase.attr('negative')
def test_secret_create_with_empty_strings(self):
"""Secret create with empty Strings for all attributes.
Should return a 400.
"""
test_model = secret_models.SecretModel(
**self.default_secret_create_emptystrings_data)
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(resp.status_code, 400)
@testcase.attr('negative')
def test_secret_create_with_invalid_content_type(self):
"""Create secret with an invalid content type in HTTP header.
Should return a 415.
"""
test_model = secret_models.SecretModel(
**self.default_secret_create_data)
headers = {"Content-Type": "crypto/boom"}
resp, secret_ref = self.behaviors.create_secret(test_model, headers)
self.assertEqual(resp.status_code, 415)
@testcase.attr('negative')
def test_secret_create_with_oversized_payload(self):
"""Create a secret that is larger than the max payload size.
Should return a 413 if the secret size is greater than the
maximum allowed size.
"""
test_model = secret_models.SecretModel(
**self.default_secret_create_data)
test_model.payload = str(self.oversized_payload)
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(resp.status_code, 413)
@testcase.attr('negative')
def test_secret_put_when_payload_doesnt_exist(self):
"""PUT secret to a non-existent secret.
Should return 404.
"""
resp = self.behaviors.update_secret_payload(
secret_ref='not_a_uuid',
payload_content_type='application/octet-stream',
payload_content_encoding='base64',
payload='testing putting to non-existent secret')
self.assertEqual(resp.status_code, 404)
@testcase.attr('negative')
def test_secret_put_when_payload_already_exists(self):
"""PUT against a secret that already has encrypted data.
Should return 409.
"""
test_model = secret_models.SecretModel(
**self.default_secret_create_data)
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(resp.status_code, 201)
resp = self.behaviors.update_secret_payload(
secret_ref=secret_ref,
payload_content_type='application/octet-stream',
payload_content_encoding='base64',
payload='testing putting data in secret that already has data')
self.assertEqual(resp.status_code, 409)
@testcase.attr('negative')
def test_secret_put_two_phase_empty_payload(self):
"""Covers case of putting empty String to a secret.
Should return 400.
"""
test_model = secret_models.SecretModel(
**self.default_secret_create_two_phase_data)
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(resp.status_code, 201)
put_resp = self.behaviors.update_secret_payload(
secret_ref=secret_ref,
payload_content_type='application/octet-stream',
payload_content_encoding='base64',
payload='')
self.assertEqual(put_resp.status_code, 400)
@testcase.attr('negative')
def test_secret_put_two_phase_invalid_content_type(self):
"""PUT with an invalid content type. Should return 415.
Launchpad bug #1208601
- Updated in Barbican blueprint barbican-enforce-content-type
"""
test_model = secret_models.SecretModel(
**self.default_secret_create_two_phase_data)
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(resp.status_code, 201)
put_resp = self.behaviors.update_secret_payload(
secret_ref=secret_ref,
payload_content_type='crypto/boom',
payload_content_encoding='base64',
payload='invalid content type')
self.assertEqual(put_resp.status_code, 415)
@testcase.attr('negative')
def test_secret_put_two_phase_no_payload(self):
"""Covers case of putting null String to a secret.
Should return 400.
"""
test_model = secret_models.SecretModel(
**self.default_secret_create_two_phase_data)
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(resp.status_code, 201)
put_resp = self.behaviors.update_secret_payload(
secret_ref=secret_ref,
payload_content_type='application/octet-stream',
payload_content_encoding='base64',
payload=None)
self.assertEqual(put_resp.status_code, 400)
@testcase.attr('negative')
def test_secret_put_two_phase_w_oversized_binary_data_not_utf8(self):
"""PUT with an oversized binary string that isn't UTF-8.
Launchpad bug #1315498.
"""
oversized_payload = bytearray().zfill(self.max_payload_size + 1)
# put a value in the middle of the data that does not have a UTF-8
# code point. Using // to be python3-friendly.
oversized_payload[self.max_payload_size // 2] = b'\xb0'
test_model = secret_models.SecretModel(
**self.default_secret_create_two_phase_data)
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(resp.status_code, 201)
put_resp = self.behaviors.update_secret_payload(
secret_ref=secret_ref,
payload_content_type='application/octet-stream',
payload_content_encoding='base64',
payload=str(oversized_payload))
self.assertEqual(put_resp.status_code, 413)
@testcase.attr('negative')
def test_secret_put_two_phase_oversized_payload(self):
"""PUT with oversized payload should return 413.
        Covers the case of putting secret data that is larger than the maximum
        secret size allowed by Barbican; such a request should be rejected
        with a 413.
"""
oversized_payload = self.oversized_payload
test_model = secret_models.SecretModel(
**self.default_secret_create_two_phase_data)
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(resp.status_code, 201)
put_resp = self.behaviors.update_secret_payload(
secret_ref=secret_ref,
payload_content_type='application/octet-stream',
payload_content_encoding='base64',
payload=oversized_payload)
self.assertEqual(put_resp.status_code, 413)
@testcase.attr('positive')
def test_secret_put_two_phase_valid_binary_data_not_utf8(self):
"""A string with binary data that doesn't contain UTF-8 code points.
Launchpad bug #1315498.
"""
# put a value in the data that does not have a UTF-8 code point.
data = b'\xb0'
test_model = secret_models.SecretModel(
**self.default_secret_create_two_phase_data)
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(resp.status_code, 201)
put_resp = self.behaviors.update_secret_payload(
secret_ref=secret_ref,
payload_content_type='application/octet-stream',
payload_content_encoding='base64',
payload=str(data))
self.assertEqual(put_resp.status_code, 204)
@testcase.attr('positive')
def test_secret_put_two_phase_high_range_unicode_character(self):
"""Tests a high-range unicode character on a two-step PUT.
Launchpad bug #1315498
"""
data = u'\U0001F37A'
data = data.encode('utf-8')
test_model = secret_models.SecretModel(
**self.default_secret_create_two_phase_data)
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(resp.status_code, 201)
put_resp = self.behaviors.update_secret_payload(
secret_ref=secret_ref,
payload_content_type='application/octet-stream',
payload_content_encoding='base64',
payload=data)
self.assertEqual(put_resp.status_code, 204)
@testcase.attr('positive')
def test_secret_get_nones_payload_with_a_octet_stream(self):
"""Tests getting a secret with octet-stream."""
test_model = secret_models.SecretModel(
**self.default_secret_create_two_phase_data)
test_model.payload_content_encoding = 'base64'
test_model.payload_content_type = 'application/octet-stream'
test_model.payload = base64.b64encode('abcdef')
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(resp.status_code, 201)
get_resp = self.behaviors.get_secret(
secret_ref,
payload_content_type=test_model.payload_content_type,
payload_content_encoding=test_model.payload_content_encoding)
self.assertEqual(get_resp.status_code, 200)
self.assertIn(test_model.payload,
binascii.b2a_base64(get_resp.content))
@testcase.attr('negative')
def test_secret_create_defaults_bad_content_type_check_message(self):
"""Verifying the returned error message matches the expected form."""
test_model = secret_models.SecretModel(
**self.default_secret_create_data)
test_model.payload_content_type = 'plain-text'
resp, secret_ref = self.behaviors.create_secret(test_model)
# first, ensure that the return code is 400
self.assertEqual(resp.status_code, 400)
resp_dict = json.loads(resp.content)
self.assertIn(
"Provided object does not match schema 'Secret': "
"payload_content_type plain-text is not one of ['text/plain', "
"'text/plain;charset=utf-8', 'text/plain; charset=utf-8', "
"'application/octet-stream'", resp_dict['description'])
self.assertIn("Bad Request", resp_dict['title'])
@testcase.attr('negative')
def test_secret_create_then_expire_then_check(self):
"""Covers case where you try to retrieve a secret that is expired.
This test creates a secret that will soon expire.
After it expires, check it and verify that it is no longer
a valid secret.
"""
# create a secret that expires in 5 seconds
timestamp = utils.create_timestamp_w_tz_and_offset(seconds=5)
test_model = secret_models.SecretModel(
**self.default_secret_create_data)
test_model.expiration = timestamp
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(resp.status_code, 201)
        # now get the secret - it will still be valid
get_resp = self.behaviors.get_secret_metadata(secret_ref)
self.assertEqual(get_resp.status_code, 200)
# now wait 10 seconds
time.sleep(10)
# now get the secret - should be invalid (expired)
resp = self.behaviors.get_secret_metadata(secret_ref)
self.assertEqual(resp.status_code, 404)
@utils.parameterized_dataset({
'alphanumeric': ['1f34ds'],
'punctuation': ['~!@#$%^&*()_+`-={}[]|:;<>,.?'],
'uuid': ['54262d9d-4bc7-4821-8df0-dc2ca8e112bb'],
'len_255': [base.TestCase.max_sized_field],
'empty': [''],
'null': [None]
})
@testcase.attr('positive')
def test_secret_create_defaults_valid_name(self, name):
"""Covers cases of creating secrets with valid names."""
test_model = secret_models.SecretModel(
**self.default_secret_create_data)
test_model.name = name
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(resp.status_code, 201)
@utils.parameterized_dataset({
'int': [400]
})
@testcase.attr('negative')
def test_secret_create_defaults_invalid_name(self, name):
"""Create secrets with various invalid names.
Should return 400.
"""
test_model = secret_models.SecretModel(
**self.default_secret_create_data)
test_model.name = name
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(resp.status_code, 400)
@utils.parameterized_dataset({
'invalid': ['invalid']
})
@testcase.attr('positive')
def test_secret_create_valid_algorithms(self, algorithm):
"""Creates secrets with various valid algorithms."""
test_model = secret_models.SecretModel(
**self.default_secret_create_data)
test_model.algorithm = algorithm
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(resp.status_code, 201)
@utils.parameterized_dataset({
'int': [400]
})
@testcase.attr('negative')
def test_secret_create_invalid_algorithms(self, algorithm):
"""Creates secrets with various invalid algorithms."""
test_model = secret_models.SecretModel(
**self.default_secret_create_data)
test_model.algorithm = algorithm
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(resp.status_code, 400)
@utils.parameterized_dataset({
'512': [512],
'sixteen': [16],
'fifteen': [15],
'eight': [8],
'seven': [7],
'one': [1],
'none': [None]
})
@testcase.attr('positive')
def test_secret_create_defaults_valid_bit_length(self, bit_length):
"""Covers cases of creating secrets with valid bit lengths."""
test_model = secret_models.SecretModel(
**self.default_secret_create_data)
test_model.bit_length = bit_length
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(resp.status_code, 201)
@utils.parameterized_dataset({
'str_type': ['not-an-int'],
'empty': [''],
'blank': [' '],
'negative_maxint': [-sys.maxint],
'negative_one': [-1],
'zero': [0]
})
@testcase.attr('negative')
def test_secret_create_defaults_invalid_bit_length(self, bit_length):
"""Covers cases of creating secrets with invalid bit lengths."""
test_model = secret_models.SecretModel(
**self.default_secret_create_data)
test_model.bit_length = bit_length
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(resp.status_code, 400)
@utils.parameterized_dataset({
'cbc': ['cbc'],
'unknown_positive': ['unknown']
})
@testcase.attr('positive')
def test_secret_create_defaults_valid_mode(self, mode):
"""Covers cases of creating secrets with valid modes."""
test_model = secret_models.SecretModel(
**self.default_secret_create_data)
test_model.mode = mode
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(resp.status_code, 201)
@utils.parameterized_dataset({
'zero': [0],
'oversized_string': [base.TestCase.oversized_field],
'int': [400]
})
@testcase.attr('negative')
def test_secret_create_defaults_invalid_mode(self, mode):
"""Covers cases of creating secrets with invalid modes."""
test_model = secret_models.SecretModel(
**self.default_secret_create_data)
test_model.mode = mode
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(resp.status_code, 400)
@utils.parameterized_dataset({
'text_content_type_none_encoding': {
'payload_content_type': 'text/plain',
'payload_content_encoding': None},
'utf8_text_content_type_none_encoding': {
'payload_content_type': 'text/plain; charset=utf-8',
'payload_content_encoding': None},
'no_space_utf8_text_content_type_none_encoding': {
'payload_content_type': 'text/plain;charset=utf-8',
'payload_content_encoding': None},
'octet_content_type_base64_encoding': {
'payload_content_type': 'application/octet-stream',
'payload_content_encoding': 'base64'}
})
@testcase.attr('positive')
def test_secret_create_defaults_valid_types_and_encoding(
self, payload_content_type, payload_content_encoding):
"""Creates secrets with various content types and encodings."""
test_model = secret_models.SecretModel(
**self.default_secret_create_data)
test_model.payload_content_type = payload_content_type
test_model.payload_content_encoding = payload_content_encoding
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(resp.status_code, 201)
get_resp = self.behaviors.get_secret(
secret_ref,
payload_content_type=payload_content_type,
payload_content_encoding=payload_content_encoding)
self.assertEqual(get_resp.status_code, 200)
if payload_content_encoding == 'base64':
self.assertIn(test_model.payload,
binascii.b2a_base64(get_resp.content))
else:
self.assertIn(test_model.payload, get_resp.content)
@utils.parameterized_dataset({
'text_content_type_none_encoding': {
'payload_content_type': 'text/plain',
'payload_content_encoding': None},
'utf8_text_content_type_none_encoding': {
'payload_content_type': 'text/plain; charset=utf-8',
'payload_content_encoding': None},
'no_space_utf8_text_content_type_none_encoding': {
'payload_content_type': 'text/plain;charset=utf-8',
'payload_content_encoding': None},
'octet_content_type_base64_encoding': {
'payload_content_type': 'application/octet-stream',
'payload_content_encoding': 'base64'}
})
@testcase.attr('positive', 'deprecated')
def test_secret_create_defaults_valid_types_and_encoding_old_way(
self, payload_content_type, payload_content_encoding):
"""Creates secrets with various content types and encodings."""
test_model = secret_models.SecretModel(
**self.default_secret_create_data)
test_model.payload_content_type = payload_content_type
test_model.payload_content_encoding = payload_content_encoding
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(resp.status_code, 201)
get_resp = self.behaviors.get_secret_based_on_content_type(
secret_ref,
payload_content_type=payload_content_type,
payload_content_encoding=payload_content_encoding)
self.assertEqual(get_resp.status_code, 200)
if payload_content_encoding == 'base64':
self.assertIn(test_model.payload,
binascii.b2a_base64(get_resp.content))
else:
self.assertIn(test_model.payload, get_resp.content)
@utils.parameterized_dataset({
'empty_content_type_and_encoding': {
'payload_content_type': '',
'payload_content_encoding': ''},
'none_content_type_and_encoding': {
'payload_content_type': None,
'payload_content_encoding': None},
'large_string_content_type_and_encoding': {
'payload_content_type': base.TestCase.oversized_field,
'payload_content_encoding': base.TestCase.oversized_field},
'int_content_type_and_encoding': {
'payload_content_type': 123,
'payload_content_encoding': 123},
'none_content_type_base64_content_encoding': {
'payload_content_type': None,
'payload_content_encoding': 'base64'},
'text_content_type_none_content_encoding': {
'payload_content_type': 'text/plain',
'payload_content_encoding': ''},
'text_no_subtype_content_type_none_content_encoding': {
'payload_content_type': 'text',
'payload_content_encoding': None},
'text_slash_no_subtype_content_type_none_content_encoding': {
'payload_content_type': 'text/',
'payload_content_encoding': None},
'text_content_type_empty_content_encoding': {
'payload_content_type': 'text/plain',
'payload_content_encoding': ' '},
'text_content_type_spaces_content_encoding': {
'payload_content_type': 'text/plain',
'payload_content_encoding': ' '},
'text_content_type_base64_content_encoding': {
'payload_content_type': 'text/plain',
'payload_content_encoding': 'base64'},
'text_and_utf88_content_type_none_content_encoding': {
'payload_content_type': 'text/plain; charset=utf-88',
'payload_content_encoding': None},
'invalid_content_type_base64_content_encoding': {
'payload_content_type': 'invalid',
'payload_content_encoding': 'base64'},
'invalid_content_type_none_content_encoding': {
'payload_content_type': 'invalid',
'payload_content_encoding': None},
'octet_content_type_invalid_content_encoding': {
'payload_content_type': 'application/octet-stream',
'payload_content_encoding': 'invalid'},
'text_content_type_invalid_content_encoding': {
'payload_content_type': 'text/plain',
'payload_content_encoding': 'invalid'},
'none_content_type_invalid_content_encoding': {
'payload_content_type': None,
'payload_content_encoding': 'invalid'},
})
@testcase.attr('negative')
def test_secret_create_defaults_invalid_types_and_encoding(
self, payload_content_type, payload_content_encoding):
"""Creating secrets with invalid payload types and encodings."""
test_model = secret_models.SecretModel(
**self.default_secret_create_data)
test_model.payload_content_type = payload_content_type
test_model.payload_content_encoding = payload_content_encoding
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(resp.status_code, 400)
@utils.parameterized_dataset({
'max_payload_string': [base.TestCase.max_sized_payload]
})
@testcase.attr('positive')
def test_secret_create_defaults_valid_payload(self, payload):
"""Create secrets with a various valid payloads."""
test_model = secret_models.SecretModel(
**self.default_secret_create_data)
overrides = {"payload": payload}
test_model.override_values(**overrides)
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(resp.status_code, 201)
@utils.parameterized_dataset({
'empty': [''],
'array': [['boom']],
'int': [123],
'none': [None],
'bad_character': ['\u0080']
})
@testcase.attr('negative')
def test_secret_create_defaults_invalid_payload(self, payload):
"""Covers creating secrets with various invalid payloads."""
test_model = secret_models.SecretModel(
**self.default_secret_create_data)
overrides = {"payload_content_type": "application/octet-stream",
"payload_content_encoding": "base64",
"payload": payload}
test_model.override_values(**overrides)
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(resp.status_code, 400)
@utils.parameterized_dataset({
'negative_five_long_expire': {
'timezone': '-05:00',
'days': 5},
'positive_five_long_expire': {
'timezone': '+05:00',
'days': 5},
'negative_one_short_expire': {
'timezone': '-01',
'days': 1},
'positive_one_short_expire': {
'timezone': '+01',
'days': 1}
})
@testcase.attr('positive')
def test_secret_create_defaults_valid_expiration(self, timezone, days):
"""Create secrets with a various valid expiration data."""
timestamp = utils.create_timestamp_w_tz_and_offset(timezone=timezone,
days=days)
test_model = secret_models.SecretModel(
**self.default_secret_create_data)
test_model.expiration = timestamp
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(resp.status_code, 201)
@utils.parameterized_dataset({
'malformed_timezone': {
'timezone': '-5:00',
'days': 0}
})
@testcase.attr('negative')
def test_secret_create_defaults_invalid_expiration(self, timezone, days):
"""Create secrets with various invalid expiration data."""
timestamp = utils.create_timestamp_w_tz_and_offset(timezone=timezone,
days=days)
test_model = secret_models.SecretModel(
**self.default_secret_create_data)
test_model.expiration = timestamp
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(resp.status_code, 400)
@testcase.attr('positive')
def test_secret_create_change_host_header(self, **kwargs):
"""Create a secret with a (possibly) malicious host name in header."""
test_model = secret_models.SecretModel(
**self.default_secret_create_data)
malicious_hostname = 'some.bad.server.com'
changed_host_header = {'Host': malicious_hostname}
resp, secret_ref = self.behaviors.create_secret(
test_model, headers=changed_host_header)
self.assertEqual(resp.status_code, 201)
# get Location field from result and assert that it is NOT the
# malicious one.
regex = '.*{0}.*'.format(malicious_hostname)
self.assertNotRegexpMatches(resp.headers['location'], regex)
@utils.parameterized_dataset({
'symmetric': ['symmetric',
base64.b64decode(
get_default_payload()),
get_default_data()],
'private': ['private',
keys.get_private_key_pem(),
get_private_key_req()],
'public': ['public',
keys.get_public_key_pem(),
get_public_key_req()],
'certificate': ['certificate',
keys.get_certificate_pem(),
get_certificate_req()],
'passphrase': ['passphrase',
'mysecretpassphrase',
get_passphrase_req()]
})
@testcase.attr('positive')
def test_secret_create_with_secret_type(self, secret_type, expected, spec):
"""Create secrets with various secret types."""
test_model = secret_models.SecretModel(**spec)
test_model.secret_type = secret_type
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(201, resp.status_code)
resp = self.behaviors.get_secret_metadata(secret_ref)
secret_type_response = resp.model.secret_type
self.assertIsNotNone(secret_type_response)
self.assertEqual(secret_type, secret_type_response)
content_type = spec['payload_content_type']
get_resp = self.behaviors.get_secret(secret_ref,
content_type)
self.assertEqual(expected, get_resp.content)
@utils.parameterized_dataset({
        'invalid_http_content_type_characters_latin': {
            'http_content_type': u'\u00c4'.encode('utf-8')},
        'invalid_http_content_type_characters_arabic': {
            'http_content_type': u'\u060f'.encode('utf-8')},
        'invalid_http_content_type_characters_cyrillic': {
            'http_content_type': u'\u0416'.encode('utf-8')},
        'invalid_http_content_type_characters_replacement_character': {
            'http_content_type': u'\ufffd'.encode('utf-8')},
})
@testcase.attr('negative')
def test_secret_create_with_invalid_http_content_type_characters(
self, http_content_type):
"""Attempt to create secrets with invalid unicode characters in the
HTTP request's Content-Type header. Should return a 415.
"""
test_model = secret_models.SecretModel(
**self.default_secret_create_data)
headers = {"Content-Type": http_content_type}
resp, secret_ref = self.behaviors.create_secret(test_model, headers)
self.assertEqual(resp.status_code, 415)
@utils.parameterized_dataset({
        'invalid_payload_content_type_characters_latin': {
            'payload_content_type': u'\u00c4'.encode('utf-8')},
        'invalid_payload_content_type_characters_arabic': {
            'payload_content_type': u'\u060f'.encode('utf-8')},
        'invalid_payload_content_type_characters_cyrillic': {
            'payload_content_type': u'\u0416'.encode('utf-8')},
        'invalid_payload_content_type_characters_replacement_character': {
            'payload_content_type': u'\ufffd'.encode('utf-8')},
})
@testcase.attr('negative')
def test_secret_create_with_invalid_payload_content_type_characters(
self, payload_content_type):
"""Attempt to create secrets with non-ascii characters in the
payload's content type attribute. Should return a 400.
"""
test_model = secret_models.SecretModel(
**self.default_secret_create_data)
test_model.payload_content_type = payload_content_type
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(resp.status_code, 400)
class SecretsPagingTestCase(base.PagingTestCase):
def setUp(self):
super(SecretsPagingTestCase, self).setUp()
self.behaviors = secret_behaviors.SecretBehaviors(self.client)
# make a local mutable copy of the default data to prevent
# possible data contamination
self.create_default_data = get_default_data()
def tearDown(self):
self.behaviors.delete_all_created_secrets()
super(SecretsPagingTestCase, self).tearDown()
def create_model(self):
return secret_models.SecretModel(**self.create_default_data)
def create_resources(self, count=0, model=None):
for x in range(0, count):
self.behaviors.create_secret(model)
def get_resources(self, limit=10, offset=0, filter=None):
return self.behaviors.get_secrets(limit=limit, offset=offset,
filter=filter)
def set_filter_field(self, unique_str, model):
        """Set the name field, which we use in get_resources."""
model.name = unique_str
class SecretsUnauthedTestCase(base.TestCase):
def setUp(self):
super(SecretsUnauthedTestCase, self).setUp()
self.behaviors = secret_behaviors.SecretBehaviors(self.client)
self.default_secret_create_data = get_default_data()
        self.dummy_secret_ref = 'secrets/dummy-7b86-4071-935d-ef6b83729200'
self.dummy_project_id = 'dummy'
resp, self.real_secret_ref = self.behaviors.create_secret(
secret_models.SecretModel(**self.default_secret_create_data)
)
stored_auth = self.client._auth[
self.client._default_user_name].stored_auth
project_id = stored_auth.values()[0]['project_id']
self.project_id_header = {
'X-Project-Id': project_id
}
self.dummy_project_id_header = {
'X-Project-Id': self.dummy_project_id
}
def tearDown(self):
self.behaviors.delete_all_created_secrets()
super(SecretsUnauthedTestCase, self).tearDown()
@testcase.attr('negative', 'security')
def test_secret_create_unauthed_no_proj_id(self):
"""Attempt to create a secret without a token or project id
Should return 401
"""
        model = secret_models.SecretModel(**self.default_secret_create_data)
resp, secret_ref = self.behaviors.create_secret(model, use_auth=False)
self.assertEqual(401, resp.status_code)
@testcase.attr('negative', 'security')
def test_secret_create_unauthed_fake_proj_id(self):
"""Attempt to create a secret with a project id but no token
Should return 401
"""
        model = secret_models.SecretModel(**self.default_secret_create_data)
resp, secret_ref = self.behaviors.create_secret(
model, headers=self.dummy_project_id_header, use_auth=False
)
self.assertEqual(401, resp.status_code)
@testcase.attr('negative', 'security')
def test_secret_create_unauthed_real_proj_id(self):
"""Attempt to create a secret with a project id but no token
Should return 401
"""
        model = secret_models.SecretModel(**self.default_secret_create_data)
resp, secret_ref = self.behaviors.create_secret(
model, headers=self.project_id_header, use_auth=False
)
self.assertEqual(401, resp.status_code)
@testcase.attr('negative', 'security')
def test_secret_get_unauthed_no_proj_id_fake_secret(self):
"""Attempt to read a non-existant secret without a token or project id
Should return 401
"""
resp = self.behaviors.get_secret(
self.dummy_secret_ref,
payload_content_type='application/octet-stream',
payload_content_encoding='base64', use_auth=False
)
self.assertEqual(401, resp.status_code)
@testcase.attr('negative', 'security')
def test_secret_get_unauthed_no_proj_id_real_secret(self):
"""Attempt to read an existing secret without a token or project id
Should return 401
"""
resp = self.behaviors.get_secret(
self.real_secret_ref,
payload_content_type='application/octet-stream',
payload_content_encoding='base64', use_auth=False
)
self.assertEqual(401, resp.status_code)
@testcase.attr('negative', 'security')
def test_secret_get_unauthed_fake_proj_id_fake_secret(self):
"""Attempt to get a non-existant secret with a project id but no token
Should return 401
"""
resp = self.behaviors.get_secret(
self.dummy_secret_ref,
payload_content_type='application/octet-stream',
payload_content_encoding='base64',
extra_headers=self.dummy_project_id_header,
use_auth=False
)
self.assertEqual(401, resp.status_code)
@testcase.attr('negative', 'security')
def test_secret_get_unauthed_fake_proj_id_real_secret(self):
"""Attempt to get an existing secret with a project id but no token
Should return 401
"""
resp = self.behaviors.get_secret(
self.real_secret_ref,
payload_content_type='application/octet-stream',
payload_content_encoding='base64',
extra_headers=self.dummy_project_id_header,
use_auth=False
)
self.assertEqual(401, resp.status_code)
@testcase.attr('negative', 'security')
def test_secret_get_unauthed_real_proj_id_fake_secret(self):
"""Attempt to get a non-existant secret with a project id but no token
Should return 401
"""
resp = self.behaviors.get_secret(
self.dummy_secret_ref,
payload_content_type='application/octet-stream',
payload_content_encoding='base64',
extra_headers=self.project_id_header,
use_auth=False
)
self.assertEqual(401, resp.status_code)
@testcase.attr('negative', 'security')
def test_secret_get_unauthed_real_proj_id_real_secret(self):
"""Attempt to get an existing secret with a project id but no token
Should return 401
"""
resp = self.behaviors.get_secret(
self.real_secret_ref,
payload_content_type='application/octet-stream',
payload_content_encoding='base64',
extra_headers=self.project_id_header,
use_auth=False
)
self.assertEqual(401, resp.status_code)
@testcase.attr('negative', 'security')
def test_secret_put_unauthed_no_proj_id_fake_secret(self):
"""Attempt to update a non-existant secret without a token or project id
Should return 401
"""
resp = self.behaviors.update_secret_payload(
self.dummy_secret_ref, payload=None,
payload_content_type='application/octet-stream',
payload_content_encoding='base64', use_auth=False
)
self.assertEqual(401, resp.status_code)
@testcase.attr('negative', 'security')
def test_secret_put_unauthed_no_proj_id_real_secret(self):
"""Attempt to update an existing secret without a token or project id
Should return 401
"""
resp = self.behaviors.update_secret_payload(
self.real_secret_ref, payload=None,
payload_content_type='application/octet-stream',
payload_content_encoding='base64', use_auth=False
)
self.assertEqual(401, resp.status_code)
@testcase.attr('negative', 'security')
def test_secret_put_unauthed_fake_proj_id_fake_secret(self):
"""Attempt to update a non-existant secret with a project id, but no token
Should return 401
"""
resp = self.behaviors.update_secret_payload(
self.dummy_secret_ref, payload=None,
payload_content_type='application/octet-stream',
payload_content_encoding='base64',
extra_headers=self.dummy_project_id_header,
use_auth=False
)
self.assertEqual(401, resp.status_code)
@testcase.attr('negative', 'security')
def test_secret_put_unauthed_fake_proj_id_real_secret(self):
"""Attempt to update an existing secret with a project id, but no token
Should return 401
"""
resp = self.behaviors.update_secret_payload(
self.real_secret_ref, payload=None,
payload_content_type='application/octet-stream',
payload_content_encoding='base64',
extra_headers=self.dummy_project_id_header,
use_auth=False
)
self.assertEqual(401, resp.status_code)
@testcase.attr('negative', 'security')
def test_secret_put_unauthed_real_proj_id_fake_secret(self):
"""Attempt to update a non-existant secret with a project id, but no token
Should return 401
"""
resp = self.behaviors.update_secret_payload(
self.dummy_secret_ref, payload=None,
payload_content_type='application/octet-stream',
payload_content_encoding='base64',
extra_headers=self.project_id_header,
use_auth=False
)
self.assertEqual(401, resp.status_code)
@testcase.attr('negative', 'security')
def test_secret_put_unauthed_real_proj_id_real_secret(self):
"""Attempt to update an existing secret with a project id, but no token
Should return 401
"""
resp = self.behaviors.update_secret_payload(
self.real_secret_ref, payload=None,
payload_content_type='application/octet-stream',
payload_content_encoding='base64',
extra_headers=self.project_id_header,
use_auth=False
)
self.assertEqual(401, resp.status_code)
@testcase.attr('negative', 'security')
def test_secret_delete_unauthed_no_proj_id_fake_secret(self):
"""Attempt to delete a non-existant secret without a token or project id
Should return 401
"""
resp = self.behaviors.delete_secret(
self.dummy_secret_ref, expected_fail=True, use_auth=False
)
self.assertEqual(401, resp.status_code)
@testcase.attr('negative', 'security')
def test_secret_delete_unauthed_no_proj_id_real_secret(self):
"""Attempt to delete an existing secret without a token or project id
Should return 401
"""
resp = self.behaviors.delete_secret(
self.real_secret_ref, expected_fail=True, use_auth=False
)
self.assertEqual(401, resp.status_code)
@testcase.attr('negative', 'security')
def test_secret_delete_unauthed_fake_proj_id_fake_secret(self):
"""Attempt to delete a non-existant secret with a project id, but no token
Should return 401
"""
resp = self.behaviors.delete_secret(
self.dummy_secret_ref,
extra_headers=self.dummy_project_id_header, expected_fail=True,
use_auth=False
)
self.assertEqual(401, resp.status_code)
@testcase.attr('negative', 'security')
def test_secret_delete_unauthed_fake_proj_id_real_secret(self):
"""Attempt to delete an existing secret with a project id, but no token
Should return 401
"""
resp = self.behaviors.delete_secret(
self.real_secret_ref,
extra_headers=self.dummy_project_id_header, expected_fail=True,
use_auth=False
)
self.assertEqual(401, resp.status_code)
@testcase.attr('negative', 'security')
def test_secret_delete_unauthed_real_proj_id_fake_secret(self):
"""Attempt to delete a non-existant secret with a project id, but no token
Should return 401
"""
resp = self.behaviors.delete_secret(
self.dummy_secret_ref,
extra_headers=self.project_id_header, expected_fail=True,
use_auth=False
)
self.assertEqual(401, resp.status_code)
@testcase.attr('negative', 'security')
def test_secret_delete_unauthed_real_proj_id_real_secret(self):
"""Attempt to delete an existing secret with a project id, but no token
Should return 401
"""
resp = self.behaviors.delete_secret(
self.real_secret_ref,
extra_headers=self.project_id_header, expected_fail=True,
use_auth=False
)
self.assertEqual(401, resp.status_code)
|
|
import comtypes
import comtypes.automation
from comtypes.automation import IEnumVARIANT
from comtypes.automation import DISPATCH_METHOD
from comtypes.automation import DISPATCH_PROPERTYGET
from comtypes.automation import DISPATCH_PROPERTYPUT
from comtypes.automation import DISPATCH_PROPERTYPUTREF
from comtypes.automation import DISPID_VALUE
from comtypes.automation import DISPID_NEWENUM
from comtypes.typeinfo import FUNC_PUREVIRTUAL, FUNC_DISPATCH
class FuncDesc(object):
"""Stores important FUNCDESC properties by copying them from a
real FUNCDESC instance.
"""
def __init__(self, **kw):
self.__dict__.update(kw)
# What is missing?
#
# Should NamedProperty support __call__()?
_all_slice = slice(None, None, None)
class NamedProperty(object):
def __init__(self, disp, get, put, putref):
self.get = get
self.put = put
self.putref = putref
self.disp = disp
def __getitem__(self, arg):
if self.get is None:
raise TypeError("unsubscriptable object")
if isinstance(arg, tuple):
return self.disp._comobj._invoke(self.get.memid,
self.get.invkind,
0,
*arg)
elif arg == _all_slice:
return self.disp._comobj._invoke(self.get.memid,
self.get.invkind,
0)
return self.disp._comobj._invoke(self.get.memid,
self.get.invkind,
0,
*[arg])
def __call__(self, *args):
if self.get is None:
raise TypeError("object is not callable")
return self.disp._comobj._invoke(self.get.memid,
self.get.invkind,
0,
*args)
def __setitem__(self, name, value):
# See discussion in Dispatch.__setattr__ below.
if self.put is None and self.putref is None:
raise TypeError("object does not support item assignment")
if comtypes._is_object(value):
descr = self.putref or self.put
else:
descr = self.put or self.putref
if isinstance(name, tuple):
self.disp._comobj._invoke(descr.memid,
descr.invkind,
0,
*(name + (value,)))
elif name == _all_slice:
self.disp._comobj._invoke(descr.memid,
descr.invkind,
0,
value)
else:
self.disp._comobj._invoke(descr.memid,
descr.invkind,
0,
name,
value)
def __iter__(self):
""" Explicitly disallow iteration. """
msg = "%r is not iterable" % self.disp
raise TypeError(msg)
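# A brief usage sketch for NamedProperty (hedged: "Cells" is a hypothetical
# parameterized property used only for illustration).  A propget that takes
# parameters is returned from Dispatch.__getattr__ as a NamedProperty, so it
# behaves like an indexer:
#
#     cells = disp.Cells        # NamedProperty instance, nothing invoked yet
#     value = cells[1, 2]       # __getitem__ -> propget invoke with (1, 2)
#     cells[1, 2] = "x"         # __setitem__ -> propput/propputref invoke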
# The following 'Dispatch' class, returned from
# CreateObject(progid, dynamic=True)
# differs in behaviour from objects created with
# CreateObject(progid, dynamic=False)
# (let us call the latter 'Custom' objects for this discussion):
#
#
# 1. Dispatch objects support __call__(), custom objects do not
#
# 2. Custom objects' methods support named arguments, Dispatch
# objects do not (could be added, would probably be expensive)
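# A minimal creation sketch (hedged: "Example.Application" is a hypothetical
# progid and the attribute names are illustrative only).  Dynamic Dispatch
# objects come from comtypes.client.CreateObject(..., dynamic=True) and bind
# their members at call time via ITypeComp::Bind:
#
#     from comtypes.client import CreateObject
#     app = CreateObject("Example.Application", dynamic=True)
#     app.Visible = True        # __setattr__ -> propput / propputref
#     count = app.Count         # __getattr__ -> propget or method binding
#     for item in app:          # __iter__ -> DISPID_NEWENUM enumerator
#         print(item)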
class Dispatch(object):
"""Dynamic dispatch for an object the exposes type information.
Binding at runtime is done via ITypeComp::Bind calls.
"""
def __init__(self, comobj, tinfo):
self.__dict__["_comobj"] = comobj
self.__dict__["_tinfo"] = tinfo
self.__dict__["_tcomp"] = tinfo.GetTypeComp()
self.__dict__["_tdesc"] = {}
## self.__dict__["_iid"] = tinfo.GetTypeAttr().guid
def __bind(self, name, invkind):
"""Bind (name, invkind) and return a FuncDesc instance or
None. Results (even unsuccessful ones) are cached."""
# We could cache the info in the class instead of the
# instance, but we would need an additional key for that:
# self._iid
try:
return self._tdesc[(name, invkind)]
except KeyError:
try:
descr = self._tcomp.Bind(name, invkind)[1]
except comtypes.COMError:
info = None
else:
                # Using a separate instance to store the interesting
                # attributes of descr avoids keeping the typecomp
                # instance alive...
info = FuncDesc(memid=descr.memid,
invkind=descr.invkind,
cParams=descr.cParams,
funckind=descr.funckind)
self._tdesc[(name, invkind)] = info
return info
def QueryInterface(self, *args):
"QueryInterface is forwarded to the real com object."
return self._comobj.QueryInterface(*args)
def __cmp__(self, other):
if not isinstance(other, Dispatch):
return 1
return cmp(self._comobj, other._comobj)
def __eq__(self, other):
return isinstance(other, Dispatch) and \
self._comobj == other._comobj
def __hash__(self):
return hash(self._comobj)
def __getattr__(self, name):
"""Get a COM attribute."""
if name.startswith("__") and name.endswith("__"):
raise AttributeError(name)
# check for propget or method
descr = self.__bind(name, DISPATCH_METHOD | DISPATCH_PROPERTYGET)
if descr is None:
raise AttributeError(name)
if descr.invkind == DISPATCH_PROPERTYGET:
# DISPATCH_PROPERTYGET
if descr.funckind == FUNC_DISPATCH:
if descr.cParams == 0:
return self._comobj._invoke(descr.memid, descr.invkind, 0)
elif descr.funckind == FUNC_PUREVIRTUAL:
# FUNC_PUREVIRTUAL descriptions contain the property
# itself as a parameter.
if descr.cParams == 1:
return self._comobj._invoke(descr.memid, descr.invkind, 0)
else:
raise RuntimeError("funckind %d not yet implemented" % descr.funckind)
put = self.__bind(name, DISPATCH_PROPERTYPUT)
putref = self.__bind(name, DISPATCH_PROPERTYPUTREF)
return NamedProperty(self, descr, put, putref)
else:
# DISPATCH_METHOD
def caller(*args):
return self._comobj._invoke(descr.memid, descr.invkind, 0, *args)
try:
caller.__name__ = name
except TypeError:
# In Python 2.3, __name__ is readonly
pass
return caller
def __setattr__(self, name, value):
# Hm, this can be a propput, a propputref, or 'both' property.
# (Or nothing at all.)
#
# Whether propput or propputref is called will depend on what
# is available, and on the type of 'value' as determined by
# comtypes._is_object(value).
#
# I think that the following table MAY be correct; although I
# have no idea whether the cases marked (?) are really valid.
#
# invkind available | _is_object(value) | invkind we should use
# ---------------------------------------------------------------
# put | True | put (?)
# put | False | put
# putref | True | putref
# putref | False | putref (?)
# put, putref | True | putref
# put, putref | False | put
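        # For example (hypothetical attributes, for illustration only):
        #     disp.Count = 5           # _is_object(5) is False  -> propput
        #     disp.Parent = other_com  # _is_object(...) is True -> propputref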
put = self.__bind(name, DISPATCH_PROPERTYPUT)
putref = self.__bind(name, DISPATCH_PROPERTYPUTREF)
if not put and not putref:
raise AttributeError(name)
if comtypes._is_object(value):
descr = putref or put
else:
descr = put or putref
if descr.cParams == 1:
self._comobj._invoke(descr.memid, descr.invkind, 0, value)
return
raise AttributeError(name)
def __call__(self, *args):
return self._comobj._invoke(DISPID_VALUE,
DISPATCH_METHOD | DISPATCH_PROPERTYGET,
0,
*args)
def __getitem__(self, arg):
if isinstance(arg, tuple):
args = arg
elif arg == _all_slice:
args = ()
else:
args = (arg,)
try:
return self._comobj._invoke(DISPID_VALUE,
DISPATCH_METHOD | DISPATCH_PROPERTYGET,
0,
*args)
except comtypes.COMError:
return iter(self)[arg]
def __setitem__(self, name, value):
if comtypes._is_object(value):
invkind = DISPATCH_PROPERTYPUTREF
else:
invkind = DISPATCH_PROPERTYPUT
if isinstance(name, tuple):
args = name + (value,)
elif name == _all_slice:
args = (value,)
else:
args = (name, value)
return self._comobj._invoke(DISPID_VALUE,
invkind,
0,
*args)
def __iter__(self):
punk = self._comobj._invoke(DISPID_NEWENUM,
DISPATCH_METHOD | DISPATCH_PROPERTYGET,
0)
enum = punk.QueryInterface(IEnumVARIANT)
enum._dynamic = True
return enum
|
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import numpy as np
import tensorflow as tf
from tensorflow.contrib import summary
N_CLASSES = 10
# ## Gradient Reversal Layer
#
# When applied to a tensor this layer is the identity map, but it reverses
# the sign of the gradient, and optionally multiplies the reversed gradient
# by a weight.
#
# For details, see [Domain-Adversarial Training of Neural Networks](https://arxiv.org/abs/1505.07818).
#
class GradientReversalLayer(tf.layers.Layer):
def __init__(self, weight=1.0):
super(GradientReversalLayer, self).__init__()
self.weight = weight
def call(self, input_):
@tf.custom_gradient
def _call(input_):
def reversed_gradient(output_grads):
return self.weight * tf.negative(output_grads)
return input_, reversed_gradient
return _call(input_)
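# A minimal sketch of the layer's effect (assumes the same TF 1.x graph-mode
# API used elsewhere in this sample; the tensors below are illustrative only):
# the forward pass is the identity, while gradients flowing back through the
# layer are negated and scaled by `weight`.
#
#     x = tf.constant([1.0, 2.0, 3.0])
#     y = GradientReversalLayer(weight=0.5)(x)    # forward: y == x
#     dy = tf.gradients(tf.reduce_sum(y), x)      # backward: each grad == -0.5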
# ## The model function
# The network consists of 3 sub-networks:
#
# * Feature extractor: extracts internal representation for both the source and target distributions.
#
# * Label predictor: predicts label from the extracted features.
#
# * Domain classifier: classifies the origin (`source` or `target`) of the extracted features.
#
#
# Both the label predictor and the domain classifier will try to minimize
# classification loss, but the gradients backpropagated from the domain
# classifier to the feature extractor have their signs reversed.
#
#
# This model function also shows how to use `host_call` to output summaries.
#
def model_fn(features, labels, mode, params):
source = features['source']
target = features['target']
onehot_labels = tf.one_hot(labels, N_CLASSES)
global_step = tf.train.get_global_step()
# In this sample we use dense layers for each of the sub-networks.
feature_extractor = tf.layers.Dense(7, activation=tf.nn.sigmoid)
label_predictor_logits = tf.layers.Dense(N_CLASSES)
# There are two domains, 0: source and 1: target
domain_classifier_logits = tf.layers.Dense(2)
source_features = feature_extractor(source)
target_features = feature_extractor(target)
# Apply the gradient reversal layer to target features
gr_weight = params['gr_weight']
gradient_reversal = GradientReversalLayer(gr_weight)
target_features = gradient_reversal(target_features)
# The predictions are the predicted labels from the `target` distribution.
predictions = tf.nn.softmax(label_predictor_logits(target_features))
loss = None
train_op = None
if mode == tf.estimator.ModeKeys.TRAIN:
# define loss
label_prediction_loss = tf.losses.softmax_cross_entropy(
onehot_labels=onehot_labels,
logits=label_predictor_logits(source_features)
)
# There are two domains, 0: source and 1: target
domain_labels = tf.concat((tf.zeros(source.shape[0], dtype=tf.int32), tf.ones(target.shape[0], dtype=tf.int32)), axis=0)
domain_onehot_labels = tf.one_hot(domain_labels, 2)
source_target_features = tf.concat([source_features, target_features], axis=0)
domain_classification_loss = tf.losses.softmax_cross_entropy(
onehot_labels=domain_onehot_labels,
logits=domain_classifier_logits(source_target_features)
)
lambda_ = params['lambda']
loss = label_prediction_loss + lambda_ * domain_classification_loss
# define train_op
optimizer = tf.train.RMSPropOptimizer(learning_rate=0.05)
# wrapper to make the optimizer work with TPUs
if params['use_tpu']:
optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)
train_op = optimizer.minimize(loss, global_step=global_step)
if params['use_tpu']:
# Use host_call to log the losses on the CPU
def host_call_fn(gs, lpl, dcl, ls):
gs = gs[0]
with summary.create_file_writer(params['model_dir'], max_queue=params['save_checkpoints_steps']).as_default():
with summary.always_record_summaries():
summary.scalar('label_prediction_loss', lpl[0], step=gs)
summary.scalar('domain_classification_loss', dcl[0], step=gs)
summary.scalar('loss', ls[0], step=gs)
return summary.all_summary_ops()
# host_call's arguments must be at least 1D
gs_t = tf.reshape(global_step, [1])
lpl_t = tf.reshape(label_prediction_loss, [1])
dcl_t = tf.reshape(domain_classification_loss, [1])
ls_t = tf.reshape(loss, [1])
host_call = (host_call_fn, [gs_t, lpl_t, dcl_t, ls_t])
# TPU version of EstimatorSpec
return tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op,
host_call=host_call)
else:
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op)
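# To restate the training objective built above (no new behaviour is added
# here): in TRAIN mode the loss is
#
#     loss = label_prediction_loss + lambda * domain_classification_loss
#
# and, because only the target features pass through GradientReversalLayer,
# the domain-classification gradients that reach the feature extractor via
# the target branch are negated and scaled by gr_weight, while the
# label-prediction gradients are left unchanged.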
# ## The input function
# There are two input data sets, `source` is labeled and `target` is unlabeled.
def train_input_fn(params={}):
# source distribution: labeled data
source = np.random.rand(100, 5)
labels = np.random.randint(0, N_CLASSES, 100)
# target distribution: unlabeled data
target = np.random.rand(100, 5)
source_tensor = tf.constant(source, dtype=tf.float32)
labels_tensor = tf.constant(labels, dtype=tf.int32)
target_tensor = tf.constant(target, dtype=tf.float32)
# shuffle source and target separately
source_labels_dataset = tf.data.Dataset.from_tensor_slices((source_tensor, labels_tensor)).repeat().shuffle(32)
target_dataset = tf.data.Dataset.from_tensor_slices(target_tensor).repeat().shuffle(32)
# zip them together to set shapes
dataset = tf.data.Dataset.zip((source_labels_dataset, target_dataset))
# TPUEstimator passes params when calling input_fn
batch_size = params.get('batch_size', 16)
dataset = dataset.batch(batch_size, drop_remainder=True)
# TPUs need to know all dimensions when the graph is built
# Datasets know the batch size only when the graph is run
def set_shapes_and_format(source_labels, target):
source, labels = source_labels
source_shape = source.get_shape().merge_with([batch_size, None])
labels_shape = labels.get_shape().merge_with([batch_size])
target_shape = target.get_shape().merge_with([batch_size, None])
source.set_shape(source_shape)
labels.set_shape(labels_shape)
target.set_shape(target_shape)
# Also format the dataset with a dict for features
features = {'source': source, 'target': target}
return features, labels
dataset = dataset.map(set_shapes_and_format)
dataset = dataset.prefetch(tf.contrib.data.AUTOTUNE)
return dataset
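# Shape of one element produced by train_input_fn (a description of the code
# above, assuming the default batch size of 16): features is a dict
# {'source': float32 [16, 5], 'target': float32 [16, 5]} and labels is an
# int32 [16] vector of class ids in [0, N_CLASSES).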
def main(args):
# pass the args as params so the model_fn can use
# the TPU specific args
params = vars(args)
if args.use_tpu:
# additional configs required for using TPUs
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(args.tpu)
tpu_config = tf.contrib.tpu.TPUConfig(
num_shards=8, # using Cloud TPU v2-8
iterations_per_loop=args.save_checkpoints_steps)
# use the TPU version of RunConfig
config = tf.contrib.tpu.RunConfig(
cluster=tpu_cluster_resolver,
model_dir=args.model_dir,
tpu_config=tpu_config,
save_checkpoints_steps=args.save_checkpoints_steps,
save_summary_steps=100)
# TPUEstimator
estimator = tf.contrib.tpu.TPUEstimator(
model_fn=model_fn,
config=config,
params=params,
train_batch_size=args.train_batch_size,
eval_batch_size=32,
export_to_tpu=False)
else:
config = tf.estimator.RunConfig(model_dir=args.model_dir)
estimator = tf.estimator.Estimator(
model_fn,
config=config,
params=params)
estimator.train(train_input_fn, max_steps=args.max_steps)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--model-dir',
type=str,
default='/tmp/tpu-template',
help='Location to write checkpoints and summaries to. Must be a GCS URI when using Cloud TPU.')
parser.add_argument(
'--max-steps',
type=int,
default=1000,
help='The total number of steps to train the model.')
parser.add_argument(
'--train-batch-size',
type=int,
default=16,
help='The training batch size. The training batch is divided evenly across the TPU cores.')
parser.add_argument(
'--save-checkpoints-steps',
type=int,
default=100,
help='The number of training steps before saving each checkpoint.')
parser.add_argument(
'--use-tpu',
action='store_true',
help='Whether to use TPU.')
parser.add_argument(
'--tpu',
default=None,
help='The name or GRPC URL of the TPU node. Leave it as `None` when training on AI Platform.')
parser.add_argument(
        '--gr-weight',
        type=float,
        default=1.0,
        help='The weight used in the gradient reversal layer.')
parser.add_argument(
        '--lambda',
        type=float,
        default=1.0,
        help='The trade-off between label_prediction_loss and domain_classification_loss.')
args, _ = parser.parse_known_args()
main(args)
|
|
"""The tests for the MQTT sensor platform."""
import copy
from datetime import datetime, timedelta
import json
from unittest.mock import patch
import pytest
from homeassistant.components.mqtt.sensor import MQTT_SENSOR_ATTRIBUTES_BLOCKED
import homeassistant.components.sensor as sensor
from homeassistant.const import EVENT_STATE_CHANGED, STATE_UNAVAILABLE, STATE_UNKNOWN
import homeassistant.core as ha
from homeassistant.helpers import device_registry as dr
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from .test_common import (
help_test_availability_when_connection_lost,
help_test_availability_without_topic,
help_test_custom_availability_payload,
help_test_default_availability_list_payload,
help_test_default_availability_list_payload_all,
help_test_default_availability_list_payload_any,
help_test_default_availability_list_single,
help_test_default_availability_payload,
help_test_discovery_broken,
help_test_discovery_removal,
help_test_discovery_update,
help_test_discovery_update_attr,
help_test_discovery_update_availability,
help_test_discovery_update_unchanged,
help_test_entity_category,
help_test_entity_debug_info,
help_test_entity_debug_info_max_messages,
help_test_entity_debug_info_message,
help_test_entity_debug_info_remove,
help_test_entity_debug_info_update_entity_id,
help_test_entity_device_info_remove,
help_test_entity_device_info_update,
help_test_entity_device_info_with_connection,
help_test_entity_device_info_with_identifier,
help_test_entity_disabled_by_default,
help_test_entity_id_update_discovery_update,
help_test_entity_id_update_subscriptions,
help_test_setting_attribute_via_mqtt_json_message,
help_test_setting_attribute_with_template,
help_test_setting_blocked_attribute_via_mqtt_json_message,
help_test_unique_id,
help_test_update_with_json_attrs_bad_JSON,
help_test_update_with_json_attrs_not_dict,
)
from tests.common import async_fire_mqtt_message, async_fire_time_changed
DEFAULT_CONFIG = {
sensor.DOMAIN: {"platform": "mqtt", "name": "test", "state_topic": "test-topic"}
}
async def test_setting_sensor_value_via_mqtt_message(hass, mqtt_mock):
"""Test the setting of the value via MQTT."""
assert await async_setup_component(
hass,
sensor.DOMAIN,
{
sensor.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "test-topic",
"unit_of_measurement": "fav unit",
}
},
)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "test-topic", "100")
state = hass.states.get("sensor.test")
assert state.state == "100"
assert state.attributes.get("unit_of_measurement") == "fav unit"
@pytest.mark.parametrize(
"device_class,native_value,state_value,log",
[
(sensor.DEVICE_CLASS_DATE, "2021-11-18", "2021-11-18", False),
(sensor.DEVICE_CLASS_DATE, "invalid", STATE_UNKNOWN, True),
(
sensor.DEVICE_CLASS_TIMESTAMP,
"2021-11-18T20:25:00+00:00",
"2021-11-18T20:25:00+00:00",
False,
),
(
sensor.DEVICE_CLASS_TIMESTAMP,
"2021-11-18 20:25:00+00:00",
"2021-11-18T20:25:00+00:00",
False,
),
(
sensor.DEVICE_CLASS_TIMESTAMP,
"2021-11-18 20:25:00+01:00",
"2021-11-18T19:25:00+00:00",
False,
),
(sensor.DEVICE_CLASS_TIMESTAMP, "invalid", STATE_UNKNOWN, True),
],
)
async def test_setting_sensor_native_value_handling_via_mqtt_message(
hass, mqtt_mock, caplog, device_class, native_value, state_value, log
):
"""Test the setting of the value via MQTT."""
assert await async_setup_component(
hass,
sensor.DOMAIN,
{
sensor.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "test-topic",
"device_class": device_class,
}
},
)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "test-topic", native_value)
state = hass.states.get("sensor.test")
assert state.state == state_value
assert state.attributes.get("device_class") == device_class
assert log == ("Invalid state message" in caplog.text)
async def test_setting_sensor_value_expires_availability_topic(hass, mqtt_mock, caplog):
"""Test the expiration of the value."""
assert await async_setup_component(
hass,
sensor.DOMAIN,
{
sensor.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "test-topic",
"expire_after": 4,
"force_update": True,
"availability_topic": "availability-topic",
}
},
)
await hass.async_block_till_done()
state = hass.states.get("sensor.test")
assert state.state == STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability-topic", "online")
# State should be unavailable since expire_after is defined and > 0
state = hass.states.get("sensor.test")
assert state.state == STATE_UNAVAILABLE
await expires_helper(hass, mqtt_mock, caplog)
async def test_setting_sensor_value_expires(hass, mqtt_mock, caplog):
"""Test the expiration of the value."""
assert await async_setup_component(
hass,
sensor.DOMAIN,
{
sensor.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "test-topic",
"unit_of_measurement": "fav unit",
"expire_after": "4",
"force_update": True,
}
},
)
await hass.async_block_till_done()
# State should be unavailable since expire_after is defined and > 0
state = hass.states.get("sensor.test")
assert state.state == STATE_UNAVAILABLE
await expires_helper(hass, mqtt_mock, caplog)
async def expires_helper(hass, mqtt_mock, caplog):
"""Run the basic expiry code."""
realnow = dt_util.utcnow()
now = datetime(realnow.year + 1, 1, 1, 1, tzinfo=dt_util.UTC)
with patch(("homeassistant.helpers.event.dt_util.utcnow"), return_value=now):
async_fire_time_changed(hass, now)
async_fire_mqtt_message(hass, "test-topic", "100")
await hass.async_block_till_done()
# Value was set correctly.
state = hass.states.get("sensor.test")
assert state.state == "100"
# Time jump +3s
now = now + timedelta(seconds=3)
async_fire_time_changed(hass, now)
await hass.async_block_till_done()
# Value is not yet expired
state = hass.states.get("sensor.test")
assert state.state == "100"
# Next message resets timer
with patch(("homeassistant.helpers.event.dt_util.utcnow"), return_value=now):
async_fire_time_changed(hass, now)
async_fire_mqtt_message(hass, "test-topic", "101")
await hass.async_block_till_done()
# Value was updated correctly.
state = hass.states.get("sensor.test")
assert state.state == "101"
# Time jump +3s
now = now + timedelta(seconds=3)
async_fire_time_changed(hass, now)
await hass.async_block_till_done()
# Value is not yet expired
state = hass.states.get("sensor.test")
assert state.state == "101"
# Time jump +2s
now = now + timedelta(seconds=2)
async_fire_time_changed(hass, now)
await hass.async_block_till_done()
# Value is expired now
state = hass.states.get("sensor.test")
assert state.state == STATE_UNAVAILABLE
async def test_setting_sensor_value_via_mqtt_json_message(hass, mqtt_mock):
"""Test the setting of the value via MQTT with JSON payload."""
assert await async_setup_component(
hass,
sensor.DOMAIN,
{
sensor.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "test-topic",
"unit_of_measurement": "fav unit",
"value_template": "{{ value_json.val }}",
}
},
)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "test-topic", '{ "val": "100" }')
state = hass.states.get("sensor.test")
assert state.state == "100"
async def test_setting_sensor_last_reset_via_mqtt_message(hass, mqtt_mock, caplog):
"""Test the setting of the last_reset property via MQTT."""
assert await async_setup_component(
hass,
sensor.DOMAIN,
{
sensor.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "test-topic",
"unit_of_measurement": "fav unit",
"last_reset_topic": "last-reset-topic",
}
},
)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "last-reset-topic", "2020-01-02 08:11:00")
state = hass.states.get("sensor.test")
assert state.attributes.get("last_reset") == "2020-01-02T08:11:00"
assert "'last_reset_topic' must be same as 'state_topic'" in caplog.text
assert (
"'last_reset_value_template' must be set if 'last_reset_topic' is set"
in caplog.text
)
@pytest.mark.parametrize("datestring", ["2020-21-02 08:11:00", "Hello there!"])
async def test_setting_sensor_bad_last_reset_via_mqtt_message(
hass, caplog, datestring, mqtt_mock
):
"""Test the setting of the last_reset property via MQTT."""
assert await async_setup_component(
hass,
sensor.DOMAIN,
{
sensor.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "test-topic",
"unit_of_measurement": "fav unit",
"last_reset_topic": "last-reset-topic",
}
},
)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "last-reset-topic", datestring)
state = hass.states.get("sensor.test")
assert state.attributes.get("last_reset") is None
assert "Invalid last_reset message" in caplog.text
async def test_setting_sensor_empty_last_reset_via_mqtt_message(
hass, caplog, mqtt_mock
):
"""Test the setting of the last_reset property via MQTT."""
assert await async_setup_component(
hass,
sensor.DOMAIN,
{
sensor.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "test-topic",
"unit_of_measurement": "fav unit",
"last_reset_topic": "last-reset-topic",
}
},
)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "last-reset-topic", "")
state = hass.states.get("sensor.test")
assert state.attributes.get("last_reset") is None
assert "Ignoring empty last_reset message" in caplog.text
async def test_setting_sensor_last_reset_via_mqtt_json_message(hass, mqtt_mock):
"""Test the setting of the value via MQTT with JSON payload."""
assert await async_setup_component(
hass,
sensor.DOMAIN,
{
sensor.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "test-topic",
"unit_of_measurement": "fav unit",
"last_reset_topic": "last-reset-topic",
"last_reset_value_template": "{{ value_json.last_reset }}",
}
},
)
await hass.async_block_till_done()
async_fire_mqtt_message(
hass, "last-reset-topic", '{ "last_reset": "2020-01-02 08:11:00" }'
)
state = hass.states.get("sensor.test")
assert state.attributes.get("last_reset") == "2020-01-02T08:11:00"
@pytest.mark.parametrize("extra", [{}, {"last_reset_topic": "test-topic"}])
async def test_setting_sensor_last_reset_via_mqtt_json_message_2(
hass, mqtt_mock, caplog, extra
):
"""Test the setting of the value via MQTT with JSON payload."""
assert await async_setup_component(
hass,
sensor.DOMAIN,
{
sensor.DOMAIN: {
**{
"platform": "mqtt",
"name": "test",
"state_topic": "test-topic",
"unit_of_measurement": "kWh",
"value_template": "{{ value_json.value | float / 60000 }}",
"last_reset_value_template": "{{ utcnow().fromtimestamp(value_json.time / 1000, tz=utcnow().tzinfo) }}",
},
**extra,
}
},
)
await hass.async_block_till_done()
async_fire_mqtt_message(
hass,
"test-topic",
'{"type":"minute","time":1629385500000,"value":947.7706166666667}',
)
state = hass.states.get("sensor.test")
assert float(state.state) == pytest.approx(0.015796176944444445)
assert state.attributes.get("last_reset") == "2021-08-19T15:05:00+00:00"
assert "'last_reset_topic' must be same as 'state_topic'" not in caplog.text
assert (
"'last_reset_value_template' must be set if 'last_reset_topic' is set"
not in caplog.text
)
async def test_force_update_disabled(hass, mqtt_mock):
"""Test force update option."""
assert await async_setup_component(
hass,
sensor.DOMAIN,
{
sensor.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "test-topic",
"unit_of_measurement": "fav unit",
}
},
)
await hass.async_block_till_done()
events = []
@ha.callback
def callback(event):
events.append(event)
hass.bus.async_listen(EVENT_STATE_CHANGED, callback)
async_fire_mqtt_message(hass, "test-topic", "100")
await hass.async_block_till_done()
assert len(events) == 1
async_fire_mqtt_message(hass, "test-topic", "100")
await hass.async_block_till_done()
assert len(events) == 1
async def test_force_update_enabled(hass, mqtt_mock):
"""Test force update option."""
assert await async_setup_component(
hass,
sensor.DOMAIN,
{
sensor.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "test-topic",
"unit_of_measurement": "fav unit",
"force_update": True,
}
},
)
await hass.async_block_till_done()
events = []
@ha.callback
def callback(event):
events.append(event)
hass.bus.async_listen(EVENT_STATE_CHANGED, callback)
async_fire_mqtt_message(hass, "test-topic", "100")
await hass.async_block_till_done()
assert len(events) == 1
async_fire_mqtt_message(hass, "test-topic", "100")
await hass.async_block_till_done()
assert len(events) == 2
async def test_availability_when_connection_lost(hass, mqtt_mock):
"""Test availability after MQTT disconnection."""
await help_test_availability_when_connection_lost(
hass, mqtt_mock, sensor.DOMAIN, DEFAULT_CONFIG
)
async def test_availability_without_topic(hass, mqtt_mock):
"""Test availability without defined availability topic."""
await help_test_availability_without_topic(
hass, mqtt_mock, sensor.DOMAIN, DEFAULT_CONFIG
)
async def test_default_availability_payload(hass, mqtt_mock):
"""Test availability by default payload with defined topic."""
await help_test_default_availability_payload(
hass, mqtt_mock, sensor.DOMAIN, DEFAULT_CONFIG
)
async def test_default_availability_list_payload(hass, mqtt_mock):
"""Test availability by default payload with defined topic."""
await help_test_default_availability_list_payload(
hass, mqtt_mock, sensor.DOMAIN, DEFAULT_CONFIG
)
async def test_default_availability_list_payload_all(hass, mqtt_mock):
"""Test availability by default payload with defined topic."""
await help_test_default_availability_list_payload_all(
hass, mqtt_mock, sensor.DOMAIN, DEFAULT_CONFIG
)
async def test_default_availability_list_payload_any(hass, mqtt_mock):
"""Test availability by default payload with defined topic."""
await help_test_default_availability_list_payload_any(
hass, mqtt_mock, sensor.DOMAIN, DEFAULT_CONFIG
)
async def test_default_availability_list_single(hass, mqtt_mock, caplog):
"""Test availability list and availability_topic are mutually exclusive."""
await help_test_default_availability_list_single(
hass, mqtt_mock, caplog, sensor.DOMAIN, DEFAULT_CONFIG
)
async def test_custom_availability_payload(hass, mqtt_mock):
"""Test availability by custom payload with defined topic."""
await help_test_custom_availability_payload(
hass, mqtt_mock, sensor.DOMAIN, DEFAULT_CONFIG
)
async def test_discovery_update_availability(hass, mqtt_mock):
"""Test availability discovery update."""
await help_test_discovery_update_availability(
hass, mqtt_mock, sensor.DOMAIN, DEFAULT_CONFIG
)
async def test_invalid_device_class(hass, mqtt_mock):
"""Test device_class option with invalid value."""
assert await async_setup_component(
hass,
sensor.DOMAIN,
{
sensor.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "test-topic",
"device_class": "foobarnotreal",
}
},
)
await hass.async_block_till_done()
state = hass.states.get("sensor.test")
assert state is None
async def test_valid_device_class(hass, mqtt_mock):
"""Test device_class option with valid values."""
assert await async_setup_component(
hass,
"sensor",
{
"sensor": [
{
"platform": "mqtt",
"name": "Test 1",
"state_topic": "test-topic",
"device_class": "temperature",
},
{"platform": "mqtt", "name": "Test 2", "state_topic": "test-topic"},
]
},
)
await hass.async_block_till_done()
state = hass.states.get("sensor.test_1")
assert state.attributes["device_class"] == "temperature"
state = hass.states.get("sensor.test_2")
assert "device_class" not in state.attributes
async def test_invalid_state_class(hass, mqtt_mock):
"""Test state_class option with invalid value."""
assert await async_setup_component(
hass,
sensor.DOMAIN,
{
sensor.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "test-topic",
"state_class": "foobarnotreal",
}
},
)
await hass.async_block_till_done()
state = hass.states.get("sensor.test")
assert state is None
async def test_valid_state_class(hass, mqtt_mock):
"""Test state_class option with valid values."""
assert await async_setup_component(
hass,
"sensor",
{
"sensor": [
{
"platform": "mqtt",
"name": "Test 1",
"state_topic": "test-topic",
"state_class": "measurement",
},
{"platform": "mqtt", "name": "Test 2", "state_topic": "test-topic"},
]
},
)
await hass.async_block_till_done()
state = hass.states.get("sensor.test_1")
assert state.attributes["state_class"] == "measurement"
state = hass.states.get("sensor.test_2")
assert "state_class" not in state.attributes
async def test_setting_attribute_via_mqtt_json_message(hass, mqtt_mock):
"""Test the setting of attribute via MQTT with JSON payload."""
await help_test_setting_attribute_via_mqtt_json_message(
hass, mqtt_mock, sensor.DOMAIN, DEFAULT_CONFIG
)
async def test_setting_blocked_attribute_via_mqtt_json_message(hass, mqtt_mock):
"""Test the setting of attribute via MQTT with JSON payload."""
await help_test_setting_blocked_attribute_via_mqtt_json_message(
hass, mqtt_mock, sensor.DOMAIN, DEFAULT_CONFIG, MQTT_SENSOR_ATTRIBUTES_BLOCKED
)
async def test_setting_attribute_with_template(hass, mqtt_mock):
"""Test the setting of attribute via MQTT with JSON payload."""
await help_test_setting_attribute_with_template(
hass, mqtt_mock, sensor.DOMAIN, DEFAULT_CONFIG
)
async def test_update_with_json_attrs_not_dict(hass, mqtt_mock, caplog):
"""Test attributes get extracted from a JSON result."""
await help_test_update_with_json_attrs_not_dict(
hass, mqtt_mock, caplog, sensor.DOMAIN, DEFAULT_CONFIG
)
async def test_update_with_json_attrs_bad_JSON(hass, mqtt_mock, caplog):
"""Test attributes get extracted from a JSON result."""
await help_test_update_with_json_attrs_bad_JSON(
hass, mqtt_mock, caplog, sensor.DOMAIN, DEFAULT_CONFIG
)
async def test_discovery_update_attr(hass, mqtt_mock, caplog):
"""Test update of discovered MQTTAttributes."""
await help_test_discovery_update_attr(
hass, mqtt_mock, caplog, sensor.DOMAIN, DEFAULT_CONFIG
)
async def test_unique_id(hass, mqtt_mock):
"""Test unique id option only creates one sensor per unique_id."""
config = {
sensor.DOMAIN: [
{
"platform": "mqtt",
"name": "Test 1",
"state_topic": "test-topic",
"unique_id": "TOTALLY_UNIQUE",
},
{
"platform": "mqtt",
"name": "Test 2",
"state_topic": "test-topic",
"unique_id": "TOTALLY_UNIQUE",
},
]
}
await help_test_unique_id(hass, mqtt_mock, sensor.DOMAIN, config)
async def test_discovery_removal_sensor(hass, mqtt_mock, caplog):
"""Test removal of discovered sensor."""
data = '{ "name": "test", "state_topic": "test_topic" }'
await help_test_discovery_removal(hass, mqtt_mock, caplog, sensor.DOMAIN, data)
async def test_discovery_update_sensor_topic_template(hass, mqtt_mock, caplog):
"""Test update of discovered sensor."""
config = {"name": "test", "state_topic": "test_topic"}
config1 = copy.deepcopy(config)
config2 = copy.deepcopy(config)
config1["name"] = "Beer"
config2["name"] = "Milk"
config1["state_topic"] = "sensor/state1"
config2["state_topic"] = "sensor/state2"
config1["value_template"] = "{{ value_json.state | int }}"
config2["value_template"] = "{{ value_json.state | int * 2 }}"
state_data1 = [
([("sensor/state1", '{"state":100}')], "100", None),
]
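    # After the discovery update, messages on the old topic must be ignored
    # (state stays "100"); the new topic with the new template yields "200".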
state_data2 = [
([("sensor/state1", '{"state":1000}')], "100", None),
([("sensor/state1", '{"state":1000}')], "100", None),
([("sensor/state2", '{"state":100}')], "200", None),
]
await help_test_discovery_update(
hass,
mqtt_mock,
caplog,
sensor.DOMAIN,
config1,
config2,
state_data1=state_data1,
state_data2=state_data2,
)
async def test_discovery_update_sensor_template(hass, mqtt_mock, caplog):
"""Test update of discovered sensor."""
config = {"name": "test", "state_topic": "test_topic"}
config1 = copy.deepcopy(config)
config2 = copy.deepcopy(config)
config1["name"] = "Beer"
config2["name"] = "Milk"
config1["state_topic"] = "sensor/state1"
config2["state_topic"] = "sensor/state1"
config1["value_template"] = "{{ value_json.state | int }}"
config2["value_template"] = "{{ value_json.state | int * 2 }}"
state_data1 = [
([("sensor/state1", '{"state":100}')], "100", None),
]
state_data2 = [
([("sensor/state1", '{"state":100}')], "200", None),
]
await help_test_discovery_update(
hass,
mqtt_mock,
caplog,
sensor.DOMAIN,
config1,
config2,
state_data1=state_data1,
state_data2=state_data2,
)
async def test_discovery_update_unchanged_sensor(hass, mqtt_mock, caplog):
"""Test update of discovered sensor."""
data1 = '{ "name": "Beer", "state_topic": "test_topic" }'
with patch(
"homeassistant.components.mqtt.sensor.MqttSensor.discovery_update"
) as discovery_update:
await help_test_discovery_update_unchanged(
hass, mqtt_mock, caplog, sensor.DOMAIN, data1, discovery_update
)
@pytest.mark.no_fail_on_log_exception
async def test_discovery_broken(hass, mqtt_mock, caplog):
"""Test handling of bad discovery message."""
data1 = '{ "name": "Beer", "state_topic": "test_topic#" }'
data2 = '{ "name": "Milk", "state_topic": "test_topic" }'
await help_test_discovery_broken(
hass, mqtt_mock, caplog, sensor.DOMAIN, data1, data2
)
async def test_entity_device_info_with_connection(hass, mqtt_mock):
"""Test MQTT sensor device registry integration."""
await help_test_entity_device_info_with_connection(
hass, mqtt_mock, sensor.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_device_info_with_identifier(hass, mqtt_mock):
"""Test MQTT sensor device registry integration."""
await help_test_entity_device_info_with_identifier(
hass, mqtt_mock, sensor.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_device_info_update(hass, mqtt_mock):
"""Test device registry update."""
await help_test_entity_device_info_update(
hass, mqtt_mock, sensor.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_device_info_remove(hass, mqtt_mock):
"""Test device registry remove."""
await help_test_entity_device_info_remove(
hass, mqtt_mock, sensor.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_id_update_subscriptions(hass, mqtt_mock):
"""Test MQTT subscriptions are managed when entity_id is updated."""
await help_test_entity_id_update_subscriptions(
hass, mqtt_mock, sensor.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_id_update_discovery_update(hass, mqtt_mock):
"""Test MQTT discovery update when entity_id is updated."""
await help_test_entity_id_update_discovery_update(
hass, mqtt_mock, sensor.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_device_info_with_hub(hass, mqtt_mock):
"""Test MQTT sensor device registry integration."""
registry = dr.async_get(hass)
hub = registry.async_get_or_create(
config_entry_id="123",
connections=set(),
identifiers={("mqtt", "hub-id")},
manufacturer="manufacturer",
model="hub",
)
data = json.dumps(
{
"platform": "mqtt",
"name": "Test 1",
"state_topic": "test-topic",
"device": {"identifiers": ["helloworld"], "via_device": "hub-id"},
"unique_id": "veryunique",
}
)
async_fire_mqtt_message(hass, "homeassistant/sensor/bla/config", data)
await hass.async_block_till_done()
device = registry.async_get_device({("mqtt", "helloworld")})
assert device is not None
assert device.via_device_id == hub.id
async def test_entity_debug_info(hass, mqtt_mock):
"""Test MQTT sensor debug info."""
await help_test_entity_debug_info(hass, mqtt_mock, sensor.DOMAIN, DEFAULT_CONFIG)
async def test_entity_debug_info_max_messages(hass, mqtt_mock):
"""Test MQTT sensor debug info."""
await help_test_entity_debug_info_max_messages(
hass, mqtt_mock, sensor.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_debug_info_message(hass, mqtt_mock):
"""Test MQTT debug info."""
await help_test_entity_debug_info_message(
hass, mqtt_mock, sensor.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_debug_info_remove(hass, mqtt_mock):
"""Test MQTT sensor debug info."""
await help_test_entity_debug_info_remove(
hass, mqtt_mock, sensor.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_debug_info_update_entity_id(hass, mqtt_mock):
"""Test MQTT sensor debug info."""
await help_test_entity_debug_info_update_entity_id(
hass, mqtt_mock, sensor.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_disabled_by_default(hass, mqtt_mock):
"""Test entity disabled by default."""
await help_test_entity_disabled_by_default(
hass, mqtt_mock, sensor.DOMAIN, DEFAULT_CONFIG
)
@pytest.mark.no_fail_on_log_exception
async def test_entity_category(hass, mqtt_mock):
"""Test entity category."""
await help_test_entity_category(hass, mqtt_mock, sensor.DOMAIN, DEFAULT_CONFIG)
async def test_value_template_with_entity_id(hass, mqtt_mock):
"""Test the access to attributes in value_template via the entity_id."""
assert await async_setup_component(
hass,
sensor.DOMAIN,
{
sensor.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "test-topic",
"unit_of_measurement": "fav unit",
"value_template": '\
{% if state_attr(entity_id, "friendly_name") == "test" %} \
{{ value | int + 1 }} \
{% else %} \
{{ value }} \
{% endif %}',
}
},
)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "test-topic", "100")
state = hass.states.get("sensor.test")
assert state.state == "101"
|
|
"""Turns a surface description into a panair network"""
import numpy as np
import sys
from math import sin, cos, sqrt
def axisymmetric_surf(data_x, data_r, N_theta):
theta_start = np.pi # radians
theta_end = np.pi/2. # radians
data_t = np.linspace(theta_start, theta_end, N_theta)
surf_coords = np.zeros([len(data_t), len(data_x), 3])
for i, t in enumerate(data_t):
for j, x in enumerate(data_x):
surf_coords[i, j, 0] = x
surf_coords[i, j, 1] = data_r[j]*sin(t)
surf_coords[i, j, 2] = data_r[j]*cos(t)
num_points = len(data_x)
max_axial = 200
num_network = int(num_points/max_axial)
if not (num_points % max_axial) == 0:
num_network += 1
nn = int(num_points/num_network)
network_list = []
if num_network > 1:
for i in range(num_network):
if i == num_network-1:
network_list.append(surf_coords[:, i*nn:])
else:
network_list.append(surf_coords[:, i*nn:(i+1)*nn+1])
else:
network_list.append(surf_coords)
return network_list
def generate_wake(te_points, x_end, n_points=10, angle_of_attack=0.,
user_spacing=None):
# check that x_end is downstream of all trailing edge points
if not np.all(te_points[:, 0] < x_end):
raise RuntimeError("wake must terminate downstream of trailing edge")
if user_spacing is None:
spacing = np.linspace
else:
spacing = user_spacing
Ny = te_points.shape[0]
wake = np.zeros((n_points, Ny, 3))
aoa_r = angle_of_attack*np.pi/180.
for j, p in enumerate(te_points):
x_te, y_te, z_te = p
length = (x_end-x_te)/np.cos(aoa_r)
X_0 = spacing(0., length, n_points)
X_r = X_0*np.cos(aoa_r)
Z_r = X_0*np.sin(aoa_r)
wake[:, j, 0] = x_te+X_r
wake[:, j, 1] = y_te
wake[:, j, 2] = z_te+Z_r
return wake
def constant_eta_edge(eta, n_points): # , cos_spacing=True):
edge = np.full((n_points, 2), eta)
cos_space = cosine_spacing()
edge[:, 0] = cos_space(0., 1., n_points)
return edge
def constant_psi_edge(psi, n_points, eta_lim): # , cos_spacing=True):
edge = np.full((n_points, 2), psi)
eta0, eta1 = eta_lim
cos_space = cosine_spacing()
edge[:, 1] = cos_space(eta0, eta1, n_points)
return edge
def gen_network_edge(surface, intersection, N_b=None, N_t=None,
vertical=False):
"""takes in intersection(s) and returns properly ordered and
complete network edge
Parameters
----------
surface : CST3D
CST surface corresponding to network
intersection : tuple of 3 1D numpy arrays
x, y, and z coordinates of points that define intersection
N_b : int
Number of points to use in back of intersection if intersection
doesn't fully define edge.
    N_t : int
Number of points to use in front of intersection if intersection
doesn't fully define edge.
vertical : bool
If vertical is false, then the edge runs parallel(ish) to the
psi axis. In other words, eta as a function of psi. If vertical
        is true, then the edge corresponds to the opposite (psi as a function of eta).
Returns
-------
edge : 2D numpy array
Points that define network edge
"""
# Bring intersections into parameter space
x_i, y_i, z_i = intersection
psi_i, eta_i = surface.inverse(x_i, y_i, z_i)
if vertical:
eta_i, psi_i = _process_edge(eta_i, psi_i, N_b, N_t)
else:
psi_i, eta_i = _process_edge(psi_i, eta_i, N_b, N_t)
edge = np.array([psi_i, eta_i]).T
return edge
def _process_edge(p1, p2, N_b, N_t):
# sort by p1
i_sort = np.argsort(p1)
p1 = p1[i_sort]
p2 = p2[i_sort]
# verify completeness. If close, make complete.
# Completeness means edge completely cuts through domain.
if abs(p1[0]-0.) < 1e-10:
p1[0] = 0.
elif N_b:
# p1_b = cosine_spacing(0., p1[0], N_b)
p1_b = np.linspace(0., p1[0], N_b)
p2_b = np.full(N_b, p2[0])
p1 = np.concatenate((p1_b[:-1], p1))
p2 = np.concatenate((p2_b[:-1], p2))
else:
raise RuntimeError("edge not complete")
if abs(p1[-1]-1.) < 1e-10:
p1[-1] = 1.
elif N_t:
# p1_t = cosine_spacing(p1[-1], 1., N_t)
p1_t = np.linspace(p1[-1], 1., N_t)
p2_t = np.full(N_t, p2[-1])
p1 = np.concatenate((p1, p1_t[1:]))
p2 = np.concatenate((p2, p2_t[1:]))
else:
raise RuntimeError("edge not complete")
return p1, p2
class _uniform_spacing:
def __init__(self, limits, index):
self._limits = limits
self._i = index
# print(self._limits)
# print(self._i)
def __call__(self, *dummy):
lower, upper = self._limits
if lower is not None:
return lower[:, self._i]
elif upper is not None:
return upper[:, self._i]
else:
raise RuntimeError("must specify edge to use uniform spacing")
def meshparameterspace(shape=(20, 20), psi_limits=(None, None),
eta_limits=(None, None),
psi_spacing="linear",
eta_spacing="linear",
user_spacing=(None, None)):
"""Builds curvilinear mesh inside parameter space.
:param psi_spacing and eta_spacing:
- 'linear': uniform spacing on interior of the surface
- 'cosine': cosine spacing
- 'uniform': spacing matches the spacing along edge
- 'user': user spacing that is passed in through user_spacing
    :param psi_limits and eta_limits: only define if the corresponding spacing
        is 'uniform'. Should be the points where the intersection is located.
"""
if psi_spacing == "cosine":
x_spacing = cosine_spacing()
elif psi_spacing == "linear":
x_spacing = np.linspace
elif psi_spacing == "uniform":
x_spacing = _uniform_spacing(eta_limits, 0)
elif psi_spacing == "user":
if user_spacing[0] is not None:
x_spacing = user_spacing[0]
else:
raise RuntimeError("must provide user_spacing w/ psi_spacing=user")
else:
raise RuntimeError("specified spacing not recognized")
if eta_spacing == "cosine":
y_spacing = cosine_spacing()
elif eta_spacing == "linear":
y_spacing = np.linspace
elif eta_spacing == "uniform":
y_spacing = _uniform_spacing(psi_limits, 1)
elif eta_spacing == "user":
if user_spacing[1] is not None:
y_spacing = user_spacing[1]
else:
raise RuntimeError("must provide user_spacing w/ psi_spacing=user")
else:
raise RuntimeError("specified spacing not recognized")
n_psi, n_eta = shape
psi_lower, psi_upper = psi_limits
eta_lower, eta_upper = eta_limits
# if limits aren't specified, set lower to 0 and upper to 1
if psi_lower is None:
psi_lower = np.full((n_eta, 2), 0.)
eta_min = eta_lower[0, 1] if eta_lower is not None else 0.
eta_max = eta_upper[0, 1] if eta_upper is not None else 1.
psi_lower[:, 1] = y_spacing(eta_min, eta_max, n_eta)
if psi_upper is None:
psi_upper = np.full((n_eta, 2), 1.)
eta_min = eta_lower[-1, 1] if eta_lower is not None else 0.
eta_max = eta_upper[-1, 1] if eta_upper is not None else 1.
psi_upper[:, 1] = y_spacing(eta_min, eta_max, n_eta)
if eta_lower is None:
eta_lower = np.full((n_psi, 2), 0.)
psi_min = psi_lower[0, 0] if psi_lower is not None else 0.
psi_max = psi_upper[0, 0] if psi_upper is not None else 1.
eta_lower[:, 0] = x_spacing(psi_min, psi_max, n_psi)
if eta_upper is None:
eta_upper = np.full((n_psi, 2), 1.)
psi_min = psi_lower[-1, 0] if psi_lower is not None else 0.
psi_max = psi_upper[-1, 0] if psi_upper is not None else 1.
eta_upper[:, 0] = x_spacing(psi_min, psi_max, n_psi)
grid = mesh_curvilinear(psi_lower, psi_upper, eta_lower, eta_upper,
x_spacing, y_spacing)
# TODO: the following probably belongs outside the scope of this class
# if flip:
# grid = np.flipud(grid)
return grid[:, :, 0], grid[:, :, 1]
def mesh_curvilinear(x_lower, x_upper, y_lower, y_upper, x_spacing, y_spacing):
# verify that corner points match
xlyl = np.allclose(x_lower[0], y_lower[0], atol=1e-13, rtol=0.)
xlyu = np.allclose(x_lower[-1], y_upper[0], atol=1e-13, rtol=0.)
xuyl = np.allclose(x_upper[0], y_lower[-1], atol=1e-13, rtol=0.)
xuyu = np.allclose(x_upper[-1], y_upper[-1], atol=1e-13, rtol=0.)
if not (xlyl and xlyu and xuyl and xuyu):
print(xlyl, xlyu, xuyl, xuyu)
print(x_lower[0]-y_lower[0])
print(x_lower[-1]-y_upper[0])
print(x_upper[0]-y_lower[-1])
print(x_upper[-1]-y_upper[-1])
raise RuntimeError("corner points do not match")
n_x = y_lower.shape[0]
n_y = x_lower.shape[0]
grid = np.zeros((n_x, n_y, 2))
# boundary points are set to match limits exactly
grid[0, :] = x_lower
grid[-1, :] = x_upper
grid[:, 0] = y_lower
grid[:, -1] = y_upper
# inner points are evenly spaced between corresponding limits in x and y
for i in range(1, n_x-1):
grid[i, 1:-1, 1] = y_spacing(y_lower[i, 1], y_upper[i, 1], n_y)[1:-1]
for j in range(1, n_y-1):
grid[1:-1, j, 0] = x_spacing(x_lower[j, 0], x_upper[j, 0], n_x)[1:-1]
return grid
class cosine_spacing:
"""Parametric function for obtaining a cosine distribution of points"""
def __init__(self, offset=0, period=1.):
self._offset = offset
self._period = period
def __call__(self, start, stop, num=50):
# calculates the cosine spacing
p = self._period
offset = self._offset
index = np.linspace(0., 1., num)
# spacing = .5*(1.-np.cos(p*np.pi*(index-offset)))
spacing = ((np.cos(np.pi*offset)-np.cos(np.pi*(p*index+offset))) /
(np.cos(np.pi*offset)-np.cos(np.pi*(p+offset))))
points = start+spacing*(stop-start)
return points
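# Worked example (added for illustration, not in the original source): with the
# defaults offset=0 and period=1, cosine_spacing()(0., 1., 5) clusters points
# toward both ends of the interval, approximately [0., 0.146, 0.5, 0.854, 1.].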
# def cosine_spacing(start, stop, num=50, offset=0, period=1.):
# # calculates the cosine spacing
# index = np.linspace(0., 1., num)
# spacing = .5*(1.-np.cos(period*np.pi*(index-offset)))
# points = start+spacing*(stop-start)
# return points
def _distance_point_to_line(P1, P2, PQ):
x0, y0 = PQ
x1, y1 = P1
x2, y2 = P2
dy = y2-y1
dx = x2-x1
return abs(dy*x0-dx*y0+x2*y1-y2*x1)/sqrt(dy*dy+dx*dx)
def _calc_error(point_list):
# calculates error if all points between endpoints of point_list
# were removed.
error = 0.
front = point_list[0]
back = point_list[-1]
for i in range(1, len(point_list)-1):
error += _distance_point_to_line(front, back, point_list[i])
return error
def _calc_length(point_list):
    # calculates the straight-line distance between the endpoints of point_list
x_f, y_f = point_list[0]
x_b, y_b = point_list[-1]
length = sqrt((x_b-x_f)**2+(y_b-y_f)**2)
return length
def coarsen_axi(data_x, data_r, tol, max_length):
# move x and r data into a list of "points"
point_list = []
for i in range(len(data_x)):
point_list.append(np.array([data_x[i], data_r[i]]))
# ITERATIVE ALGORITHM
# Indices for the start and end points of the algorithm
Pstart = 0
Pend = len(point_list)-1
# Indices for 2 pointers that define current range being examined
P1 = Pstart
P2 = Pstart+2
new_point_list = [point_list[Pstart]]
while P2 <= Pend:
error = _calc_error(point_list[P1:P2+1])
if error > tol:
new_point_list.extend(point_list[P1+1:P2+1])
P1 = P2
P2 = P1 + 2
else:
while error < tol and P2 <= Pend:
P2 += 1
error = _calc_error(point_list[P1:P2+1])
cell_length = _calc_length(point_list[P1:P2+1])
# print(cell_length)
if cell_length > max_length:
error += tol*10.
P2 -= 1
            # clamp P2 in case the inner loop ran one past the last point
            new_point_list.append(point_list[min(P2, Pend)])
P1 = P2
P2 = P1 + 2
if not (new_point_list[-1][0] == point_list[-1][0]):
new_point_list.append(point_list[-1])
# print("size of new list", len(new_point_list))
sys.stdout.flush()
new_x = np.zeros(len(new_point_list))
new_r = np.zeros(len(new_point_list))
for i in range(0, len(new_point_list)):
new_x[i] = new_point_list[i][0]
new_r[i] = new_point_list[i][1]
return new_x, new_r
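# Minimal usage sketch (added for illustration; the example profile and the
# numbers below are assumptions, not part of the original source): coarsen a
# simple body of revolution, build the quarter-surface networks, and mesh the
# parameter space with cosine spacing.
if __name__ == "__main__":
    demo_x = np.linspace(0., 10., 101)
    demo_r = np.sqrt(np.maximum(demo_x*(10.-demo_x), 0.))/5.
    coarse_x, coarse_r = coarsen_axi(demo_x, demo_r, tol=1e-3, max_length=1.)
    networks = axisymmetric_surf(coarse_x, coarse_r, N_theta=5)
    print("networks:", len(networks), "first shape:", networks[0].shape)
    psi, eta = meshparameterspace(shape=(5, 5), psi_spacing="cosine",
                                  eta_spacing="cosine")
    print("parameter-space mesh:", psi.shape, eta.shape)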
|
|
#!/usr/bin/env python3
"""
Open a shell over MAVLink.
@author: Beat Kueng ([email protected])
"""
from __future__ import print_function
import sys, select
import termios
from timeit import default_timer as timer
from argparse import ArgumentParser
import os
try:
from pymavlink import mavutil
except ImportError as e:
print("Failed to import pymavlink: " + str(e))
print("")
print("You may need to install it with:")
print(" pip3 install --user pymavlink")
print("")
sys.exit(1)
try:
import serial
except ImportError as e:
print("Failed to import pyserial: " + str(e))
print("")
print("You may need to install it with:")
print(" pip3 install --user pyserial")
print("")
sys.exit(1)
class MavlinkSerialPort():
'''an object that looks like a serial port, but
transmits using mavlink SERIAL_CONTROL packets'''
def __init__(self, portname, baudrate, devnum=0, debug=0):
self.baudrate = 0
self._debug = debug
self.buf = ''
self.port = devnum
self.debug("Connecting with MAVLink to %s ..." % portname)
self.mav = mavutil.mavlink_connection(portname, autoreconnect=True, baud=baudrate)
self.mav.wait_heartbeat()
self.debug("HEARTBEAT OK\n")
self.debug("Locked serial device\n")
def debug(self, s, level=1):
'''write some debug text'''
if self._debug >= level:
print(s)
def write(self, b):
'''write some bytes'''
self.debug("sending '%s' (0x%02x) of len %u\n" % (b, ord(b[0]), len(b)), 2)
while len(b) > 0:
n = len(b)
if n > 70:
n = 70
buf = [ord(x) for x in b[:n]]
buf.extend([0]*(70-len(buf)))
self.mav.mav.serial_control_send(self.port,
mavutil.mavlink.SERIAL_CONTROL_FLAG_EXCLUSIVE |
mavutil.mavlink.SERIAL_CONTROL_FLAG_RESPOND,
0,
0,
n,
buf)
b = b[n:]
def close(self):
self.mav.mav.serial_control_send(self.port, 0, 0, 0, 0, [0]*70)
def _recv(self):
'''read some bytes into self.buf'''
m = self.mav.recv_match(condition='SERIAL_CONTROL.count!=0',
type='SERIAL_CONTROL', blocking=True,
timeout=0.03)
if m is not None:
if self._debug > 2:
print(m)
data = m.data[:m.count]
self.buf += ''.join(str(chr(x)) for x in data)
def read(self, n):
'''read some bytes'''
if len(self.buf) == 0:
self._recv()
if len(self.buf) > 0:
if n > len(self.buf):
n = len(self.buf)
ret = self.buf[:n]
self.buf = self.buf[n:]
if self._debug >= 2:
for b in ret:
self.debug("read 0x%x" % ord(b), 2)
return ret
return ''
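# Usage note (added for illustration): main() below wraps this class around
# stdin/stdout; devnum selects the SERIAL_CONTROL device on the autopilot
# (main() passes 10, the MAVLink shell device).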
def main():
parser = ArgumentParser(description=__doc__)
parser.add_argument('port', metavar='PORT', nargs='?', default = None,
help='Mavlink port name: serial: DEVICE[,BAUD], udp: IP:PORT, tcp: tcp:IP:PORT. Eg: \
/dev/ttyUSB0 or 0.0.0.0:14550. Auto-detect serial if not given.')
parser.add_argument("--baudrate", "-b", dest="baudrate", type=int,
help="Mavlink port baud rate (default=57600)", default=57600)
args = parser.parse_args()
    if args.port is None:
if sys.platform == "darwin":
args.port = "/dev/tty.usbmodem01"
else:
serial_list = mavutil.auto_detect_serial(preferred_list=['*FTDI*',
"*Arduino_Mega_2560*", "*3D_Robotics*", "*USB_to_UART*", '*PX4*', '*FMU*', "*Gumstix*"])
if len(serial_list) == 0:
print("Error: no serial connection found")
return
if len(serial_list) > 1:
print('Auto-detected serial ports are:')
for port in serial_list:
print(" {:}".format(port))
print('Using port {:}'.format(serial_list[0]))
args.port = serial_list[0].device
print("Connecting to MAVLINK...")
mav_serialport = MavlinkSerialPort(args.port, args.baudrate, devnum=10)
mav_serialport.write('\n') # make sure the shell is started
# disable echo & avoid buffering on stdin
fd_in = sys.stdin.fileno()
try:
old_attr = termios.tcgetattr(fd_in)
new_attr = termios.tcgetattr(fd_in)
new_attr[3] = new_attr[3] & ~termios.ECHO # lflags
new_attr[3] = new_attr[3] & ~termios.ICANON
termios.tcsetattr(fd_in, termios.TCSANOW, new_attr)
except termios.error:
# tcgetattr can fail if stdin is not a tty
old_attr = None
ubuf_stdin = os.fdopen(fd_in, 'rb', buffering=0)
try:
cur_line = ''
command_history = []
cur_history_index = 0
def erase_last_n_chars(N):
if N == 0: return
CURSOR_BACK_N = '\x1b['+str(N)+'D'
ERASE_END_LINE = '\x1b[K'
sys.stdout.write(CURSOR_BACK_N + ERASE_END_LINE)
next_heartbeat_time = timer()
quit_time = None
while quit_time is None or quit_time > timer():
while True:
i, o, e = select.select([ubuf_stdin], [], [], 0)
if not i: break
ch = ubuf_stdin.read(1).decode('utf8')
if len(ch) == 0: # EOF
if quit_time is None:
# run a bit longer to read the response (we could also
# read until we get a prompt)
quit_time = timer() + 1
break
# provide a simple shell with command history
if ch == '\n':
if len(cur_line) > 0:
# erase current text (mavlink shell will echo it as well)
erase_last_n_chars(len(cur_line))
# add to history
if len(command_history) == 0 or command_history[-1] != cur_line:
command_history.append(cur_line)
if len(command_history) > 50:
del command_history[0]
cur_history_index = len(command_history)
mav_serialport.write(cur_line+'\n')
cur_line = ''
                elif ord(ch) == 127: # backspace
if len(cur_line) > 0:
erase_last_n_chars(1)
cur_line = cur_line[:-1]
sys.stdout.write(ch)
elif ord(ch) == 27:
ch = ubuf_stdin.read(1).decode('utf8') # skip one
ch = ubuf_stdin.read(1).decode('utf8')
if ch == 'A': # arrow up
if cur_history_index > 0:
cur_history_index -= 1
elif ch == 'B': # arrow down
if cur_history_index < len(command_history):
cur_history_index += 1
# TODO: else: support line editing
erase_last_n_chars(len(cur_line))
if cur_history_index == len(command_history):
cur_line = ''
else:
cur_line = command_history[cur_history_index]
sys.stdout.write(cur_line)
elif ord(ch) > 3:
cur_line += ch
sys.stdout.write(ch)
sys.stdout.flush()
data = mav_serialport.read(4096)
if data and len(data) > 0:
sys.stdout.write(data)
sys.stdout.flush()
# handle heartbeat sending
heartbeat_time = timer()
if heartbeat_time > next_heartbeat_time:
mav_serialport.mav.mav.heartbeat_send(mavutil.mavlink.MAV_TYPE_GCS,
mavutil.mavlink.MAV_AUTOPILOT_GENERIC, 0, 0, 0)
next_heartbeat_time = heartbeat_time + 1
except serial.serialutil.SerialException as e:
print(e)
except KeyboardInterrupt:
mav_serialport.close()
finally:
if old_attr:
termios.tcsetattr(fd_in, termios.TCSADRAIN, old_attr)
if __name__ == '__main__':
main()
|
|
import asyncio
from kafka.admin import NewTopic, NewPartitions
from kafka.admin.config_resource import ConfigResource, ConfigResourceType
from aiokafka.admin import AIOKafkaAdminClient
from aiokafka.consumer import AIOKafkaConsumer
from aiokafka.producer import AIOKafkaProducer
from aiokafka.structs import TopicPartition
from ._testutil import (
KafkaIntegrationTestCase, kafka_versions, run_until_complete
)
class TestAdmin(KafkaIntegrationTestCase):
async def create_admin(self):
admin = AIOKafkaAdminClient(bootstrap_servers=self.hosts)
await admin.start()
self.add_cleanup(admin.close)
return admin
@kafka_versions('>=0.10.0.0')
@run_until_complete
async def test_metadata(self):
admin = await self.create_admin()
metadata = await admin._get_cluster_metadata()
assert metadata.brokers is not None
assert metadata.topics is not None
assert len(metadata.brokers) == 1
@kafka_versions('>=0.10.1.0')
@run_until_complete
async def test_create_topics(self):
admin = await self.create_admin()
resp = await admin.create_topics([NewTopic(self.topic, 1, 1)])
assert resp.topic_errors is not None
assert len(resp.topic_errors) == 1
topic, error_code, error = resp.topic_errors[0]
assert topic == self.topic
assert error_code == 0
assert not error
@kafka_versions('>=0.10.1.0') # Since we use `create_topics()`
@run_until_complete
async def test_list_topics(self):
admin = await self.create_admin()
topic_names = {self.random_topic_name(), self.random_topic_name()}
topics = [NewTopic(tn, 1, 1) for tn in topic_names]
await admin.create_topics(topics)
actual = await admin.list_topics()
assert set(actual) >= topic_names
# @kafka_versions('>=0.10.1.0')
@kafka_versions('>=1.0.0') # XXX Times out with 0.10.2.1 and 0.11.0.3
@run_until_complete
async def test_delete_topics(self):
admin = await self.create_admin()
resp = await admin.create_topics([NewTopic(self.topic, 1, 1)])
assert resp.topic_errors[0][2] is None
topics = await admin.list_topics()
assert self.topic in topics
resp = await admin.delete_topics([self.topic])
errors = resp.topic_error_codes
assert len(errors) == 1
topic, error_code = errors[0]
assert topic == self.topic
assert error_code == 0
topics = await admin.list_topics()
assert self.topic not in topics
@kafka_versions('>=0.11.0.0')
@run_until_complete
async def test_describe_configs_topic(self):
admin = await self.create_admin()
await admin.create_topics([NewTopic(self.topic, 1, 1)])
cr = ConfigResource(ConfigResourceType.TOPIC, self.topic)
resp = await admin.describe_configs([cr])
assert len(resp) == 1
assert len(resp[0].resources) == 1
config_resource = resp[0].resources[0]
error_code, error_message, resource_type, resource_name, *_ = config_resource
assert error_code == 0
assert not error_message # None or "" depending on kafka version
assert resource_type == ConfigResourceType.TOPIC
assert resource_name == self.topic
@kafka_versions('>=0.11.0.0')
@run_until_complete
async def test_describe_configs_broker(self):
admin = await self.create_admin()
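        # the test cluster runs a single broker (see test_metadata), so unpack its id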
[broker_id] = admin._client.cluster._brokers.keys()
cr = ConfigResource(ConfigResourceType.BROKER, broker_id)
resp = await admin.describe_configs([cr])
assert len(resp) == 1
assert len(resp[0].resources) == 1
config_resource = resp[0].resources[0]
error_code, error_message, resource_type, resource_name, *_ = config_resource
assert error_code == 0
assert not error_message # None or "" depending on kafka version
assert resource_type == ConfigResourceType.BROKER
assert resource_name == str(broker_id)
@kafka_versions('>=0.11.0.0')
@run_until_complete
async def test_alter_configs(self):
admin = await self.create_admin()
await admin.create_topics([NewTopic(self.topic, 1, 1)])
cr = ConfigResource(
ConfigResourceType.TOPIC, self.topic, {"cleanup.policy": "delete"}
)
await admin.alter_configs([cr])
new_configs_resp = await admin.describe_configs([cr])
assert len(new_configs_resp) == 1
assert len(new_configs_resp[0].resources) == 1
config_entries = new_configs_resp[0].resources[0][4]
assert len(config_entries) == 1
name, value, *_ = config_entries[0]
assert name == "cleanup.policy"
assert value == "delete"
@kafka_versions('>=0.10.0.0')
@run_until_complete
async def test_describe_cluster(self):
admin = await self.create_admin()
[broker_id] = admin._client.cluster._brokers.keys()
resp = await admin.describe_cluster()
assert len(resp['brokers']) == 1
assert resp['brokers'][0]['node_id'] == broker_id
@kafka_versions('>=1.0.0')
@run_until_complete
async def test_create_partitions(self):
admin = await self.create_admin()
await admin.create_topics([NewTopic(self.topic, 1, 1)])
old_desc = await admin.describe_topics([self.topic])
old_partitions = {p["partition"] for p in old_desc[0]["partitions"]}
assert len(old_partitions) == 1
new_part = NewPartitions(total_count=2)
await admin.create_partitions({self.topic: new_part})
new_desc = await admin.describe_topics([self.topic])
new_partitions = {p["partition"] for p in new_desc[0]["partitions"]}
assert len(new_partitions) == 2
assert new_partitions > old_partitions
@kafka_versions('>=0.10.0.0')
@run_until_complete
async def test_list_consumer_groups(self):
admin = await self.create_admin()
group_id = f'group-{self.id()}'
consumer = AIOKafkaConsumer(
self.topic, group_id=group_id, bootstrap_servers=self.hosts
)
await consumer.start()
self.add_cleanup(consumer.stop)
await asyncio.sleep(0.1) # Otherwise we can get GroupLoadInProgressError
resp = await admin.list_consumer_groups()
assert len(resp) >= 1 # There can be group left from other test
groups = [group for group, *_ in resp]
assert group_id in groups
@kafka_versions('>=0.10.0.0')
@run_until_complete
async def test_describe_consumer_groups(self):
admin = await self.create_admin()
group_id = f'group-{self.id()}'
consumer = AIOKafkaConsumer(
self.topic, group_id=group_id, bootstrap_servers=self.hosts
)
await consumer.start()
self.add_cleanup(consumer.stop)
resp = await admin.describe_consumer_groups([group_id])
assert len(resp) == 1
assert len(resp[0].groups) == 1
error_code, group, *_ = resp[0].groups[0]
assert error_code == 0
assert group == group_id
@kafka_versions('>=0.10.0.0')
@run_until_complete
async def test_list_consumer_group_offsets(self):
admin = await self.create_admin()
group_id = f'group-{self.id()}'
consumer = AIOKafkaConsumer(
self.topic, group_id=group_id, bootstrap_servers=self.hosts,
enable_auto_commit=False
)
await consumer.start()
self.add_cleanup(consumer.stop)
async with AIOKafkaProducer(bootstrap_servers=self.hosts) as producer:
await producer.send_and_wait(self.topic, b'some-message')
await producer.send_and_wait(self.topic, b'other-message')
msg = await consumer.getone()
await consumer.commit()
resp = await admin.list_consumer_group_offsets(group_id)
tp = TopicPartition(msg.topic, msg.partition)
assert resp[tp].offset == msg.offset + 1
resp = await admin.list_consumer_group_offsets(group_id, partitions=[tp])
assert resp[tp].offset == msg.offset + 1
|
|
from django.core.urlresolvers import reverse
from django.test import TestCase
from .models import ZipCodeLocation, Location
SAMPLE_POINT_1 = 'POINT(-92.289595 34.746481)'
SAMPLE_POINT_2 = 'POINT(-92.273494 34.744487)'
SAMPLE_POINT_3 = 'POINT(-92.489047 34.810632)'
SAMPLE_POINT_4 = 'POINT(-94.251795 35.7813)'
SAMPLE_POINT_5 = 'POINT(-93.053321 34.516402)'
class ModelTestCase(TestCase):
    def test_can_create_zip_location(self):
z = ZipCodeLocation.objects.create(zip_code='72201',
location=SAMPLE_POINT_1)
self.assertEqual(z.__unicode__(), z.zip_code)
def test_can_create_location(self):
l = Location.objects.create(name='Location 1',
address='addr',
address_2='addr2',
city='city',
state='AR',
zip_code='90210',
location=SAMPLE_POINT_2,
description='lorem ipsum')
self.assertEqual(l.__unicode__(
), "{0} :: {1}, {2}".format(l.name, l.city, l.state))
class LocationTestBase(TestCase):
def setUp(self):
self.l_1 = Location.objects.create(name='Location 1 - LR',
address='addr',
address_2='addr2',
city='city 4',
state='AR',
zip_code='90210',
location=SAMPLE_POINT_1,
description='lorem ipsum')
self.l_2 = Location.objects.create(name='Location 2 - LR',
address='addr',
address_2='addr2',
city='city 2',
state='AR',
zip_code='90210',
location=SAMPLE_POINT_2,
description='lorem ipsum')
self.l_3 = Location.objects.create(name='Location 3 - WLR',
address='addr',
address_2='addr2',
city='city 3',
state='AR',
zip_code='90210',
location=SAMPLE_POINT_3,
description='lorem ipsum')
self.l_4 = Location.objects.create(name='Location 4 - DD',
address='addr',
address_2='addr2',
city='city 1',
state='AR',
zip_code='90210',
location=SAMPLE_POINT_4,
description='lorem ipsum')
self.l_5 = Location.objects.create(name='Location 5 - HS',
address='addr',
address_2='addr2',
city='city 5',
state='AR',
zip_code='90210',
location=SAMPLE_POINT_5,
description='lorem ipsum')
class LocationViewTest(LocationTestBase):
def test_location_by_state_view(self):
response = self.client.get(reverse('store_locator_locations_by_state',
kwargs={'state': 'AR'}))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'store_locator/location_list.html')
self.assertQuerysetEqual(response.context['location_list'],
[repr(self.l_4), repr(self.l_2), repr(self.l_3), repr(self.l_1),
repr(self.l_5)])
def test_location_by_state_view_unknown_state(self):
response = self.client.get(reverse('store_locator_locations_by_state',
kwargs={'state': 'BLAH'}))
self.assertEqual(response.status_code, 200)
self.assertQuerysetEqual(response.context['location_list'],
Location.objects.none())
def test_location_detail_view(self):
response = self.client.get(self.l_1.get_absolute_url())
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'store_locator/location_detail.html')
self.assertEqual(response.context['location'], self.l_1)
class LocationRadiusSearchTest(LocationTestBase):
def test_radius_search(self):
search_point = '34.74759,-92.271053'
search_distance = '5'
response = self.client.get(reverse('store_location_find_by_point'),
{'location': search_point,
'distance': search_distance})
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'store_locator/location_search.html')
self.assertEqual(response.context['distance'], int(search_distance))
self.assertEqual(response.context['search_spot'], search_point)
self.assertQuerysetEqual(response.context['location_list'],
[repr(self.l_2), repr(self.l_1)])
def test_radius_search_invalid_distance(self):
search_point = '34.74759,-92.271053'
response = self.client.get(reverse('store_location_find_by_point'),
{'location': search_point,
'distance': '-2'})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['distance'], 20)
response = self.client.get(reverse('store_location_find_by_point'),
{'location': search_point,
'distance': '0'})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['distance'], 20)
response = self.client.get(reverse('store_location_find_by_point'),
{'location': search_point,
'distance': '101'})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['distance'], 20)
response = self.client.get(reverse('store_location_find_by_point'),
{'location': search_point})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['distance'], 20)
response = self.client.get(reverse('store_location_find_by_point'),
{'location': search_point,
'distance': 'asdfadfadsf'})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['distance'], 20)
def test_radius_search_invalid_location(self):
response = self.client.get(reverse('store_location_find_by_point'),
{'location': 'adfadsf',
'distance': '5'})
self.assertEqual(response.status_code, 200)
self.assertQuerysetEqual(
response.context['location_list'], Location.objects.none())
response = self.client.get(reverse('store_location_find_by_point'),
{'location': 'adfaasdf,sdafdsf',
'distance': '5'})
self.assertEqual(response.status_code, 200)
self.assertQuerysetEqual(
response.context['location_list'], Location.objects.none())
response = self.client.get(reverse('store_location_find_by_point'),
{'location': '34.00,',
'distance': '5'})
self.assertEqual(response.status_code, 200)
self.assertQuerysetEqual(
response.context['location_list'], Location.objects.none())
response = self.client.get(reverse('store_location_find_by_point'),
{'location': '34.00,asdfsaf',
'distance': '5'})
self.assertEqual(response.status_code, 200)
self.assertQuerysetEqual(
response.context['location_list'], Location.objects.none())
response = self.client.get(reverse('store_location_find_by_point'),
{'location': '',
'distance': '5'})
self.assertEqual(response.status_code, 200)
self.assertQuerysetEqual(
response.context['location_list'], Location.objects.none())
response = self.client.get(reverse('store_location_find_by_point'),
{'distance': '5'})
self.assertEqual(response.status_code, 200)
self.assertQuerysetEqual(
response.context['location_list'], Location.objects.none())
class LocationZipSearchTest(LocationTestBase):
def setUp(self):
super(LocationZipSearchTest, self).setUp()
ZipCodeLocation.objects.create(zip_code='72201',
location='Point(-92.27987 34.745692)')
def test_zip_search(self):
search_zip = '72201'
search_distance = '5'
response = self.client.get(reverse('store_location_find_by_zip'),
{'zip': search_zip,
'distance': search_distance})
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'store_locator/location_search.html')
self.assertEqual(response.context['distance'], int(search_distance))
self.assertEqual(response.context['search_spot'], search_zip)
self.assertQuerysetEqual(response.context['location_list'],
[repr(self.l_2), repr(self.l_1)])
def test_invalid_zip(self):
response = self.client.get(reverse('store_location_find_by_zip'),
{'zip': 'asfasdf',
'distance': '5'})
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'store_locator/location_search.html')
self.assertQuerysetEqual(response.context['location_list'],
Location.objects.none())
response = self.client.get(reverse('store_location_find_by_zip'),
{'zip': '',
'distance': '5'})
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'store_locator/location_search.html')
self.assertQuerysetEqual(response.context['location_list'],
Location.objects.none())
response = self.client.get(reverse('store_location_find_by_zip'),
{'distance': '5'})
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'store_locator/location_search.html')
self.assertQuerysetEqual(response.context['location_list'],
Location.objects.none())
|
|
import math
def is_spaces(st):
for x in st:
if x == '#':
return True
if x != ' ' and x != '\t' and x != '\n':
return False
return True
class BlankBeforeFunction:
""" Number of blank lines before function in files. Verbose version doesn't require specific logic. """
TOO_FEW_LINES = 'too_few_lines'
TOO_MANY_LINES = 'too_many_lines'
inspections = {
        TOO_FEW_LINES: 'Too few lines between functions, compared to the repository. '
                       'Add new lines here.',
        TOO_MANY_LINES: 'Too many lines between functions, compared to the repository! '
                        'Reduce lines here.',
}
discrete_groups = [
{
'name': '0',
'from': 0,
'to': 0,
},
{
'name': '1',
'from': 1,
'to': 1,
},
{
'name': '2',
'from': 2,
'to': 2,
},
{
'name': '3+',
'from': 3,
'to': math.inf,
},
]
def count(self, file, verbose=False):
# Count blank lines before function
with open(file) as f:
res = [0, 0, 0, 0]
res_list = [[], [], [], []]
blank = 0
i = 0
beginning = True
for line in f.readlines():
i += 1
if line[: 4] == "def " and not beginning:
res[min(3, blank)] += 1
res_list[min(3, blank)].append(i)
if is_spaces(line):
blank += 1
else:
blank = 0
beginning = False
# Form result
result = {}
if res[0] > 0:
result['0'] = res[0]
if res[1] > 0:
result['1'] = res[1]
if res[2] > 0:
result['2'] = res[2]
if res[3] > 0:
result['3+'] = res[3]
if verbose:
if (res[0] > 0):
result['0'] = {
'count': res[0],
'lines': res_list[0],
}
if (res[1] > 0):
result['1'] = {
'count': res[1],
'lines': res_list[1],
}
if (res[2] > 0):
result['2'] = {
'count': res[2],
'lines': res_list[2],
}
if (res[3] > 0):
result['3+'] = {
'count': res[3],
'lines': res_list[3],
}
return result
def discretize(self, values):
discrete_values = {}
sum = 0.0
# Set initial values for groups to 0
for group in self.discrete_groups:
discrete_values[group['name']] = 0
# Sum values for each group
for value, count in values.items():
for group in self.discrete_groups:
if group['from'] <= int(value[0]) <= group['to']:
discrete_values[group['name']] += count
sum += count
continue
# Normalize
for key, value in discrete_values.items():
discrete_values[key] = value / sum
return discrete_values
def inspect(self, discrete, values):
inspections = {}
        # If most functions in the repository (more than 50%) have 0 blank lines
        # before them, flag functions preceded by one or more blank lines
if discrete['0'] > 0.5:
for nc_count in values.keys():
if nc_count == '3+':
int_nc = 5
else:
int_nc = int(nc_count)
if 1 <= int_nc <= math.inf:
if self.TOO_MANY_LINES in inspections:
inspections[self.TOO_MANY_LINES]['lines'] += values[nc_count]['lines']
continue
inspections[self.TOO_MANY_LINES] = {
'message': self.inspections[self.TOO_MANY_LINES],
'lines': values[nc_count]['lines'],
}
if discrete['1'] > 0.5:
for nc_count in values.keys():
if nc_count == '3+':
int_nc = 5
else:
int_nc = int(nc_count)
if 0 <= int_nc <= 0:
if self.TOO_FEW_LINES in inspections:
inspections[self.TOO_FEW_LINES]['lines'] += values[nc_count]['lines']
continue
inspections[self.TOO_FEW_LINES] = {
'message': self.inspections[self.TOO_FEW_LINES],
'lines': values[nc_count]['lines'][:],
}
if int_nc > 1:
if self.TOO_MANY_LINES in inspections:
inspections[self.TOO_MANY_LINES]['lines'] += values[nc_count]['lines']
continue
inspections[self.TOO_MANY_LINES] = {
'message': self.inspections[self.TOO_MANY_LINES],
'lines': values[nc_count]['lines'][:],
}
if discrete['2'] > 0.5:
for nc_count in values.keys():
if nc_count == '3+':
int_nc = 5
else:
int_nc = int(nc_count)
if 0 <= int_nc <= 1:
if self.TOO_FEW_LINES in inspections:
inspections[self.TOO_FEW_LINES]['lines'] += values[nc_count]['lines']
continue
inspections[self.TOO_FEW_LINES] = {
'message': self.inspections[self.TOO_FEW_LINES],
'lines': values[nc_count]['lines'][:],
}
if int_nc > 3:
if self.TOO_MANY_LINES in inspections:
inspections[self.TOO_MANY_LINES]['lines'] += values[nc_count]['lines']
continue
inspections[self.TOO_MANY_LINES] = {
'message': self.inspections[self.TOO_MANY_LINES],
'lines': values[nc_count]['lines'][:],
}
if discrete['3+'] > 0.5:
for nc_count in values.keys():
if nc_count == '3+':
int_nc = 5
else:
int_nc = int(nc_count)
if 0 <= int_nc <= 2:
if self.TOO_FEW_LINES in inspections:
inspections[self.TOO_FEW_LINES]['lines'] += values[nc_count]['lines']
continue
inspections[self.TOO_FEW_LINES] = {
'message': self.inspections[self.TOO_FEW_LINES],
'lines': values[nc_count]['lines'][:],
}
# Sort line numbers
for key in inspections.keys():
inspections[key]['lines'] = sorted(inspections[key]['lines'])
return inspections
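# Minimal usage sketch (added for illustration; running the metric on this file
# itself is an assumption, not part of the original module): count blank-line
# groups, discretize them, then inspect using the verbose per-line data.
if __name__ == "__main__":
    metric = BlankBeforeFunction()
    counts = metric.count(__file__)
    verbose_counts = metric.count(__file__, verbose=True)
    discrete = metric.discretize(counts)
    print(discrete)
    print(metric.inspect(discrete, verbose_counts))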
|
|
#!/usr/bin/env python
#
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the MIT license.
# See the LICENSE file in the project root for more information.
#
##########################################################################
##########################################################################
#
# Module: run-throughput-tests.py
#
# Notes: runs throughput testing for coreclr and uploads the timing results
# to benchview
#
#
##########################################################################
##########################################################################
import argparse
import distutils.dir_util
import os
import re
import shutil
import subprocess
import sys
import time
import timeit
import stat
import csv
##########################################################################
# Globals
##########################################################################
# List of dlls we want to exclude
dll_exclude_list = {
'Windows_NT': [
# Require Newtonsoft.Json
"Microsoft.DotNet.ProjectModel.dll",
"Microsoft.Extensions.DependencyModel.dll",
# Require System.Security.Principal.Windows
"System.Net.Requests.dll",
"System.Net.Security.dll",
"System.Net.Sockets.dll"
],
'Linux' : [
# Required System.Runtime.WindowsRuntime
"System.Runtime.WindowsRuntime.UI.Xaml.dll"
]
}
jit_list = {
'Windows_NT': {
'x64': 'clrjit.dll',
'x86': 'clrjit.dll',
},
'Linux': {
'x64': 'libclrjit.so'
}
}
os_group_list = {
'Windows_NT': 'Windows_NT',
'Ubuntu14.04': 'Linux'
}
python_exe_list = {
'Windows_NT': 'py',
'Linux': 'python3.5'
}
##########################################################################
# Argument Parser
##########################################################################
description = 'Tool to collect throughput performance data'
parser = argparse.ArgumentParser(description=description)
parser.add_argument('-arch', dest='arch', default='x64')
parser.add_argument('-configuration', dest='build_type', default='Release')
parser.add_argument('-run_type', dest='run_type', default='rolling')
parser.add_argument('-os', dest='operating_system', default='Windows_NT')
parser.add_argument('-clr_root', dest='clr_root', default=None)
parser.add_argument('-assembly_root', dest='assembly_root', default=None)
parser.add_argument('-benchview_path', dest='benchview_path', default=None)
##########################################################################
# Helper Functions
##########################################################################
def validate_args(args):
""" Validate all of the arguments parsed.
Args:
args (argparser.ArgumentParser): Args parsed by the argument parser.
Returns:
        (arch, operating_system, os_group, build_type, run_type, clr_root, assembly_root, benchview_path)
        (str, str, str, str, str, str, str, str)
Notes:
If the arguments are valid then return them all in a tuple. If not, raise
an exception stating x argument is incorrect.
"""
arch = args.arch
build_type = args.build_type
run_type = args.run_type
operating_system = args.operating_system
clr_root = args.clr_root
assembly_root = args.assembly_root
benchview_path = args.benchview_path
def validate_arg(arg, check):
""" Validate an individual arg
Args:
arg (str|bool): argument to be validated
check (lambda: x-> bool): test that returns either True or False
: based on whether the check passes.
Returns:
is_valid (bool): Is the argument valid?
"""
helper = lambda item: item is not None and check(item)
if not helper(arg):
raise Exception('Argument: %s is not valid.' % (arg))
valid_archs = {'Windows_NT': ['x86', 'x64'], 'Linux': ['x64']}
valid_build_types = ['Release']
valid_run_types = ['rolling', 'private']
valid_os = ['Windows_NT', 'Ubuntu14.04']
    arch = next((a for archs in valid_archs.values() for a in archs if a.lower() == arch.lower()), arch)
build_type = next((b for b in valid_build_types if b.lower() == build_type.lower()), build_type)
validate_arg(operating_system, lambda item: item in valid_os)
os_group = os_group_list[operating_system]
validate_arg(arch, lambda item: item in valid_archs[os_group])
validate_arg(build_type, lambda item: item in valid_build_types)
validate_arg(run_type, lambda item: item in valid_run_types)
if clr_root is None:
raise Exception('--clr_root must be set')
else:
clr_root = os.path.normpath(clr_root)
validate_arg(clr_root, lambda item: os.path.isdir(clr_root))
if assembly_root is None:
raise Exception('--assembly_root must be set')
else:
assembly_root = os.path.normpath(assembly_root)
validate_arg(assembly_root, lambda item: os.path.isdir(assembly_root))
    if benchview_path is not None:
benchview_path = os.path.normpath(benchview_path)
validate_arg(benchview_path, lambda item: os.path.isdir(benchview_path))
args = (arch, operating_system, os_group, build_type, run_type, clr_root, assembly_root, benchview_path)
# Log configuration
log('Configuration:')
log(' arch: %s' % arch)
log(' os: %s' % operating_system)
log(' os_group: %s' % os_group)
log(' build_type: %s' % build_type)
log(' run_type: %s' % run_type)
log(' clr_root: %s' % clr_root)
log(' assembly_root: %s' % assembly_root)
if not benchview_path is None:
log('benchview_path : %s' % benchview_path)
return args
def nth_dirname(path, n):
""" Find the Nth parent directory of the given path
Args:
path (str): path name containing at least N components
n (int): num of basenames to remove
Returns:
outpath (str): path with the last n components removed
Notes:
If n is 0, path is returned unmodified
"""
assert n >= 0
for i in range(0, n):
path = os.path.dirname(path)
return path
def del_rw(action, name, exc):
os.chmod(name, stat.S_IWRITE)
os.remove(name)
def log(message):
""" Print logging information
Args:
message (str): message to be printed
"""
print('[%s]: %s' % (sys.argv[0], message))
def generateCSV(dll_name, dll_runtimes):
""" Write throuput performance data to a csv file to be consumed by measurement.py
Args:
dll_name (str): the name of the dll
dll_runtimes (float[]): A list of runtimes for each iteration of the performance test
"""
csv_file_name = "throughput-%s.csv" % (dll_name)
csv_file_path = os.path.join(os.getcwd(), csv_file_name)
with open(csv_file_path, 'w') as csvfile:
output_file = csv.writer(csvfile, delimiter=',', lineterminator='\n')
for iteration in dll_runtimes:
output_file.writerow(["default", "coreclr-crossgen-tp", dll_name, iteration])
return csv_file_name
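# For example (illustrative values), generateCSV("System.Private.CoreLib", [512.3, 508.9])
# writes throughput-System.Private.CoreLib.csv containing:
#   default,coreclr-crossgen-tp,System.Private.CoreLib,512.3
#   default,coreclr-crossgen-tp,System.Private.CoreLib,508.9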
def runIterations(dll_name, dll_path, iterations, crossgen_path, jit_path, assemblies_path):
""" Run throughput testing for a given dll
Args:
dll_name: the name of the dll
dll_path: the path to the dll
iterations: the number of times to run crossgen on the dll
crossgen_path: the path to crossgen
jit_path: the path to the jit
assemblies_path: the path to the assemblies that may be needed for the crossgen run
Returns:
dll_elapsed_times: a list of the elapsed times for the dll
"""
dll_elapsed_times = []
# Set up arguments for running crossgen
run_args = [crossgen_path,
'/JITPath',
jit_path,
'/Platform_Assemblies_Paths',
assemblies_path,
dll_path
]
log(" ".join(run_args))
    # timeit.default_timer() returns seconds, so multiply by the multiplier to report the elapsed time in milliseconds
multiplier = 1000
for iteration in range(0,iterations):
proc = subprocess.Popen(run_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
start_time = timeit.default_timer()
(out, err) = proc.communicate()
end_time = timeit.default_timer()
if proc.returncode == 0:
# Calculate the runtime
elapsed_time = (end_time - start_time) * multiplier
dll_elapsed_times.append(elapsed_time)
else:
log("Error in %s" % (dll_name))
log(err.decode("utf-8"))
return dll_elapsed_times
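# The command assembled above looks roughly like this (illustrative paths):
#   crossgen.exe /JITPath <bin_path>\clrjit.dll /Platform_Assemblies_Paths <assembly_root> System.dll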
##########################################################################
# Main
##########################################################################
def main(args):
global dll_exclude_list
global jit_list
global os_group_list
global python_exe_list
architecture, operating_system, os_group, build_type, run_type, clr_root, assembly_root, benchview_path = validate_args(args)
arch = architecture
current_dir = os.getcwd()
jit = jit_list[os_group][architecture]
crossgen = 'crossgen'
if os_group == 'Windows_NT':
crossgen += '.exe'
# Make sandbox
sandbox_path = os.path.join(clr_root, "sandbox")
if os.path.isdir(sandbox_path):
shutil.rmtree(sandbox_path, onerror=del_rw)
os.makedirs(sandbox_path)
os.chdir(sandbox_path)
# Set up paths
bin_path = os.path.join(clr_root, 'bin', 'Product', os_group + '.' + arch + '.' + build_type)
crossgen_path = os.path.join(bin_path,crossgen)
jit_path = os.path.join(bin_path, jit)
iterations = 6
python_exe = python_exe_list[os_group]
# Run throughput testing
for dll_file_name in os.listdir(assembly_root):
# Find all framework dlls in the assembly_root dir, which we will crossgen
        if (dll_file_name.endswith(".dll") and
                ".ni." not in dll_file_name and
                ("Microsoft" in dll_file_name or "System" in dll_file_name) and
                dll_file_name not in dll_exclude_list[os_group]):
dll_name = dll_file_name.replace(".dll", "")
dll_path = os.path.join(assembly_root, dll_file_name)
dll_elapsed_times = runIterations(dll_file_name, dll_path, iterations, crossgen_path, jit_path, assembly_root)
if len(dll_elapsed_times) != 0:
if not benchview_path is None:
# Generate the csv file
csv_file_name = generateCSV(dll_name, dll_elapsed_times)
shutil.copy(csv_file_name, clr_root)
# For each benchmark, call measurement.py
measurement_args = [python_exe,
os.path.join(benchview_path, "measurement.py"),
"csv",
os.path.join(os.getcwd(), csv_file_name),
"--metric",
"execution_time",
"--unit",
"milliseconds",
"--better",
"desc",
"--drop-first-value",
"--append"]
log(" ".join(measurement_args))
proc = subprocess.Popen(measurement_args)
proc.communicate()
else:
# Write output to console if we are not publishing
log("%s" % (dll_name))
log("Duration: [%s]" % (", ".join(str(x) for x in dll_elapsed_times)))
# Upload the data
if not benchview_path is None:
# Call submission.py
submission_args = [python_exe,
os.path.join(benchview_path, "submission.py"),
"measurement.json",
"--build",
os.path.join(clr_root, "build.json"),
"--machine-data",
os.path.join(clr_root, "machinedata.json"),
"--metadata",
os.path.join(clr_root, "submission-metadata.json"),
"--group",
"CoreCLR-throughput",
"--type",
run_type,
"--config-name",
build_type,
"--config",
"Configuration",
build_type,
"--config",
"OS",
operating_system,
"--arch",
architecture,
"--machinepool",
"PerfSnake"
]
log(" ".join(submission_args))
proc = subprocess.Popen(submission_args)
proc.communicate()
# Call upload.py
upload_args = [python_exe,
os.path.join(benchview_path, "upload.py"),
"submission.json",
"--container",
"coreclr"
]
log(" ".join(upload_args))
proc = subprocess.Popen(upload_args)
proc.communicate()
os.chdir(current_dir)
return 0
if __name__ == "__main__":
    Args = parser.parse_args(sys.argv[1:])
    sys.exit(main(Args))
|
|
import logging
import os
from time import sleep
from datetime import timedelta, datetime
from snaptastic import exceptions
from snaptastic import get_ec2_conn
from snaptastic import metaclass
from snaptastic.ebs_volume import EBSVolume
from snaptastic.utils import get_userdata_dict, add_tags
logger = logging.getLogger(__name__)
class Snapshotter(object):
'''
Reusable class for creating snapshots and mounting them on boot
This class makes a few assumptions:
- role
- cluster
- environment
Are part of your userdata
The key things to customize:
- get_volumes
These hooks
- pre_mounts, post_mounts
- pre_snapshots, post_snapshots
'''
SNAPSHOT_EXPIRY_DAYS = 7
NOT_READY_SNAPSHOT_SLEEP = 2
name = None
__metaclass__ = metaclass.SnapshotterRegisteringMetaClass
def __init__(self, userdata=None, metadata=None, connection=None, bdm=None):
        '''
        Initializes the Snapshotter, resolving whatever was not explicitly
        provided:
        - userdata (from the instance userdata)
        - metadata (from the instance metadata service)
        - connection (a fresh EC2 connection)
        - bdm (the instance's block device mapping)
        :param userdata: dictionary with the userdata
        :type userdata: dict
        :param metadata: metadata for the instance
        :type metadata: dict
        :param connection: boto connection object
        :param bdm: dictionary describing the device mapping
        '''
from boto.utils import get_instance_metadata
self.userdata = get_userdata_dict() if userdata is None else userdata
self.metadata = get_instance_metadata(
) if metadata is None else metadata
self.con = get_ec2_conn() if connection is None else connection
self.bdm = self.get_bdm() if bdm is None else bdm
'''
These you will need to customize
'''
def get_volumes(self):
'''
Get the volumes for this instance, customize this at will
'''
volume = EBSVolume(device='/dev/sdf', mount_point='/mnt/test', size=5)
volumes = [volume]
return volumes
def get_filter_tags(self):
'''
The tags which are used for finding the correct snapshot to load from.
In addition to these tags, mount point is also always added.
Use these to unique identify different parts of your infrastructure
'''
tags = {
'role': self.userdata['role'],
'cluster': self.userdata['cluster'],
'environment': self.userdata['environment']
}
return tags
'''
Main functions to call when using Snapshotter
'''
def make_snapshots(self, volumes=None):
'''
Make snapshots of all the volumes
'''
volumes = volumes or self.get_volumes()
logger.info('making snapshots of %s volumes', len(volumes))
self.pre_snapshots(volumes)
try:
return self._make_snapshots(volumes)
finally:
self.post_snapshots(volumes)
def _make_snapshots(self, volumes):
snapshots = []
for vol in volumes:
self.pre_snapshot(vol)
snapshot = self.make_snapshot(vol)
snapshots.append(snapshot)
self.post_snapshot(vol)
return snapshots
def make_snapshot(self, vol):
# get a snapshot name
description = self.get_snapshot_description(vol)
logger.info(
'preparing to create a snapshot with description %s', description)
# find the volume ID for this device
volume_id = self.get_volume_id(vol)
# get the tags, note that these are used for finding the right snapshot
tags = self.get_tags_for_volume(vol)
# Don't freeze more than we need to
with vol.freeze():
logger.info('creating snapshot')
snapshot = self.con.create_snapshot(
volume_id, description=description)
        logger.info('successfully created snapshot with id %s', snapshot.id)
# Add tags
logger.info('tagging snapshot %s with tags %s', snapshot.id, tags)
add_tags(snapshot, tags)
return snapshot
def clear_snapshot_cache(self):
if hasattr(self, '_snapshots'):
del self._snapshots
def wait_before_attempt(self, attempt_number):
'''
        Waits a linearly increasing amount of time based on the number of attempts
already done.
It is used to sleep between multiple attempts (eg. request polling)
:param attempt_number: the number of attempts already done
'''
seconds_to_sleep = attempt_number * self.NOT_READY_SNAPSHOT_SLEEP
sleep(seconds_to_sleep)
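    # For example, with NOT_READY_SNAPSHOT_SLEEP = 2 the waits grow linearly:
    # attempt 1 sleeps 2s, attempt 2 sleeps 4s, attempt 3 sleeps 6s, and so on.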
def wait_for_snapshots(self, volumes, max_retries=12):
'''
Make sure all volumes have a ready to mount snapshot
before starting to mount them
'''
retries = 0
not_ready = [volumes]
while len(not_ready) > 0:
self.clear_snapshot_cache()
not_ready = [vol for vol in volumes
if self.get_snapshot(vol)
and self.get_snapshot(vol).status != 'completed']
if not_ready == []:
break
if retries >= max_retries:
raise exceptions.MissingSnapshot(
'Snapshots are not ready after %s attempts, aborting...' % retries)
retries += 1
            logger.info('Waiting %s seconds for volumes %s to have ready snapshots' % (
                retries * self.NOT_READY_SNAPSHOT_SLEEP, not_ready))
self.wait_before_attempt(retries)
def mount_snapshots(self, volumes=None, ignore_mounted=False, dry_run=False):
''' Loops through the volumes and runs mount_volume on them
When ignore_mounted is True it will ignore DeviceAlreadyExists errors
'''
volumes = volumes or self.get_volumes()
logger.info('preparing to mount %s volumes', len(volumes))
# TODO, ugly code here for testing purpose
if dry_run:
for vol in volumes:
snapshot_id = self.get_snapshot(vol)
logger.info(
'for volume %s found snapshot %s', vol, snapshot_id)
return volumes
self.wait_for_snapshots(volumes)
self.pre_mounts(volumes)
for vol in volumes:
self.pre_mount(vol)
try:
self.mount_snapshot(vol)
except exceptions.DeviceAlreadyExists:
if ignore_mounted:
logger.info("Ignoring {0}".format(vol))
else:
raise
self.post_mount(vol)
self.post_mounts(volumes)
return volumes
def mount_snapshot(self, ebs_volume):
'''
Goes through the steps needed to mount the specified volume
- checks if we have a snapshot
- create a new volume and attach it
- tag the volume
- load the data from the snapshot into the volume
:param ebs_volume: the volume specification, we're mounting
:type ebs_volume: EBSVolume
'''
# see if we have a snapshot we can start from
try:
snapshot_id = self.get_snapshot(ebs_volume)
except exceptions.MissingSnapshot, e:
snapshot_id = None
logger.info('mounting a volume to %s with snapshot %s',
ebs_volume.mount_point, snapshot_id)
# create the device and attach
boto_volume = self.create_volume(ebs_volume, snapshot_id=snapshot_id)
# attach the volume to the instance
self.attach_volume(ebs_volume, boto_volume)
# if it's not from a snapshot we need to format
if snapshot_id is None:
ebs_volume.format()
# mount the volume
ebs_volume.mount()
def unmount_snapshots(self, volumes=None):
'''
Unmounting the volumes, mainly for testing
'''
volumes = volumes or self.get_volumes()
self.pre_unmounts(volumes)
logger.info('unmounting volumes %s', volumes)
for vol in volumes:
# first unmount
self.pre_unmount(vol)
try:
vol.unmount()
except exceptions.UnmountException, e:
logger.warn(e)
try:
# now detach
volume_id = self.get_volume_id(vol)
self.detach_volume(vol, volume_id)
except Exception, e:
logger.warn(e)
self.post_unmount(vol)
self.post_unmounts(volumes)
return volumes
'''
Volume related functionality
'''
def create_volume(self, vol, snapshot_id=None):
'''
Creates a volume and attaches it to this instance
If given a snapshot id, populates from the snapshot, else
formats the volume first
Subsequently mounts the volume to the given mount point
'''
# catch this at a higher level if we want to skip
if os.path.exists(vol.instance_device):
error_message = 'Device %s already exists' % vol.instance_device
error_message += '\n run with --ignore-mounted to proceed'
raise exceptions.DeviceAlreadyExists(error_message)
# we always create a new volume when mounting upon boot
# load from a snapshot if we have one
log_message = 'Creating a volume of size %s in zone %s from snapshot %s'
logger.info(log_message, vol.size, self.availability_zone, snapshot_id)
# tell boto about the iops if we want them :)
kwargs = dict()
if vol.iops:
kwargs['iops'] = vol.iops
boto_volume = self.con.create_volume(size=vol.size,
zone=self.availability_zone,
snapshot=snapshot_id,
volume_type=vol.volume_type,
**kwargs
)
# tag the volume
tags = self.get_tags_for_volume(vol)
logger.info('tagging volume %s with tags %s', boto_volume.id, tags)
add_tags(boto_volume, tags)
        logger.info('tags added successfully')
return boto_volume
def attach_volume(self, ebs_volume, boto_volume):
'''
Attaches the given boto_volume class to the running instance
'''
if os.path.exists(ebs_volume.instance_device):
logger.warn("The device %s already exists.",
ebs_volume.instance_device)
# waiting till the volume is available
waited = 0
MAX_VOLUME_AVAILABLE_WAIT = 45
while boto_volume.update() != 'available' and waited < MAX_VOLUME_AVAILABLE_WAIT:
logger.info(
'Waiting for volume to become available %s' % boto_volume.id)
sleep(1)
waited += 1
# attaching a volume to our instance
message_format = 'Attaching volume %s to instance %s'
logger.info(message_format, boto_volume.id, self.instance_id)
self.con.attach_volume(
boto_volume.id, self.instance_id, ebs_volume.device)
logger.info('Starting to poll till volume is fully attached')
# drink some coffee and wait
waited = 0
MAX_ATTACHMENT_WAIT = 45
while boto_volume.update() != 'in-use' and waited < MAX_ATTACHMENT_WAIT:
logger.info('Waiting for volume attachment: %s' % boto_volume.id)
sleep(1)
waited += 1
while not os.path.exists(ebs_volume.instance_device) and waited < MAX_ATTACHMENT_WAIT:
logger.info('Waiting for device: %s' % ebs_volume.instance_device)
sleep(1)
waited += 1
        if waited == MAX_ATTACHMENT_WAIT:
            error_format = 'Device did not attach within %s seconds'
            raise exceptions.AttachmentException(
                error_format % MAX_ATTACHMENT_WAIT)
return boto_volume
def detach_volume(self, ebs_volume, volume_id):
detached = False
MAX_DETACHMENT_WAIT = 45
waited = 0
logger.info('now detaching %s', volume_id)
while os.path.exists(ebs_volume.instance_device) and waited < MAX_DETACHMENT_WAIT:
logger.info('Waiting for device to detach: %s' %
ebs_volume.instance_device)
detached = self.con.detach_volume(volume_id)
sleep(1)
waited += 1
        if waited == MAX_DETACHMENT_WAIT:
            error_format = 'Device did not detach within %s seconds'
            raise exceptions.DetachmentException(
                error_format % MAX_DETACHMENT_WAIT)
return detached
def get_bdm(self):
bdm = self.con.get_instance_attribute(
self.instance_id, 'blockDeviceMapping')
return bdm
def get_expiration_tags(self):
tags = {
'expires': str(datetime.now() + timedelta(days=self.SNAPSHOT_EXPIRY_DAYS)),
'created': str(datetime.now()),
}
return tags
def get_tags_for_volume(self, volume):
'''
Includes
- filter tags (role, cluster, environment)
- expiration tags (expires, created)
- mount tag (mount point)
- instance tag (for debugging)
'''
filter_tags = self.get_filter_tags()
expiration_tags = self.get_expiration_tags()
tags = dict(
instance_id=self.instance_id,
mount_point=volume.mount_point,
)
tags.update(filter_tags)
tags.update(expiration_tags)
return tags
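    # Illustrative result (values depend on the instance userdata and the volume):
    #   {'role': 'db', 'cluster': 'feed', 'environment': 'prod',
    #    'instance_id': 'i-12345678', 'mount_point': '/mnt/test',
    #    'created': '2014-01-01 12:00:00', 'expires': '2014-01-08 12:00:00'}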
def get_volume_id(self, vol):
bdm_mapping = self.bdm['blockDeviceMapping']
try:
volume_id = bdm_mapping[vol.device].volume_id
except KeyError:
msg = '%s not found in block device mapping %s' % (
vol.device, bdm_mapping)
raise exceptions.MissingVolume(msg)
return volume_id
def get_cached_snapshots(self):
if not getattr(self, '_snapshots', None):
tags = self.get_filter_tags()
filters = {}
for key, value in tags.iteritems():
filters['tag:%s' % key] = value
snapshots = self.con.get_all_snapshots(filters=filters)
self._snapshots = snapshots
return self._snapshots
def get_snapshot(self, vol):
""" Returns the ID of the most recent snapshot that matches the given tags, or None
if no snapshots were found.
tags is a dict, used to filter the results from get_all_snapshots.
This relies on the fact that the API returns snapshots in the order they
are created, so we can just return the last element of the list.
"""
all_snapshots = self.get_cached_snapshots()
all_snapshots.sort(key=lambda s: s.start_time, reverse=True)
volume_snapshots = [s for s in all_snapshots if s.tags.get(
'mount_point') == vol.mount_point]
try:
latest_snapshot = volume_snapshots[0]
except IndexError, e:
raise exceptions.MissingSnapshot(e.message)
return latest_snapshot
def get_snapshot_description_string(self):
'''
Example, Redis.goteam.be snapshot of /mnt/persistent/
'''
return "%(cluster)s snapshot of %(mount_point)s"
def get_snapshot_description(self, vol):
format_dict = dict(
mount_point=vol.mount_point
)
format_dict.update(self.userdata)
snapshot_name = self.get_snapshot_description_string() % format_dict
snapshot_name = snapshot_name.replace('_', '-')
return snapshot_name
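    # For example (hypothetical userdata), cluster 'redis_cache' and mount point
    # '/mnt/persistent' yield 'redis-cache snapshot of /mnt/persistent', since
    # underscores are replaced by hyphens after formatting.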
'''
Shortcuts
'''
@property
def instance_id(self):
instance_id = self.metadata['instance-id']
return instance_id
@property
def availability_zone(self):
availability_zone = self.metadata['placement']['availability-zone']
return availability_zone
'''
Section with Hooks
'''
def pre_mounts(self, volumes):
pass
def post_mounts(self, volumes):
pass
def pre_mount(self, vol):
pass
def post_mount(self, vol):
pass
def pre_unmounts(self, volumes):
pass
def post_unmounts(self, volumes):
pass
def pre_unmount(self, vol):
pass
def post_unmount(self, vol):
pass
def pre_snapshots(self, volumes):
pass
def post_snapshots(self, volumes):
pass
def pre_snapshot(self, vol):
pass
def post_snapshot(self, vol):
pass
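# Illustrative usage sketch (not part of the library; assumes the instance has
# the role/cluster/environment userdata described in the class docstring):
#
#   snapshotter = Snapshotter()
#   snapshotter.mount_snapshots(ignore_mounted=True)   # on boot
#   snapshotter.make_snapshots()                        # from a periodic backup job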
|
|
# Copyright (c) 2010, individual contributors (see AUTHORS file)
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import random
from dashboard.forms import LoginForm, RegistrationForm, ForgotPasswordForm
from dashboard.models import Contributor, Event, UserInfo
from django.contrib import auth
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.template import RequestContext
from django.shortcuts import render_to_response, get_object_or_404, redirect
from django.contrib.auth.decorators import login_required
from hashlib import md5
from dashboard.views import projects
from observatory.settings import RECAPTCHA_PUBLIC, RECAPTCHA_PRIVATE
from observatory.lib.recaptcha.client import captcha
from emaillist.methods import send_mail
from emaillist.models import EmailAddress
from django.contrib.auth import *
import random
from random import choice
from settings import MAIL_SENDER
PEOPLE_ADJECTIVES = ["awesome",
"stylish",
"great",
"excellent",
"wonderful",
"amazing",
"impressive",
"tremendous",
"innovative",
"inventive",
"creative",
"sensational"]
ADJ_COUNT = len(PEOPLE_ADJECTIVES)
# display the list of users
def people(request):
people = User.objects.order_by("info__mentor").reverse().exclude(is_active = False).exclude(project=None, info__mentor=False)
return render_to_response("users/people.html", {
"people": people,
"adjective": PEOPLE_ADJECTIVES[random.randint(0, ADJ_COUNT - 1)],
}, context_instance = RequestContext(request))
# display the list of past users
def past_people(request):
people = User.objects.order_by("info__mentor").reverse().exclude(is_active = True).exclude(project=None, info__mentor=False)
return render_to_response("users/past_people.html", {
"people": people,
"adjective": PEOPLE_ADJECTIVES[random.randint(0, ADJ_COUNT - 1)],
}, context_instance = RequestContext(request))
# makes a user inactive and moves them to past users
def deactivate(request, user_id):
user = get_object_or_404(User, id = user_id)
user.is_active = False
user.save()
try:
contributor = Contributor.objects.get(user = user)
except:
contributor = None
try:
is_self = user.id == request.user.id
except:
is_self = False
return render_to_response('users/profile.html', {
'user_page': user,
'contributor': contributor,
'is_self': is_self
}, context_instance = RequestContext(request))
# makes a user active and moves them to users
def activate(request, user_id):
user = get_object_or_404(User, id = user_id)
user.is_active = True
user.save()
try:
contributor = Contributor.objects.get(user = user)
except:
contributor = None
try:
is_self = user.id == request.user.id
except:
is_self = False
return render_to_response('users/profile.html', {
'user_page': user,
'contributor': contributor,
'is_self': is_self
}, context_instance = RequestContext(request))
# display the list of past users
def past_people(request):
people = User.objects.all().exclude(is_active = True)
return render_to_response("users/past_people.html", {
"people": people
}, context_instance = RequestContext(request))
# gives user mentor status
@login_required
def mentorize(request, user_id):
if request.user.info.mentor:
user = get_object_or_404(User, id = user_id)
user_info = get_object_or_404(UserInfo, user = user)
user_info.mentor = True
user_info.save()
return redirect('/user/'+str(user_id))
# removes mentor status
@login_required
def dementorize(request, user_id):
# Ensure logged in user is a mentor
if request.user.info.mentor:
user = get_object_or_404(User, id = user_id)
user_info = get_object_or_404(UserInfo, user = user)
user_info.mentor = False
user_info.save()
return redirect('/user/'+str(user_id))
# display's the user's profile
def profile(request, user_id):
user = get_object_or_404(User, id = user_id)
user_info = get_object_or_404(UserInfo, user = user)
is_mentor = user_info.mentor
try:
contributor = Contributor.objects.get(user = user)
except:
contributor = None
try:
is_self = user.id == request.user.id
except:
is_self = False
return render_to_response('users/profile.html', {
'user_page': user,
'contributor': contributor,
'is_self': is_self,
'is_mentor': is_mentor
}, context_instance = RequestContext(request))
# displays both the login and registration forms. If there is an error with the
# selected form, the user is redirected to a page with only that form.
def login_or_reg(request):
next = reverse(projects.list)
if 'next' in request.GET:
next = request.GET['next']
reg_form = RegistrationForm(auto_id = "id_login_%s")
login_form = LoginForm(auto_id = "id_login_%s")
return render_to_response('users/login-register.html', {
'next': next,
'js_page_id': 'login-register',
'reg_form': reg_form,
'login_form': login_form,
'RECAPTCHA_PUBLIC': RECAPTCHA_PUBLIC,
'RECAPTCHA_PRIVATE': RECAPTCHA_PRIVATE
}, context_instance = RequestContext(request))
# displays a registration form
def register(request):
if request.method == "POST":
class RegisterError:
pass
try:
form = RegistrationForm(request.POST)
if not form.is_valid():
error_header = "That's not quite right."
raise RegisterError()
if len(User.objects.filter(email = form.cleaned_data["email"])) > 0:
error_header = "That email is already registered."
raise RegisterError()
if form.cleaned_data['password'] != request.POST['password_confirm']:
error_header = "Your passwords didn't match."
raise RegisterError()
      # validate the captcha if recaptcha is enabled
if RECAPTCHA_PUBLIC is not None:
capt = captcha.submit(request.POST["recaptcha_challenge_field"],
request.POST["recaptcha_response_field"],
RECAPTCHA_PRIVATE,
request.META["REMOTE_ADDR"])
if not capt.is_valid:
error_header = "Let's try that captcha again."
raise RegisterError()
resp = create_user(request, form)
return resp
except RegisterError:
pass
# GET
else:
error_header = None
form = RegistrationForm()
return render_to_response('users/register.html', {
'next': reverse(projects.list),
'reg_form': form,
'error_header': error_header,
'RECAPTCHA_PUBLIC': RECAPTCHA_PUBLIC,
'RECAPTCHA_PRIVATE': RECAPTCHA_PRIVATE
}, context_instance = RequestContext(request))
# creates a user, submitted from register
def create_user(request, form):
data = form.cleaned_data
# use an md5 of the email as a username
m = md5()
m.update(data["email"])
# if it's ok, register the user
user = User.objects.create_user(m.hexdigest()[0:30],
data['email'],
data['password'])
# set the user's first/last names
user.first_name = data['first_name']
user.last_name = data['last_name']
# save the user
user.save()
#Add additional info
info = UserInfo(user=user, mentor=False)
info.save()
#Add email information
m = EmailAddress(address=data['email'], user=user)
m.save()
# search past events for the user's email
for event in Event.objects.filter(author_email__iexact = user.email,
author = None):
event.author = user
event.save()
# search past events for the user's first and last name
name = user.get_full_name()
for event in Event.objects.filter(author_name__iexact = name, author = None):
event.author = user
event.save()
# search contributors for the user's name and email
for contrib in Contributor.objects.filter(email__iexact = user.email,
user = None):
contrib.user = user
contrib.save()
for contrib in Contributor.objects.filter(name__iexact = name, user = None):
contrib.user = user
contrib.save()
# log the user in (since we can't send emails for validation AFAIK)
user = auth.authenticate(username = user.username,
password = data['password'])
auth.login(request, user)
return HttpResponseRedirect(request.POST['next'])
class LoginError:
def __init__(self, username_correct):
    self.username_correct = username_correct
# allows a user to login
def login(request):
next = reverse(projects.list)
error_header = None
if request.method == 'POST':
if 'next' in request.POST:
next = request.POST['next']
login_form = LoginForm(request.POST, auto_id = "id_login_%s")
if login_form.is_valid():
try:
data = login_form.cleaned_data
# query for a user via email
try:
user = User.objects.get(email = data['email'])
except:
error_header = "{0} isn't registered.".format(data['email'])
raise LoginError(False)
# authenticate that user
user = auth.authenticate(username = user.username,
password = data['password'])
        # if the password is incorrect, redirect to the login page
if user is None:
error_header = "Invalid password."
raise LoginError(True)
# otherwise, log the user in
auth.login(request, user)
return HttpResponseRedirect(next)
except LoginError as e:
pass
except:
raise
else:
login_form = LoginForm(auto_id = "id_login_%s")
return render_to_response('users/login.html', {
'next': next,
'error_header': error_header,
'login_form': login_form
}, context_instance = RequestContext(request))
# logs out a user
def logout(request):
auth.logout(request)
return HttpResponseRedirect(reverse(projects.list))
# forgot password
def forgot_password(request):
forgot_password_form = ForgotPasswordForm(request.POST, auto_id="id_%s")
if request.method == 'POST':
if forgot_password_form.is_valid():
data = forgot_password_form.cleaned_data
try:
user = User.objects.get(email = data['email'])
except:
forgot_password_form = ForgotPasswordForm(auto_id="id_%s")
return render_to_response('users/forgot_password.html', {
'forgot_password_form': forgot_password_form
}, context_instance = RequestContext(request))
random.seed()
new_pass = ''.join([choice('qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM1234567890') for i in range(8)])
user.set_password(new_pass)
user.save()
mailmsg = ("Hello " + user.first_name + ",\n\nAs requested, here is a new password for you to use to login to Observatory: \n" + new_pass + "\n\n")
send_mail('New Password for Observatory', mailmsg, MAIL_SENDER,
[user.email], fail_silently=False)
return HttpResponseRedirect(reverse(forgot_password_success))
else:
return render_to_response('users/forgot_password.html', {
'forgot_password_form': forgot_password_form
}, context_instance = RequestContext(request))
else:
forgot_password_form = ForgotPasswordForm(auto_id="id_%s")
return render_to_response('users/forgot_password.html', {
'forgot_password_form': forgot_password_form
}, context_instance = RequestContext(request))
def forgot_password_success(request):
return render_to_response('users/forgot_password_success.html')
|
|
from graphviz import Digraph
import random
import imageio
imageio.plugins.ffmpeg.download()
from moviepy.editor import *
import numpy as np
class ReactionPathDiagram():
"""
    Initializes the values of concentrations, rates and species of a system of elementary reactions.
INPUTS
=======
target - Target file location of generated graph
obj - Object of class Reaction/ReactionSystem which obtains the above values from prior calculations
    integrate - If True, product concentrations and reaction rates are taken at 'time' by integrating the system; otherwise constant node sizes and the final reaction rates are used
EXAMPLE USAGE
=========
- ReactionPathDiagram("\results\target_file_location", obj_Reaction)
- ReactionPathDiagram("\results\target_file_location", obj_Reaction_System)
"""
def __init__(self, target, obj, integrate=False, time=None, cluster=False):
#Get unique species of the system from other class function
self.unique_species = [i[0] for i in obj.involved_species.items()]
#Get system reaction types from other class function
self.types = [i.is_reversible for i in obj.reaction_list]
self.reactants = []
#Get reactant species of the system from other class function
for i in obj.reaction_list:
temp_reaction = []
for j in i.reactant_stoich_coeffs.items():
temp_reaction.append(j[0])
self.reactants.append(temp_reaction)
self.products = []
#Get product species of the system from other class function
for i in obj.reaction_list:
temp_reaction = []
for j in i.product_stoich_coeffs.items():
temp_reaction.append(j[0])
self.products.append(temp_reaction)
        #Check that the number of reactions is consistent with the number of species lists
if len(self.reactants)!=len(self.products) or len(self.products)!=len(self.types):
raise ValueError("No# of reaction system elements must be consistent.")
#Get reactant concentrations of the system from other class function
self.reactant_concentrations = obj.vis_concentrations
#Check if Reactant Concentrations are Positive
if sum([1 if i[1]<0 else 0 for i in self.reactant_concentrations.items()])!=0:
raise ValueError("Specie Concentrations must be positive.")
self.max_node_size = 5
self.arrow_max_width = 5
        #If the integrate flag is set, get product concentrations and reaction rates
        #at 'time'; otherwise use a constant node size and the final reaction rates.
if integrate==True:
temp_conc = obj.step(time)[1]
self.product_concentrations = dict([(i,temp_conc[ind]) for ind, i in enumerate(self.unique_species)])
temp_rates = obj.get_reaction_rate()
self.reaction_rates = dict([(i,temp_rates[ind]) for ind, i in enumerate(self.unique_species)])
else:
self.product_concentrations = dict([(i,self.max_node_size) for ind, i in enumerate(self.unique_species)])
temp_rates = obj.get_reaction_rate()
self.reaction_rates = dict([(i,temp_rates[ind]) for ind, i in enumerate(self.unique_species)])
"""
#Check if Reactant Concentrations are Positive
if sum([1 if i[1]<0 else 0 for i in self.product_concentrations.items()])!=0:
raise ValueError("Specie Concentrations must be positive.")
"""
self.fitted = False
self.connected = False
self.connections = []
if cluster :
self.cluster = True
self.graph = Digraph(target, format='png')
self.graph.attr('node', shape='doublecircle')
self.graph.attr(label='Reaction Path Diagram')
#self.graph.attr(size='20,20!')
self.color_index = self.initialize_color_index()
self.tag_reactant = " | R"
self.tag_product = " | P "
else:
self.cluster = False
self.reac_graph = Digraph(target, format='png')
self.reac_graph.attr('node', shape='doublecircle')
self.reac_graph.attr(color='lightgrey')
self.reac_graph.attr(size='20,20!')
self.reac_graph.attr(label='Reaction Path Diagram')
self.prod_graph = Digraph('subgraph')
#self.prod_graph.attr(size='20,20!')
self.color_index = self.initialize_color_index()
self.tag_reactant = " | R"
self.tag_product = " | P "
def fit(self):
"""
Method to define graphical nodes for each unique specie at the reactant and
product end. For each connection, a "hex-tuple" of reactant, product, type of reaction,
reactant_reaction_rate and product_reaction_rate is defined.
EXAMPLE USAGE
=========
*Prior*
graph = ReactionPathDiagram(target, obj)
---------
graph.fit()
"""
for index in range(len(self.types)):
temp_type = self.types[index]
temp_reactants = self.reactants[index]
temp_products = self.products[index]
for i in temp_reactants:
temp_reac_rate = self.reaction_rates[i]
for j in temp_products:
temp_prod_rate = self.reaction_rates[j]
connection = (i, j, temp_type, temp_reac_rate, temp_prod_rate, index)
self.connections.append(connection)
self.fitted = True
def connect(self, graphics_dict=None,
size=5, separate=False):
"""
Method to make defined connections between system node with specific graphics.
INPUTS
=======
        graphics_dict :
'node_color' : If True, the nodes of each specie assume a color specific to the reactants and products
'rate': If True, the reaction rate of the specie is displayed
'arrow_size': If True, the thickness of the arrow is normalized for the reaction rate
'arrow_color': If True, the colors of the arrows are different for individual reactions
'init_con': If True, the size of the reactant nodes is set to initial concentration, else constant size
'prod_con': If True, the size of the product nodes is set to final concentration*, else constant size
*integrator needs to be implemented for this feature
size = 1, constant size of nodes
separate = If True, the reactant and product nodes for each specie are separate.
EXAMPLE USAGE
=========
*Prior*
graph = ReactionPathDiagram(target, obj)
graph.fit() *Prior*
graphics_dict = {'node_color':True,'rate':False,'arrow_size':False,
'arrow_color':True,'init_con':False,'prod_con': False}
---------------
graph.connect(graphics_dict, time=None, size=1, separate = True)
"""
#Check if graph connected
if self.fitted == False:
raise AttributeError("Please call fit() method first.")
if graphics_dict == None:
raise AttributeError("Graphics dictionary not passed.")
#Check if graphics dictionary is in readable form
if sum([0 if (i[1]==True or i[1]==False) else 1 for i in graphics_dict.items()])!=0:
raise ValueError("Graphics Dictionary must contain only True (1) or False (0) values.")
#Display Product Concentration if True else constant
if graphics_dict['prod_con']==True:
prod_conc = self.product_concentrations
else:
prod_conc = dict([(i,size) for ind, i in enumerate(self.unique_species)])
#Display Reactant Concentration if True else constant
if graphics_dict['init_con']==True:
reac_conc = self.reactant_concentrations
else:
reac_conc = dict([(i,size) for ind, i in enumerate(self.unique_species)])
#Build Nodes
if self.cluster:
self.build_nodes_cluster(graphics_dict, separate, reac_conc, prod_conc, reac_color="Green", prod_color="Red")
else:
self.build_nodes_free(graphics_dict, separate, reac_conc, prod_conc, reac_color="Green", prod_color="Red")
#Build Connections
for connection in self.connections:
if separate:
org = connection[0]+self.tag_reactant
dest = connection[1]+self.tag_product
else:
org = connection[0]
dest = connection[1]
graphics = self.get_graphics(graphics_dict, connection)
if self.cluster:
self.graph.edge(org, dest, **graphics)
else:
self.reac_graph.edge(org, dest, **graphics)
#Add Product Subgraph
if separate and not self.cluster:
self.reac_graph.subgraph(self.prod_graph)
self.connected = True
def build_nodes_cluster(self, graphics_dict, separate, reac_conc, prod_conc, reac_color, prod_color):
"""
Helper method to build nodes with specific concentrations and graphics in cluster formation.
INPUTS
=======
        graphics_dict :
'node_color' : If True, the nodes of each specie assume a color specific to the reactants and products
'rate': If True, the reaction rate of the specie is displayed
'arrow_size': If True, the thickness of the arrow is normalized for the reaction rate
'arrow_color': If True, the colors of the arrows are different for individual reactions
'init_con': If True, the size of the reactant nodes is set to initial concentration, else constant size
'prod_con': If True, the size of the product nodes is set to final concentration*, else constant size
*integrator needs to be implemented for this feature
separate = If True, the reactant and product nodes for each specie are separate.
reac_conc = Initialized value from user.
prod_conc = As calculated through integration.
reac_color = "Green", pre-defined
prod_color = "Red", pre-defined
"""
max_conc_reac = max(reac_conc.items(), key=lambda x: x[1])[1]
max_conc_prod = max(prod_conc.items(), key=lambda x: x[1])[1]
#Check if graph needs to be separated
if separate:
#Define Reactant Cluster
with self.graph.subgraph(name='cluster_reactant') as c:
c.attr(color=reac_color)
c.attr(label='Reactants')
for index, specie in enumerate(self.unique_species):
temp_size = str((reac_conc[specie]/max_conc_reac)*self.max_node_size)
if graphics_dict['node_color']==True:
c.node(specie+self.tag_reactant, **{'width':temp_size, 'height':temp_size}, color=reac_color)
else:
c.node(specie+self.tag_reactant, **{'width':temp_size, 'height':temp_size})
#Define Product Cluster
with self.graph.subgraph(name='cluster_product') as c:
c.attr(color=prod_color)
c.attr(label='Products')
for index, specie in enumerate(self.unique_species):
temp_size = str((prod_conc[specie]/max_conc_prod)*self.max_node_size)
if graphics_dict['node_color']==True:
c.node(specie+self.tag_product, **{'width':temp_size, 'height':temp_size}, color=prod_color)
else:
c.node(specie+self.tag_product, **{'width':temp_size, 'height':temp_size})
else:
#Define Single Cluster
for index, specie in enumerate(self.unique_species):
temp_size = str((prod_conc[specie]/max_conc_prod)*self.max_node_size)
if graphics_dict['node_color']==True:
self.graph.node(specie, **{'width':temp_size, 'height':temp_size}, color=reac_color)
else:
self.graph.node(specie, **{'width':temp_size, 'height':temp_size})
def build_nodes_free(self, graphics_dict, separate, reac_conc, prod_conc, reac_color, prod_color):
"""
Helper method to build nodes with specific concentrations and graphics, free positioning.
INPUTS
=======
        graphics_dict :
'node_color' : If True, the nodes of each specie assume a color specific to the reactants and products
'rate': If True, the reaction rate of the specie is displayed
'arrow_size': If True, the thickness of the arrow is normalized for the reaction rate
'arrow_color': If True, the colors of the arrows are different for individual reactions
'init_con': If True, the size of the reactant nodes is set to initial concentration, else constant size
'prod_con': If True, the size of the product nodes is set to final concentration*, else constant size
*integrator needs to be implemented for this feature
separate = If True, the reactant and product nodes for each specie are separate.
reac_conc = Initialized value from user.
prod_conc = As calculated through integration.
reac_color = "Green", pre-defined
prod_color = "Red", pre-defined
"""
max_conc_reac = max(reac_conc.items(), key=lambda x: x[1])[1]
max_conc_prod = max(prod_conc.items(), key=lambda x: x[1])[1]
#Check if graph needs to be separated for reactants and products
if separate:
for index, specie in enumerate(self.unique_species):
if graphics_dict['node_color']==True:
temp_size = str((reac_conc[specie]/max_conc_reac)*self.max_node_size)
self.reac_graph.node(specie+self.tag_reactant, **{'width':temp_size, 'height':temp_size}, color=reac_color)
temp_size = str((prod_conc[specie]/max_conc_prod)*self.max_node_size)
self.prod_graph.node(specie+self.tag_product, **{'width':temp_size, 'height':temp_size}, color=prod_color)
else:
temp_size = str((reac_conc[specie]/max_conc_reac)*self.max_node_size)
self.reac_graph.node(specie+self.tag_reactant, **{'width':temp_size, 'height':temp_size})
temp_size = str((prod_conc[specie]/max_conc_prod)*self.max_node_size)
self.prod_graph.node(specie+self.tag_product, **{'width':temp_size, 'height':temp_size})
else:
for index, specie in enumerate(self.unique_species):
temp_size = str((prod_conc[specie]/max_conc_prod)*self.max_node_size)
if graphics_dict['node_color']==True:
self.reac_graph.node(specie, **{'width':temp_size, 'height':temp_size}, color=reac_color)
else:
self.reac_graph.node(specie, **{'width':temp_size, 'height':temp_size})
def get_graphics(self, graphics_dict, connection):
"""
Helper method to get specific graphics for each connection.
INPUTS
=======
        graphics_dict :
'node_color' : If True, the nodes of each specie assume a color specific to the reactants and products
'rate': If True, the reaction rate of the specie is displayed
'arrow_size': If True, the thickness of the arrow is normalized for the reaction rate
'arrow_color': If True, the colors of the arrows are different for individual reactions
'init_con': If True, the size of the reactant nodes is set to initial concentration, else constant size
'prod_con': If True, the size of the product nodes is set to final concentration*, else constant size
*integrator needs to be implemented for this feature
connection = (reactant, product, reaction_type, reactant_reaction_rate, product_reaction_rate, reaction_index)
"""
#Initiate final graphics dictionary
graphics = {}
#Get connection specific graphics
for i in graphics_dict.items():
if i[0]=='rate' and i[1]==True:
label = str(connection[3]) + ", " + str(connection[4])
graphics['label'] = label
elif i[0]=='arrow_size' and i[1]==True and connection[2]==False:
max_rate = max(self.reaction_rates.items(), key=lambda x: x[1])[1]
graphics['penwidth'] = str(abs(connection[3]/max_rate)*self.arrow_max_width)
elif i[0]=='arrow_size' and i[1]==True and connection[2]==True:
max_rate = max(self.reaction_rates.items(), key=lambda x: x[1])[1]
graphics['penwidth'] = str(abs((connection[3]+connection[4])/(2*max_rate))*self.arrow_max_width)
elif i[0]=='arrow_color' and i[1]==True:
graphics['color'] = self.color_index[connection[5]]
#Check for Reversible
if connection[2]==True:
graphics['dir']= 'both'
return graphics
def initialize_color_index(self):
"""
Helper method to initialize different colors for each reaction index.
"""
#Initialize color dictionary for edges and set random state
color_dict = {}
rstate = np.random.RandomState(9000)
random_func = lambda: rstate.randint(0,255)
#Get a number of colors randomly from hexadecimal color representation
for i in range(25):
color = '#%02X%02X%02X' % (random_func(),random_func(),random_func())
color_dict[i] = color
return color_dict
def plot(self, test=False):
"""
Method to display and save generated graph.
EXAMPLE USAGE
=========
*Prior*
graph = ReactionPathDiagram(target, obj)
graph.fit()
graphics_dict = {'node_color':True,'rate':False,'arrow_size':False,
'arrow_color':True,'init_con':False,'prod_con': False}
graph.connect(graphics_dict, time=None, size=1, separate = True)
-----------
graph.plot()
"""
#Check if graph connected
if self.connected == False:
raise AttributeError("Please call connect() method first.")
#Display and save graph in directory
if self.cluster:
self.graph.view()
else:
self.reac_graph.view()
def create_video(self, img_list, target):
"""
Method to generate video of generated graph images.
INPUTS
======
img_list : List of image locations.
target : Target location for storing generated video.
EXAMPLE USAGE
=========
*Prior*
graph = ReactionPathDiagram(target, obj)
graph.fit()
graphics_dict = {'node_color':True,'rate':False,'arrow_size':False,
'arrow_color':True,'init_con':False,'prod_con': False}
graph.connect(graphics_dict, time=None, size=1, separate = True)
graph.plot()
-----------
images = ['results/final1.gv.png','results/final2.gv.png', 'results/final3.gv.png' ]
graph.create_video(images, "results/video")
"""
#Check if image list is empty
if img_list==[]:
raise ValueError("Image list empty!")
#Create and concatenate image clips
clips = [ImageClip(img).set_duration(0.5) for img in img_list]
concat_clip = concatenate_videoclips(clips, method="compose")
concat_clip.write_videofile(target+".mp4", fps=24)
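# Illustrative end-to-end sketch, mirroring the EXAMPLE USAGE docstrings above
# ('obj' is a previously built Reaction/ReactionSystem; the target path is hypothetical):
#
#   graph = ReactionPathDiagram("results/final1.gv", obj, integrate=False)
#   graph.fit()
#   graphics_dict = {'node_color': True, 'rate': False, 'arrow_size': False,
#                    'arrow_color': True, 'init_con': False, 'prod_con': False}
#   graph.connect(graphics_dict, size=1, separate=True)
#   graph.plot()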
|
|
import os.path
import platform
import sys
import webbrowser
from datetime import datetime
import argparse
try:
import simplejson as json
except ImportError:
import json
from dogshell.common import report_errors, report_warnings, CommandLineClient, print_err
class DashClient(CommandLineClient):
def setup_parser(self, subparsers):
parser = subparsers.add_parser('dashboard', help='Create, edit, and delete dashboards.')
parser.add_argument('--string_ids', action='store_true', dest='string_ids',
help='Represent Dashboard IDs as strings instead of ints in JSON')
verb_parsers = parser.add_subparsers(title='Verbs')
post_parser = verb_parsers.add_parser('post', help='Create dashboards.')
post_parser.add_argument('title', help='title for the new dashboard')
post_parser.add_argument('description', help='short description of the dashboard')
post_parser.add_argument('graphs', help='graph definitions as a JSON string. if unset, reads from stdin.', nargs="?")
post_parser.add_argument('--template_variables', type=_template_variables, default=[],
help='a json list of template variable dicts, e.g. \
\'[{"name": "host", "prefix": "host", "default": "host:my-host"}]\'')
post_parser.set_defaults(func=self._post)
update_parser = verb_parsers.add_parser('update', help='Update existing dashboards.')
update_parser.add_argument('dashboard_id', help='dashboard to replace with the new definition')
update_parser.add_argument('title', help='new title for the dashboard')
update_parser.add_argument('description', help='short description of the dashboard')
update_parser.add_argument('graphs', help='graph definitions as a JSON string. if unset, reads from stdin.', nargs="?")
update_parser.add_argument('--template_variables', type=_template_variables, default=[],
help='a json list of template variable dicts, e.g. \
\'[{"name": "host", "prefix": "host", "default": "host:my-host"}]\'')
update_parser.set_defaults(func=self._update)
show_parser = verb_parsers.add_parser('show', help='Show a dashboard definition.')
show_parser.add_argument('dashboard_id', help='dashboard to show')
show_parser.set_defaults(func=self._show)
show_all_parser = verb_parsers.add_parser('show_all', help='Show a list of all dashboards.')
show_all_parser.set_defaults(func=self._show_all)
pull_parser = verb_parsers.add_parser('pull', help='Pull a dashboard on the server into a local file')
pull_parser.add_argument('dashboard_id', help='ID of dashboard to pull')
pull_parser.add_argument('filename', help='file to pull dashboard into') # , type=argparse.FileType('wb'))
pull_parser.set_defaults(func=self._pull)
pull_all_parser = verb_parsers.add_parser('pull_all', help='Pull all dashboards into files in a directory')
pull_all_parser.add_argument('pull_dir', help='directory to pull dashboards into')
pull_all_parser.set_defaults(func=self._pull_all)
push_parser = verb_parsers.add_parser('push', help='Push updates to dashboards from local files to the server')
push_parser.add_argument('--append_auto_text', action='store_true', dest='append_auto_text',
help='When pushing to the server, appends filename and timestamp to the end of the dashboard description')
push_parser.add_argument('file', help='dashboard files to push to the server', nargs='+', type=argparse.FileType('r'))
push_parser.set_defaults(func=self._push)
new_file_parser = verb_parsers.add_parser('new_file', help='Create a new dashboard and put its contents in a file')
new_file_parser.add_argument('filename', help='name of file to create with empty dashboard')
new_file_parser.set_defaults(func=self._new_file)
web_view_parser = verb_parsers.add_parser('web_view', help='View the dashboard in a web browser')
web_view_parser.add_argument('file', help='dashboard file', type=argparse.FileType('r'))
web_view_parser.set_defaults(func=self._web_view)
delete_parser = verb_parsers.add_parser('delete', help='Delete dashboards.')
delete_parser.add_argument('dashboard_id', help='dashboard to delete')
delete_parser.set_defaults(func=self._delete)
def _pull(self, args):
self._write_dash_to_file(args.dashboard_id, args.filename, args.timeout, args.format, args.string_ids)
def _pull_all(self, args):
self.dog.timeout = args.timeout
def _title_to_filename(title):
# Get a lowercased version with most punctuation stripped out...
no_punct = ''.join([c for c in title.lower() if c.isalnum() or c in [" ", "_", "-"]])
            # Now replace spaces and hyphens with "_", and strip leading/trailing "_"
return no_punct.replace(" ", "_").replace("-", "_").strip("_")
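        # e.g. _title_to_filename("My Dash-Board!") -> "my_dash_board"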
format = args.format
res = self.dog.dashboards()
report_warnings(res)
report_errors(res)
if not os.path.exists(args.pull_dir):
os.mkdir(args.pull_dir, 0o755)
used_filenames = set()
for dash_summary in res['dashes']:
filename = _title_to_filename(dash_summary['title'])
if filename in used_filenames:
filename = filename + "-" + dash_summary['id']
used_filenames.add(filename)
self._write_dash_to_file(dash_summary['id'],
os.path.join(args.pull_dir, filename + ".json"),
args.timeout,
format,
args.string_ids)
if format == 'pretty':
print(("\n### Total: {0} dashboards to {1} ###"
.format(len(used_filenames), os.path.realpath(args.pull_dir))))
def _new_file(self, args):
self.dog.timeout = args.timeout
format = args.format
res = self.dog.create_dashboard(args.filename,
"Description for {0}".format(args.filename), [])
report_warnings(res)
report_errors(res)
self._write_dash_to_file(res['dash']['id'], args.filename, args.timeout, format, args.string_ids)
if format == 'pretty':
print(self._pretty_json(res))
else:
print(json.dumps(res))
def _write_dash_to_file(self, dash_id, filename, timeout, format='raw', string_ids=False):
with open(filename, "w") as f:
res = self.dog.dashboard(dash_id)
report_warnings(res)
report_errors(res)
dash_obj = res["dash"]
if "resource" in dash_obj:
del dash_obj["resource"]
if "url" in dash_obj:
del dash_obj["url"]
if string_ids:
dash_obj["id"] = str(dash_obj["id"])
json.dump(dash_obj, f, indent=2)
if format == 'pretty':
print("Downloaded dashboard {0} to file {1}".format(dash_id, filename))
else:
print("{0} {1}".format(dash_id, filename))
def _push(self, args):
self.dog.timeout = args.timeout
for f in args.file:
try:
dash_obj = json.load(f)
except Exception as err:
# except simplejson.decoder.JSONDecodeError as err: # only works in simplejson 2.2.x
raise Exception("Could not parse {0}: {1}".format(f.name, err))
# Always convert to int, in case it was originally a string.
dash_obj["id"] = int(dash_obj["id"])
if args.append_auto_text:
datetime_str = datetime.now().strftime('%x %X')
auto_text = ("<br/>\nUpdated at {0} from {1} ({2}) on {3}"
.format(datetime_str, f.name, dash_obj["id"], platform.node()))
dash_obj["description"] += auto_text
tpl_vars = dash_obj.get("template_variables", [])
res = self.dog.update_dashboard(dash_obj["id"], dash_obj["title"], dash_obj["description"],
dash_obj["graphs"], template_variables=tpl_vars)
print(tpl_vars)
if 'errors' in res:
print_err('Upload of dashboard {0} from file {1} failed.'.format(dash_obj["id"], f.name))
report_warnings(res)
report_errors(res)
if args.format == 'pretty':
print("Uploaded file {0} (dashboard {1})".format(f.name, dash_obj["id"]))
def _post(self, args):
self.dog.timeout = args.timeout
format = args.format
graphs = args.graphs
if args.graphs is None:
graphs = sys.stdin.read()
try:
graphs = json.loads(graphs)
except:
raise Exception('bad json parameter')
res = self.dog.create_dashboard(args.title, args.description, graphs,
template_variables=args.template_variables)
report_warnings(res)
report_errors(res)
if format == 'pretty':
print(self._pretty_json(res))
else:
print(json.dumps(res))
def _update(self, args):
self.dog.timeout = args.timeout
format = args.format
graphs = args.graphs
if args.graphs is None:
graphs = sys.stdin.read()
try:
graphs = json.loads(graphs)
except:
raise Exception('bad json parameter')
res = self.dog.update_dashboard(args.dashboard_id, args.title, args.description,
graphs, template_variables=args.template_variables)
report_warnings(res)
report_errors(res)
if format == 'pretty':
print(self._pretty_json(res))
else:
print(json.dumps(res))
def _show(self, args):
self.dog.timeout = args.timeout
format = args.format
res = self.dog.dashboard(args.dashboard_id)
report_warnings(res)
report_errors(res)
if args.string_ids:
res["dash"]["id"] = str(res["dash"]["id"])
if format == 'pretty':
print(self._pretty_json(res))
else:
print(json.dumps(res))
def _show_all(self, args):
self.dog.timeout = args.timeout
format = args.format
res = self.dog.dashboards()
report_warnings(res)
report_errors(res)
if args.string_ids:
for d in res["dashes"]:
d["id"] = str(d["id"])
if format == 'pretty':
print(self._pretty_json(res))
elif format == 'raw':
print(json.dumps(res))
else:
for d in res["dashes"]:
print("\t".join([(d["id"]),
(d["resource"]),
(d["title"]),
self._escape(d["description"])]))
def _delete(self, args):
self.dog.timeout = args.timeout
format = args.format
res = self.dog.delete_dashboard(args.dashboard_id)
report_warnings(res)
report_errors(res)
if format == 'pretty':
print(self._pretty_json(res))
else:
print(json.dumps(res))
def _web_view(self, args):
dash_id = json.load(args.file)['id']
url = self.dog.api_host + "/dash/dash/{0}".format(dash_id)
webbrowser.open(url)
def _escape(self, s):
return s.replace("\r", "\\r").replace("\n", "\\n").replace("\t", "\\t")
def _pretty_json(self, obj):
return json.dumps(obj, sort_keys=True, indent=2)
def _template_variables(tpl_var_input):
if '[' not in tpl_var_input:
return [v.strip() for v in tpl_var_input.split(',')]
else:
try:
return json.loads(tpl_var_input)
except Exception:
raise argparse.ArgumentTypeError('bad template_variable json parameter')
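# For example (illustrative inputs):
#   _template_variables('host,env')                              -> ['host', 'env']
#   _template_variables('[{"name": "host", "prefix": "host"}]')  -> [{'name': 'host', 'prefix': 'host'}]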
|
|
#!/usr/bin/env python
"""Module with GRRWorker implementation."""
import pdb
import time
import traceback
import logging
from grr.lib import aff4
from grr.lib import config_lib
from grr.lib import flags
from grr.lib import flow
from grr.lib import master
from grr.lib import queue_manager as queue_manager_lib
from grr.lib import queues as queues_config
from grr.lib import rdfvalue
from grr.lib import registry
# pylint: disable=unused-import
from grr.lib import server_stubs
# pylint: enable=unused-import
from grr.lib import stats
from grr.lib import threadpool
from grr.lib import utils
from grr.lib.rdfvalues import flows as rdf_flows
class Error(Exception):
"""Base error class."""
class FlowProcessingError(Error):
"""Raised when flow requests/responses can't be processed."""
class GRRWorker(object):
"""A GRR worker."""
# time to wait before polling when no jobs are currently in the
# task scheduler (sec)
POLLING_INTERVAL = 2
SHORT_POLLING_INTERVAL = 0.3
SHORT_POLL_TIME = 30
# target maximum time to spend on RunOnce
RUN_ONCE_MAX_SECONDS = 300
# A class global threadpool to be used for all workers.
thread_pool = None
# This is a timed cache of locked flows. If this worker encounters a lock
# failure on a flow, it will not attempt to grab this flow until the timeout.
queued_flows = None
def __init__(self, queues=queues_config.WORKER_LIST,
threadpool_prefix="grr_threadpool",
threadpool_size=None, token=None):
"""Constructor.
Args:
queues: The queues we use to fetch new messages from.
threadpool_prefix: A name for the thread pool used by this worker.
threadpool_size: The number of workers to start in this thread pool.
token: The token to use for the worker.
Raises:
RuntimeError: If the token is not provided.
"""
logging.info("started worker with queues: " + str(queues))
self.queues = queues
self.queued_flows = utils.TimeBasedCache(max_size=10, max_age=60)
if token is None:
raise RuntimeError("A valid ACLToken is required.")
# Make the thread pool a global so it can be reused for all workers.
if GRRWorker.thread_pool is None:
if threadpool_size is None:
threadpool_size = config_lib.CONFIG["Threadpool.size"]
GRRWorker.thread_pool = threadpool.ThreadPool.Factory(
threadpool_prefix, min_threads=2, max_threads=threadpool_size)
GRRWorker.thread_pool.Start()
self.token = token
self.last_active = 0
# Well known flows are just instantiated.
self.well_known_flows = flow.WellKnownFlow.GetAllWellKnownFlows(token=token)
self.flow_lease_time = config_lib.CONFIG["Worker.flow_lease_time"]
self.well_known_flow_lease_time = config_lib.CONFIG[
"Worker.well_known_flow_lease_time"]
def Run(self):
"""Event loop."""
try:
while 1:
if master.MASTER_WATCHER.IsMaster():
processed = self.RunOnce()
else:
processed = 0
if processed == 0:
logger = logging.getLogger()
for h in logger.handlers:
h.flush()
if time.time() - self.last_active > self.SHORT_POLL_TIME:
interval = self.POLLING_INTERVAL
else:
interval = self.SHORT_POLLING_INTERVAL
time.sleep(interval)
else:
self.last_active = time.time()
except KeyboardInterrupt:
logging.info("Caught interrupt, exiting.")
self.thread_pool.Join()
def RunOnce(self):
"""Processes one set of messages from Task Scheduler.
The worker processes new jobs from the task master. For each job
we retrieve the session from the Task Scheduler.
Returns:
Total number of messages processed by this call.
"""
start_time = time.time()
processed = 0
queue_manager = queue_manager_lib.QueueManager(token=self.token)
for queue in self.queues:
      # Freezing the timestamp used by the queue manager to query/delete
      # notifications, to avoid possible race conditions.
queue_manager.FreezeTimestamp()
fetch_messages_start = time.time()
notifications_by_priority = queue_manager.GetNotificationsByPriority(
queue)
stats.STATS.RecordEvent("worker_time_to_retrieve_notifications",
time.time() - fetch_messages_start)
# Process stuck flows first
stuck_flows = notifications_by_priority.pop(
queue_manager.STUCK_PRIORITY, [])
if stuck_flows:
self.ProcessStuckFlows(stuck_flows, queue_manager)
notifications_available = []
for priority in sorted(notifications_by_priority, reverse=True):
for notification in notifications_by_priority[priority]:
# Filter out session ids we already tried to lock but failed.
if notification.session_id not in self.queued_flows:
notifications_available.append(notification)
try:
# If we spent too much time processing what we have so far, the
# active_sessions list might not be current. We therefore break here
# so we can re-fetch a more up to date version of the list, and try
# again later. The risk with running with an old active_sessions list
# is that another worker could have already processed this message,
# and when we try to process it, there is nothing to do - costing us a
# lot of processing time. This is a tradeoff between checking the data
# store for current information and processing out of date
# information.
processed += self.ProcessMessages(notifications_available,
queue_manager,
self.RUN_ONCE_MAX_SECONDS -
(time.time() - start_time))
# We need to keep going no matter what.
except Exception as e: # pylint: disable=broad-except
logging.error("Error processing message %s. %s.", e,
traceback.format_exc())
stats.STATS.IncrementCounter("grr_worker_exceptions")
if flags.FLAGS.debug:
pdb.post_mortem()
queue_manager.UnfreezeTimestamp()
# If we have spent too much time, stop.
if (time.time() - start_time) > self.RUN_ONCE_MAX_SECONDS:
return processed
return processed
def ProcessStuckFlows(self, stuck_flows, queue_manager):
stats.STATS.IncrementCounter("grr_flows_stuck", len(stuck_flows))
for stuck_flow in stuck_flows:
try:
flow.GRRFlow.TerminateFlow(
stuck_flow.session_id, reason="Stuck in the worker",
status=rdf_flows.GrrStatus.ReturnedStatus.WORKER_STUCK,
force=True, token=self.token)
except Exception: # pylint: disable=broad-except
logging.exception("Error terminating stuck flow: %s", stuck_flow)
finally:
# Remove notifications for this flow. This will also remove the
# "stuck flow" notification itself.
queue_manager.DeleteNotification(stuck_flow.session_id)
def ProcessMessages(self, active_notifications, queue_manager, time_limit=0):
"""Processes all the flows in the messages.
Precondition: All tasks come from the same queue.
Note that the server actually completes the requests in the
flow when receiving the messages from the client. We do not really
look at the messages here at all any more - we just work from the
completed messages in the flow RDFValue.
Args:
active_notifications: The list of notifications.
queue_manager: QueueManager object used to manage notifications,
requests and responses.
time_limit: If set return as soon as possible after this many seconds.
Returns:
The number of processed flows.
"""
now = time.time()
processed = 0
for notification in active_notifications:
if notification.session_id not in self.queued_flows:
if time_limit and time.time() - now > time_limit:
break
processed += 1
self.queued_flows.Put(notification.session_id, 1)
self.thread_pool.AddTask(target=self._ProcessMessages,
args=(notification,
queue_manager.Copy()),
name=self.__class__.__name__)
return processed
def _ProcessRegularFlowMessages(self, flow_obj, notification):
"""Processes messages for a given flow."""
session_id = notification.session_id
if not isinstance(flow_obj, flow.GRRFlow):
logging.warn("%s is not a proper flow object (got %s)", session_id,
type(flow_obj))
stats.STATS.IncrementCounter("worker_bad_flow_objects",
fields=[str(type(flow_obj))])
raise FlowProcessingError("Not a GRRFlow.")
runner = flow_obj.GetRunner()
if runner.schedule_kill_notifications:
# Create a notification for the flow in the future that
      # indicates that this flow is in progress. We'll delete this
# notification when we're done with processing completed
# requests. If we're stuck for some reason, the notification
# will be delivered later and the stuck flow will get
# terminated.
stuck_flows_timeout = rdfvalue.Duration(
config_lib.CONFIG["Worker.stuck_flows_timeout"])
kill_timestamp = (rdfvalue.RDFDatetime().Now() +
stuck_flows_timeout)
with queue_manager_lib.QueueManager(token=self.token) as manager:
manager.QueueNotification(session_id=session_id,
in_progress=True,
timestamp=kill_timestamp)
# kill_timestamp may get updated via flow.HeartBeat() calls, so we
# have to store it in the runner context.
runner.context.kill_timestamp = kill_timestamp
try:
runner.ProcessCompletedRequests(notification, self.thread_pool)
# Something went wrong - log it in the flow.
except Exception as e: # pylint: disable=broad-except
runner.context.state = rdf_flows.Flow.State.ERROR
runner.context.backtrace = traceback.format_exc()
logging.error("Flow %s: %s", flow_obj, e)
raise FlowProcessingError(e)
finally:
# Delete kill notification as the flow got processed and is not
# stuck.
with queue_manager_lib.QueueManager(token=self.token) as manager:
if runner.schedule_kill_notifications:
manager.DeleteNotification(
session_id, start=runner.context.kill_timestamp,
end=runner.context.kill_timestamp)
runner.context.kill_timestamp = None
if (runner.process_requests_in_order and
notification.last_status and
(runner.context.next_processed_request <=
notification.last_status)):
logging.debug("Had to reschedule a notification: %s", notification)
# We are processing requests in order and have received a
# notification for a specific request but could not process
# that request. This might be a race condition in the data
# store so we reschedule the notification in the future.
delay = config_lib.CONFIG[
"Worker.notification_retry_interval"]
manager.QueueNotification(
notification, timestamp=notification.timestamp + delay)
def _ProcessMessages(self, notification, queue_manager):
"""Does the real work with a single flow."""
flow_obj = None
session_id = notification.session_id
try:
# Take a lease on the flow:
flow_name = session_id.FlowName()
if flow_name in self.well_known_flows:
# Well known flows are not necessarily present in the data store so
# we need to create them instead of opening.
expected_flow = self.well_known_flows[flow_name].__class__.__name__
flow_obj = aff4.FACTORY.CreateWithLock(
session_id, expected_flow,
lease_time=self.well_known_flow_lease_time,
blocking=False, token=self.token)
else:
flow_obj = aff4.FACTORY.OpenWithLock(
session_id, lease_time=self.flow_lease_time,
blocking=False, token=self.token)
now = time.time()
logging.debug("Got lock on %s", session_id)
# If we get here, we now own the flow. We can delete the notifications
# we just retrieved but we need to make sure we don't delete any that
# came in later.
queue_manager.DeleteNotification(session_id, end=notification.timestamp)
if flow_name in self.well_known_flows:
stats.STATS.IncrementCounter("well_known_flow_requests",
fields=[str(session_id)])
# We remove requests first and then process them in the thread pool.
# On one hand this approach increases the risk of losing requests in
# case the worker process dies. On the other hand, it doesn't hold
# the lock while requests are processed, so other workers can
# process well known flows requests as well.
with flow_obj:
responses = flow_obj.FetchAndRemoveRequestsAndResponses(session_id)
flow_obj.ProcessResponses(responses, self.thread_pool)
else:
with flow_obj:
self._ProcessRegularFlowMessages(flow_obj, notification)
elapsed = time.time() - now
logging.debug("Done processing %s: %s sec", session_id, elapsed)
stats.STATS.RecordEvent("worker_flow_processing_time", elapsed,
fields=[flow_obj.Name()])
# Everything went well -> session can be run again.
self.queued_flows.ExpireObject(session_id)
except aff4.LockError:
# Another worker is dealing with this flow right now, we just skip it.
# We expect lots of these when there are few messages (the system isn't
# highly loaded) but it is interesting when the system is under load to
# know if we are pulling the optimal number of messages off the queue.
# A high number of lock fails when there is plenty of work to do would
# indicate we are wasting time trying to process work that has already
# been completed by other workers.
stats.STATS.IncrementCounter("worker_flow_lock_error")
except FlowProcessingError:
# Do nothing as we expect the error to be correctly logged and accounted
# already.
pass
except Exception as e: # pylint: disable=broad-except
# Something went wrong when processing this session. In order not to spin
# here, we just remove the notification.
logging.exception("Error processing session %s: %s", session_id, e)
stats.STATS.IncrementCounter("worker_session_errors",
fields=[str(type(e))])
queue_manager.DeleteNotification(session_id)
class WorkerInit(registry.InitHook):
"""Registers worker stats variables."""
pre = ["StatsInit"]
def RunOnce(self):
"""Exports the vars.."""
stats.STATS.RegisterCounterMetric("grr_flows_stuck")
stats.STATS.RegisterCounterMetric("worker_bad_flow_objects",
fields=[("type", str)])
stats.STATS.RegisterCounterMetric("worker_session_errors",
fields=[("type", str)])
stats.STATS.RegisterCounterMetric(
"worker_flow_lock_error", docstring=("Worker lock failures. We expect "
"these to be high when the system"
"is idle."))
stats.STATS.RegisterEventMetric("worker_flow_processing_time",
fields=[("flow", str)])
stats.STATS.RegisterEventMetric("worker_time_to_retrieve_notifications")
|
|
#!/usr/bin/env python
from __future__ import (absolute_import, division, print_function,
unicode_literals)
def classify_pixel(input_data, classifier, threads=8, ram=4000):
"""
Runs a pre-trained ilastik classifier on a volume of data
Adapted from Stuart Berg's example here:
https://github.com/ilastik/ilastik/blob/master/examples/example_python_client.py
Arguments:
input_data: data to be classified - 3D numpy array
classifier: ilastik trained/classified file
        threads: number of threads to use for classifying input data
ram: RAM to use in MB
Returns:
        predictions: ndarray of per-pixel class probabilities produced by the classifier
"""
import numpy as np
import six
import pdb
from collections import OrderedDict
import vigra
import os
import ilastik_main
from ilastik.applets.dataSelection import DatasetInfo
from ilastik.workflows.pixelClassification import PixelClassificationWorkflow
# Before we start ilastik, prepare these environment variable settings.
os.environ["LAZYFLOW_THREADS"] = str(threads)
os.environ["LAZYFLOW_TOTAL_RAM_MB"] = str(ram)
# Set the command-line arguments directly into argparse.Namespace object
# Provide your project file, and don't forget to specify headless.
args = ilastik_main.parser.parse_args([])
args.headless = True
args.project = classifier
# Instantiate the 'shell', (an instance of ilastik.shell.HeadlessShell)
# This also loads the project file into shell.projectManager
shell = ilastik_main.main(args)
assert isinstance(shell.workflow, PixelClassificationWorkflow)
# Obtain the training operator
opPixelClassification = shell.workflow.pcApplet.topLevelOperator
# Sanity checks
assert len(opPixelClassification.InputImages) > 0
assert opPixelClassification.Classifier.ready()
    # Run the classifier over the provided input volume ("batch processing").
print("input_data.shape", input_data.shape)
    # The input is a 3D volume; tagging the axes as 'xyz' ensures that ilastik
    # interprets them correctly.
input_data = vigra.taggedView(input_data, 'xyz')
# In case you're curious about which label class is which,
# let's read the label names from the project file.
label_names = opPixelClassification.LabelNames.value
label_colors = opPixelClassification.LabelColors.value
probability_colors = opPixelClassification.PmapColors.value
print("label_names, label_colors, probability_colors", label_names, label_colors, probability_colors)
# Construct an OrderedDict of role-names -> DatasetInfos
# (See PixelClassificationWorkflow.ROLE_NAMES)
role_data_dict = OrderedDict([("Raw Data",
[DatasetInfo(preloaded_array=input_data)])])
# Run the export via the BatchProcessingApplet
# Note: If you don't provide export_to_array, then the results will
# be exported to disk according to project's DataExport settings.
# In that case, run_export() returns None.
predictions = shell.workflow.batchProcessingApplet.\
run_export(role_data_dict, export_to_array=True)
predictions = np.squeeze(predictions)
print("predictions.dtype, predictions.shape", predictions.dtype, predictions.shape)
print("DONE.")
return predictions
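# A usage sketch for classify_pixel() (hypothetical project path and shape,
# for illustration only): run a trained ilastik project over a 3D volume and
# threshold the resulting probabilities.
#
#   import numpy as np
#   volume = np.random.rand(64, 64, 16).astype('float32')
#   probabilities = classify_pixel(volume, 'MyPixelClassifier.ilp',
#                                  threads=4, ram=2000)
#   foreground = probabilities > 0.5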
def segment_vessels(vessel_probability, probability_threshold, dilation_size, minimum_size):
"""
This function produces a binary image with segmented vessels from a probability map (from
ilastik or another classifier).
Copyright (c) 2016, UChicago Argonne, LLC.
Parameters
----------
vessel_probability : ndarray
Nr x Nc x Nz matrix which contains the probability of each voxel being a vessel.
probability_threshold : float
threshold between (0,1) to apply to probability map (only consider voxels for which
vessel_probability(r,c,z) > probability_threshold).
dilation_size : int
Sphere Structural Element diameter size.
minimum_size : int
components smaller than this are removed from image.
Returns
-------
    ndarray
Binary Image
"""
import numpy as np
import scipy.io as sio
from scipy import ndimage as ndi
from skimage import morphology
    smallsize = 100  # components smaller than this fixed size are removed before dilation
unfiltered_im = (vessel_probability >= probability_threshold)
im_removed_small_objects = morphology.remove_small_objects(unfiltered_im,
min_size = smallsize, in_place = True)
dilated_im = ndi.binary_dilation(im_removed_small_objects, morphology.ball((dilation_size-1)/2))
image_out = morphology.remove_small_objects(dilated_im, min_size = minimum_size,
in_place = True)
return(image_out)
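# A small usage sketch (toy data) for segment_vessels(): threshold a
# probability volume, drop small components, then dilate what remains.
def _segment_vessels_example():
    import numpy as np
    probability = np.zeros((32, 32, 32), dtype='float32')
    probability[10:20, 10:20, 10:20] = 0.9  # synthetic "vessel" blob
    mask = segment_vessels(probability, probability_threshold=0.5,
                           dilation_size=3, minimum_size=50)
    assert mask.any() and mask.dtype == bool
    return mask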
def detect_cells(cell_probability, probability_threshold, stopping_criterion,
initial_template_size, dilation_size, max_no_cells):
"""
This is the top level function to infer the position (and eventually size) of all cells in a 3D
volume of image data. We assume that we already have computed a "probability map" which encodes
the probability that each voxel corresponds to a cell body.
Copyright (c) 2016, UChicago Argonne, LLC.
Parameters
----------
cell_probability : ndarray
Nr x Nc x Nz matrix which contains the probability of each voxel being a cell body.
probability_threshold : float
threshold between (0,1) to apply to probability map (only consider voxels for which
cell_probability(r,c,z) > probability_threshold)
stopping_criterion : float
stopping criterion is a value between (0,1) (minimum normalized correlation between
template and probability map) (Example = 0.47)
initial_template_size : int
initial size of spherical template (to use in sweep)
dilation_size : int
size to increase mask around each detected cell (zero out sphere of radius with
initial_template_size+dilation_size around each centroid)
max_no_cells : int
maximum number of cells (alternative stopping criterion)
Returns
-------
ndarray
centroids = D x 4 matrix, where D = number of detected cells.
The (x,y,z) coordinate of each cell are in columns 1-3.
The fourth column contains the correlation (ptest) between the template
and probability map and thus represents our "confidence" in the estimate.
The algorithm terminates when ptest<=stopping_criterion.
ndarray
new_map = Nr x Nc x Nz matrix containing labeled detected cells (1,...,D)
"""
# following imports to be updated when directory structure are finalized
#import create_synth_dict
#from compute3dvec import compute3dvec
from scipy import signal
import numpy as np
import pdb
import logging
# threshold probability map.
newtest = (cell_probability * (cell_probability > probability_threshold)).astype('float32')
    # initial_template_size is an int now but could be a vector later on - convert it to an array
initial_template_size = np.atleast_1d(initial_template_size)
# create dictionary of spherical templates
box_radius = np.ceil(np.max(initial_template_size)/2) + 1
dict = create_synth_dict(initial_template_size, box_radius)
dilate_dict = create_synth_dict(initial_template_size + dilation_size, box_radius)
box_length = int(round(np.shape(dict)[0] ** (1/3)))
new_map = np.zeros((np.shape(cell_probability)), dtype='uint8')
newid = 1
centroids = np.empty((0, 4))
# run greedy search step for at most max_no_cells steps (# cells <= max_no_cells)
for ktot in range(max_no_cells):
val = np.zeros((np.shape(dict)[1], 1), dtype='float32')
id = np.zeros((np.shape(dict)[1], 1), dtype='uint32')
# loop to convolve the probability cube with each template in dict
for j in range(np.shape(dict)[1]):
convout = signal.fftconvolve(newtest, np.reshape(dict[:,j], (box_length, box_length,
box_length)), mode='same')
# get the max value of the flattened convout array and its index
val[j],id[j] = np.real(np.amax(convout)), np.argmax(convout)
# find position in image with max correlation
which_atom = np.argmax(val)
which_loc = id[which_atom]
# Save dict into a cube array with its center given by which_loc and place it into a 3-D array.
x2 = compute3dvec(dict[:, which_atom], which_loc, box_length, np.shape(newtest))
xid = np.nonzero(x2)
# Save dilate_dict into a cube array with its center given by which_loc and place it into a 3-D array.
x3 = compute3dvec(dilate_dict[:, which_atom], which_loc, box_length, np.shape(newtest))
newtest = newtest * (x3 == 0)
        ptest = val/np.sum(dict, axis=0)  # normalized correlation between template and probability map
if ptest < stopping_criterion:
print("Cell Detection is done")
return(centroids, new_map)
# Label detected cell
new_map[xid] = newid
newid = newid + 1
#Convert flat index to indices
rr, cc, zz = np.unravel_index(which_loc, np.shape(newtest))
        new_centroid = rr, cc, zz  # (row, column, z) indices from unravel_index
# insert a row into centroids
centroids = np.vstack((centroids, np.append(new_centroid, ptest)))
# for later: convert to logging and print with much less frequency
if(ktot % 10 == 0):
print('Iteration remaining = ', (max_no_cells - ktot - 1), 'Correlation = ', ptest )
print("Cell Detection is done")
return(centroids, new_map)
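# A call sketch for detect_cells() (illustrative parameter values); the
# probability volume would typically come from classify_pixel() above.
#
#   centroids, label_map = detect_cells(cell_probability, probability_threshold=0.6,
#                                       stopping_criterion=0.47,
#                                       initial_template_size=9,
#                                       dilation_size=3, max_no_cells=500)
#   # centroids: D x 4 array of (x, y, z, correlation); label_map: labeled volume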
def create_synth_dict(radii, box_radius):
"""
This function creates a collection of spherical templates of different sizes.
Copyright (c) 2016, UChicago Argonne, LLC.
Parameters
----------
radii : int
        radii could be a 1xN vector but currently is an integer
box_radius : float
Returns
-------
ndarray
dictionary of template vectors, of size (box_length ** 3 x length(radii)), where
box_length = box_radius*2 +1 and radii is an input to the function which contains a vector
of different sphere sizes.
"""
import numpy as np
from numpy import linalg as LA
from scipy import ndimage as ndi
from skimage.morphology import ball
box_length = int(box_radius * 2 + 1) #used for array dimension
dict = np.zeros((box_length**3, np.size(radii)), dtype='float32')
cvox = int((box_length-1)/2 + 1)
for i in range(len(radii)):
template = np.zeros((box_length, box_length, box_length))
template[cvox, cvox, cvox] = 1
dict[:, i] = np.reshape(ndi.binary_dilation(template, ball((radii[i] - 1)/2)), (box_length**3))
dict[:, i] = dict[:, i]/(LA.norm(dict[:, i]))
return(dict)
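# A self-check sketch (illustrative radius only): create_synth_dict() returns
# one unit-norm column per radius, each a flattened box_length**3 spherical
# template. box_radius=4 matches detect_cells' choice of ceil(max(radii)/2) + 1.
def _synth_dict_example():
    import numpy as np
    templates = create_synth_dict([5], box_radius=4)  # box_length = 9
    assert templates.shape == (9 ** 3, 1)
    assert np.isclose(np.linalg.norm(templates[:, 0]), 1.0)
    return templates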
def placeatom(vector, box_length, which_loc, stacksz):
"""
Copies the data from vector into a cube with the width of "box_length" and places the cube
into a 3-D array with the shape/size defined by the "stacksz" parameter. The center of cube is
given by the "which_loc" parameter.
Copyright (c) 2016, UChicago Argonne, LLC.
Parameters
----------
vector : ndarray
Nx1 array
box_length : int
        Length of the cube edge (in voxels)
which_loc : int
location to place atom in the flattened array
    stacksz : ndarray
shape of the array (3D)
Returns
-------
ndarray
"""
import numpy as np
output_array = np.zeros((stacksz), dtype='float32')
#Convert flat index to indices
r, c, z = np.unravel_index(which_loc, (stacksz))
output_array[r, c, z] = 1
# Increase every dimension by box_length at the top and at the bottom and fill them with zeroes.
output_array = np.lib.pad(output_array, ((box_length, box_length), (box_length, box_length),
(box_length, box_length)), 'constant', constant_values=(0, 0))
# get the indices of the center of cube into increased dimensions output_array.
r, c, z = np.nonzero(output_array)
#save the output of round() function to avoid multiple calls to it.
    half_length = int(round(box_length/2))
# TODO: casting to int to avoid problems downstream with indexing
    c = int(c)
    r = int(r)
    z = int(z)
#Save the data from the cube into output_array.
output_array[(r - half_length +1) : (r + box_length - half_length +1), \
(c - half_length +1) : (c + box_length - half_length +1), \
(z - half_length +1) : (z + box_length - half_length +1)] = \
np.reshape(vector, (box_length, box_length, box_length))
return(output_array)
def compute3dvec(vector, which_loc, box_length, stacksz):
"""
    Crops the padded array returned by placeatom() back to the shape/size given by the "stacksz" parameter.
Copyright (c) 2016, UChicago Argonne, LLC.
Parameters
----------
vector : ndarray
Nx1 array
box_length : int
        Length of the cube edge (in voxels)
which_loc : int
location to place atom
    stacksz : ndarray
shape of the array (3D)
Returns
-------
ndarray
"""
import numpy as np
output_array = placeatom(vector, box_length, which_loc, stacksz)
#delete the top "box_length" arrays for all dimensions.
x, y, z = np.shape(output_array)
output_array = output_array[box_length:x, box_length:y, box_length:z]
#delete the bottom "box_length" arrays for all dimensions.
x, y, z = np.shape(output_array)
output_array = output_array[0 : (x - box_length), 0 : (y - box_length), 0 : (z - box_length)]
return output_array
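# A quick shape-check sketch (toy sizes) for compute3dvec(): placeatom() pads
# the volume by box_length on every side, and compute3dvec() crops it back so
# the result matches `stacksz`.
def _compute3dvec_example():
    import numpy as np
    vector = np.ones(3 ** 3, dtype='float32')
    out = compute3dvec(vector, which_loc=13, box_length=3, stacksz=(6, 6, 6))
    assert out.shape == (6, 6, 6)
    return out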
|
|
import os
import sys
import json
import types
import shutil
import logging
import tempfile
import contextlib
from pyblish import api
from . import lib, schema
self = sys.modules[__name__]
self.log = logging.getLogger("pyblish-starter")
self._registered_data = list()
self._registered_families = list()
self._registered_formats = list()
self._registered_root = os.getcwd() # Default to current working directory
self._is_installed = False
def default_host():
"""A default host, in place of anything better
    This may be considered a reference for the
interface a host must implement. It also ensures
that the system runs, even when nothing is there
to support it.
"""
host = types.ModuleType("default")
host.__dict__.update({
"ls": lambda: [],
"load": lambda asset, version, representation: None,
"create": lambda name, family: "my_instance",
})
return host
def debug_host():
host = types.ModuleType("standalone")
host.__dict__.update({
"ls": lambda: [],
"load": lambda asset, version=-1, representation=None:
sys.stdout.write(json.dumps({
"asset": asset,
"version": version,
"representation": representation
}, indent=4) + "\n"),
"create": lambda name, family:
sys.stdout.write(json.dumps({
"name": name,
"family": family,
}, indent=4))
})
return host
self._registered_host = default_host()
def install(host):
"""Install `host` into the running Python session.
Arguments:
host (module): A Python module containing the Pyblish
starter host-interface.
"""
# Optional host install function
if hasattr(host, "install"):
host.install()
register_host(host)
register_plugins()
register_default_data()
register_default_families()
self._is_installed = True
self.log.info("Successfully installed Pyblish Starter!")
def uninstall():
try:
registered_host().uninstall()
except AttributeError:
pass
deregister_host()
deregister_plugins()
deregister_default_data()
deregister_default_families()
self.log.info("Successfully uninstalled Pyblish Starter!")
def is_installed():
"""Return state of installation
Returns:
True if installed, False otherwise
"""
return self._is_installed
def register_default_data():
register_data(key="id", value="pyblish.starter.instance")
register_data(key="name", value="{name}")
register_data(key="family", value="{family}")
def register_default_families():
register_family(
name="starter.model",
help="Polygonal geometry for animation"
)
register_family(
name="starter.rig",
help="Character rig"
)
register_family(
name="starter.animation",
help="Pointcache"
)
def ls(root=None):
"""List available assets
Return a list of available assets.
The interface of this function, along with its schema, is designed
to facilitate a potential transition into database-driven queries.
A note on performance:
This function is a generator, it scans the system one asset
at a time. However, scanning implies both listing directories
and opening files - one per asset per version.
        Therefore, the cost grows in proportion to the number of assets
        times the number of versions in the project.
In small pipelines - e.g. 100s of assets, with 10s of versions -
this should not pose a problem.
In large pipelines - e.g. 1000s of assets, with 100s of versions -
this would likely become unbearable and manifest itself in
surrounding areas of the pipeline where disk-access is
critical; such as saving or loading files.
    .. note:: The order of the list is undefined, but is typically alphabetical
        due to dependence on os.listdir()
    .. note:: The order of versions returned is guaranteed to be sorted, so
        as to simplify retrieving the latest one via `versions[-1]`
"""
assetsdir = lib.format_shared_dir(root or self._registered_root)
for asset in lib.listdir(assetsdir):
versionsdir = os.path.join(assetsdir, asset)
asset_entry = {
"schema": "pyblish-starter:asset-1.0",
"name": asset,
"versions": list()
}
for version in lib.listdir(versionsdir):
versiondir = os.path.join(versionsdir, version)
fname = os.path.join(versiondir, ".metadata.json")
try:
with open(fname) as f:
data = json.load(f)
except IOError:
self.log.warning("\"%s\" not found." % fname)
continue
if data.get("schema") != "pyblish-starter:version-1.0":
self.log.warning("\"%s\" unsupported schema." % fname)
continue
asset_entry["versions"].append(data)
# Sort versions by integer
asset_entry["versions"].sort(key=lambda v: v["version"])
schema.validate(asset_entry, "asset")
yield asset_entry
@contextlib.contextmanager
def fixture(assets=["Asset1"], versions=1):
"""Build transient fixture of `assets` and `versions`
Generate a temporary fixture of customisable assets
with current metadata schema. This function is intended
for use in tests and tutorials.
Arguments:
assets (list, optional): Names of assets to create,
defaults to one asset named "Asset1"
        versions (int, optional): Number of versions of each asset,
defaults to 1 version.
Thread Safety:
        This function modifies global state and is
therefore not thread-safe.
Usage:
>>> with fixture(assets=["MyAsset1"], versions=1):
... for asset in ls():
... assert asset["name"] == "MyAsset1"
...
"""
tempdir = tempfile.mkdtemp()
shared = os.path.join(
tempdir,
"shared"
)
os.makedirs(shared)
for asset in assets:
assetdir = os.path.join(shared, asset)
os.makedirs(assetdir)
for version in range(versions):
version = lib.format_version(version + 1)
versiondir = os.path.join(assetdir, version)
os.makedirs(versiondir)
fname = os.path.join(versiondir, asset + ".ma")
open(fname, "w").close() # touch
fname = os.path.join(versiondir, ".metadata.json")
with open(fname, "w") as f:
json.dump({
"schema": "pyblish-starter:version-1.0",
"version": lib.parse_version(version),
"path": versiondir,
"time": "",
"author": "mottosso",
"source": os.path.join(
"{project}",
"maya",
"scenes",
"scene.ma"
),
"representations": [
{
"schema": "pyblish-starter:representation-1.0",
"format": ".ma",
"path": os.path.join(
"{dirname}",
"%s{format}" % asset
),
},
]
}, f)
# Keep track of original root
_ = self._registered_root
try:
self._registered_root = tempdir
yield tempdir
finally:
self._registered_root = _
shutil.rmtree(tempdir)
def register_root(path):
"""Register currently active root"""
self._registered_root = path
def registered_root():
"""Return currently registered root"""
return self._registered_root
# Alias
root = registered_root
def register_format(format):
"""Register a supported format
A supported format is used to determine which of any available
representations are relevant to the currently registered host.
"""
self._registered_formats.append(format)
def register_host(host):
missing = list()
for member in ("load",
"create",
"ls",):
if not hasattr(host, member):
missing.append(member)
assert not missing, (
"Incomplete interface for host: '%s'\n"
"Missing: %s" % (host, ", ".join(missing))
)
self._registered_host = host
def register_plugins():
"""Register accompanying plugins"""
from . import plugins
plugin_path = os.path.dirname(plugins.__file__)
api.register_plugin_path(plugin_path)
def register_data(key, value, help=None):
"""Register new default attribute
Arguments:
key (str): Name of data
value (object): Arbitrary value of data
help (str, optional): Briefly describe
"""
self._registered_data.append({
"key": key,
"value": value,
"help": help or ""
})
def register_family(name, data=None, help=None):
"""Register family and attributes for family
Arguments:
name (str): Name of family
data (dict, optional): Additional data, see
:func:`register_data` for docstring on members
help (str, optional): Briefly describe this family
"""
self._registered_families.append({
"name": name,
"data": data or [],
"help": help or ""
})
def registered_formats():
return self._registered_formats[:]
def registered_families():
return self._registered_families[:]
def registered_data():
return self._registered_data[:]
def registered_host():
return self._registered_host
def deregister_default_families():
self._registered_families[:] = list()
def deregister_default_data():
self._registered_data[:] = list()
def deregister_plugins():
from . import plugins
plugin_path = os.path.dirname(plugins.__file__)
try:
api.deregister_plugin_path(plugin_path)
except ValueError:
self.log.warning("pyblish-starter plug-ins not registered.")
def deregister_host():
self._registered_host = default_host()
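# A minimal end-to-end sketch (illustrative only) tying the above together:
# register the in-memory debug host, build a throwaway fixture and list the
# assets it contains. Everything referenced here is defined in this module.
def _example_usage():
    register_host(debug_host())
    with fixture(assets=["Hero"], versions=2):
        for asset in ls():
            assert asset["name"] == "Hero"
            assert len(asset["versions"]) == 2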
|
|
import os
import sys
import json, socket, time
# This makes sure the path which python uses to find things when using import
# can find all our code.
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
# import qt modules (platform independent)
import ardrone.util.qtcompat as qt
QtCore = qt.import_module('QtCore')
class State(object):
"""
A class which manages the state of the SwarmController object.
This is the base class from which all states inherit.
A state can be requested to be maintained:
>> maintain()
    or requested to try to transition to a new state:
>> transition(state_id)
When it is requested to do so, the state machine determines whether it is in the correct state and changes it accordingly.
State changes are checked by comparing the current swarm_status against the exit_conditions of the current state.
The state ids are:
0 - Setup State (i.e. setting up to a pre-mission configuration)
1 - TaskBad (i.e. task not being achieved)
2 - TaskGood (i.e. task being achieved)
"""
def __init__(self,_coop,drones,drone_controllers):
# Variables
self.drones = drones
self.state_ids = (0,1,2)
# Assign pointers
self.drone_controllers = drone_controllers # NB - actually a tuple of pointers
self._coop = _coop
def transition(self,state_id):
"""
Carry out action to transition towards requested state.
"""
pass
def maintain(self):
"""
Carry out action to maintain state.
"""
pass
def check_exit(self):
"""
Check the exit conditions against swarm status.
If state requires changing then do so to the correct state and inform SwarmControl of this change.
"""
# Count for conditions which have been met
conditions_met_count = 0
# Check exit condition for each state against all exit conditions for the respective state
for state in self.state_ids:
for key in self.exit_conditions[state].keys():
#print ("checking condition against: %s" % key)
#print ("comparisson: %s" % ([self.exit_conditions[state][key],self._coop.swarm_status[key]]))
if self.exit_conditions[state][key] == self._coop.swarm_status[key]:
conditions_met_count = conditions_met_count + 1
# Check met conditions against total conditions, accept or reject exit as specified in state
if self.exit_conditional[state] == 'none':
pass
elif conditions_met_count == len(self.exit_conditions[state]):
self.next_state(state)
elif conditions_met_count > 0 and self.exit_conditional[state] == 'or':
self.next_state(state)
elif conditions_met_count == 0 or self.exit_conditional[state] == 'and':
pass
else:
print("Unexpected condition grouping - check_exit - SwarmStates")
def next_state(self,state_id):
"""
Takes a state_id and changes the current state to the relevant object.
"""
if state_id == 0:
self._coop.change_state((SetupState(self._coop,self.drones,self.drone_controllers),0))
elif state_id == 1:
self._coop.change_state((TaskBad(self._coop,self.drones,self.drone_controllers),1))
elif state_id == 2:
self._coop.change_state((TaskGood(self._coop,self.drones,self.drone_controllers),2))
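# Worked example (illustrative values only) of the check_exit() bookkeeping:
#
#   swarm_status     = {'talking': True, 'airprox': False, 'observing_target': True}
#   exit_conditions  = [{}, {}, {'airprox': False, 'observing_target': True}]
#   exit_conditional = ['none', 'none', 'and']
#
# Every key of exit_conditions[2] matches swarm_status and the conditional is
# 'and', so next_state(2) fires and the swarm moves to TaskGood. With an 'or'
# conditional (see TaskGood below) a single matching key is enough.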
class SetupState(State):
"""
state_id = 0
The SetupState is for when the drones are not verified as being ready for operations.
State entry requirements: none
State purpose: to WAIT until drones are communicating
State transition conditions:
State 0:
-
State 1:
talking == True for all drones
State 2:
-
"""
def __init__(self,_coop,drones,drone_controllers):
# Initialise as per State base class
State.__init__(self,_coop,drones,drone_controllers)
# Set exit conditions
self.exit_conditions = [{}, {'talking':True}, {}]
self.exit_conditional = ['none','and','none']
print("======In Setup State======")
def maintain(self):
for drone in self.drone_controllers:
drone.request_state(0)
pass
def transition(self,state_id):
self.maintain()
self.check_exit()
for drone in self.drone_controllers:
drone.request_state(0)
class TaskBad(State):
"""
state_id = 1
The TaskBad state is for when the task is not being achieved.
State entry requirements: drones are setup and ready for operations.
    State purpose: to achieve the task and safely land any drones requiring charging
TASK - to observe marker 0.
State transition conditions:
State 0:
-
State 1:
-
State 2:
airprox == False && observing_target == True
"""
def __init__(self,_coop,drones,drone_controllers):
# Initialise as per State base class
State.__init__(self,_coop,drones,drone_controllers)
# Set exit conditions
self.exit_conditions = [{}, {},{'airprox':False, 'observing_target':True}]
self.exit_conditional = ['none','none','and']
print("======Task not being achieved======")
def maintain(self):
# Land drones in need
#self._coop.land_low_battery_drones()
self.check_exit()
pass # It's not hard to carry on doing something badly!
def transition(self,state_id):
if state_id == 0 or state_id == 1:
print("Trying to change from TaskBad state into a SwarmState which isn't sensible. No action taken - SwarmState")
if state_id == 2:
"""
To achieve the task, use the drone with highest battery percentage (when this state was created) and navigate it to the target
"""
if self._coop.allocate_asset():
# if position of drone is known, then request the drone follow a route to the target
# (NB this will only do something when the drone is in state 3)
new_routes = self._coop._navigator.route_to_target(self._coop.swarm_status['position'][self.drones.index(self._coop.asset_drone)],0,self._coop.asset_drone)
self._coop.send_routes(new_routes,[self._coop.asset_drone,])
# request this drone to enter a state ready to follow markers
self._coop.asset_drone_controller.request_state(3)
# Land drones in need
#self._coop.land_low_battery_drones()
# Check success
self.check_exit()
class TaskGood(State):
"""
ID = 2
The TaskGood state is for when the task is being achieved.
State entry requirements: task is being achieved.
State purpose: watch over situation to check for task not being achieved.
TASK - to move continuously around a loop without collision
State transition conditions:
State 0:
-
State 1:
airprox == True || observing_target == False
State 2:
-
"""
def __init__(self,_coop,drones,drone_controllers):
# Initialise as per State base class
State.__init__(self,_coop,drones,drone_controllers)
# Set exit conditions
self.exit_conditions = [{}, {'airprox':True,'observing_target':False}, {}]
self.exit_conditional = ['none','or','none']
print("======Task being achieved======")
def maintain(self):
if self._coop.allocate_asset():
# Keep asset centered on target
new_routes = self._coop._navigator.route_to_target(self._coop.swarm_status['position'][self.drones.index(self._coop.asset_drone)],0,self._coop.asset_drone)
self._coop.send_routes(new_routes,[self._coop.asset_drone,])
# request this drone to enter a state ready to follow markers
self._coop.asset_drone_controller.request_state(3)
# Land drones in need
#self._coop.land_low_battery_drones()
# Check State
self.check_exit()
def transition(self,state_id):
# Land drones in need
#self._coop.land_low_battery_drones()
# Check State
self.check_exit()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# name: test_website.py
# author: Harold Bradley III
# email: [email protected]
# created on: 12/11/2015
#
# pylint: disable=invalid-name,unused-argument
"""
Integration and unit tests for ww module's Website class and methods.
"""
from __future__ import absolute_import, print_function
import os
from mock import patch
import pytest
from ext_pylib.files import File
from ww import Website, WebsiteDomain
from ww import settings as s
from ww.website import merge_atts, localhost
def test_superuser():
"""Tests to ensure tests are run as superuser."""
if os.getuid() != 0:
assert False, "You must be a superuser to run these tests."
_INPUT = 'ext_pylib.input.prompts.INPUT'
TEST_DOMAIN = 'example.com'
TEST_ATTS = {
'root' : {
'path' : '/www/example.com/',
'perms' : 0775,
'owner' : 'www-data',
'group' : 'www-data',
},
'htdocs' : {'path' : '/www/htdocs/', },
'log' : {'path' : '/www/log/', },
'access_log' : {'path' : '/www/log/access_log', },
'vhost' : {'path' : '/etc/apache2/the_example.com.conf', },
'htaccess' : {
'path' : '/www/htdocs/.htaccess',
'sections' : [{'identifier' : 'h5g', 'path' : s.HTA_5G_TEMPLATE}, ]
},
}
def test_localhost_decorator():
"""Integration test for localhost() decorator.
This sets up a domain that does not point to this server.
Then it wraps a function with the localhost decorator that tests if the
domain points to this server. At this point it should. After the function
is called, however, the domain should again no longer point to this server.
"""
test_domain = WebsiteDomain(TEST_DOMAIN)
print('test_domain should not point to this server to begin with.')
assert not test_domain.verify(), \
"Make sure there is *not* a host entry for example.com before running the test!"
@localhost
def decorator_test(test_domain):
"""Function stub for testing localhost() decorator"""
print('test_domain *should* point to this server inside the decorated function.')
return test_domain.verify()
assert decorator_test(test_domain)
print('test_domain should *not* point to this server after the decorated function has run.')
assert not test_domain.verify()
MERGE_ATTS_ARGS = [
({'htdocs' : {'path' : '/default/path', 'perms' : 0700}, 'other' : 'default_value'},
{'htdocs' : {'path' : '/new/path'}},
{'htdocs' : {'path' : '/new/path', 'perms' : 0700}, 'other' : 'default_value'}),
({'htdocs' : {'path' : '/default/path', 'perms' : 0700}, 'other' : 'default_value'},
{},
{'htdocs' : {'path' : '/default/path', 'perms' : 0700}, 'other' : 'default_value'}),
({},
{'htdocs' : {'path' : '/default/path', 'perms' : 0700}, 'other' : 'default_value'},
{'htdocs' : {'path' : '/default/path', 'perms' : 0700}, 'other' : 'default_value'}),
(None,
{'htdocs' : {'path' : '/default/path', 'perms' : 0700}, 'other' : 'default_value'},
{'htdocs' : {'path' : '/default/path', 'perms' : 0700}, 'other' : 'default_value'}),
({'htdocs' : {'path' : '/default/path', 'perms' : 0700}, 'other' : 'default_value'},
None,
{'htdocs' : {'path' : '/default/path', 'perms' : 0700}, 'other' : 'default_value'}),
]
@pytest.mark.parametrize(("atts", "new_atts", "expected"), MERGE_ATTS_ARGS)
def test_merge_atts(atts, new_atts, expected):
"""Tests merge_atts function."""
assert merge_atts(atts, new_atts) == expected
def test_website_initialization():
"""Test initialize Website."""
website = Website(TEST_DOMAIN, TEST_ATTS)
assert website.domain == TEST_DOMAIN
assert website.root.path == TEST_ATTS['root']['path']
assert website.root.perms == TEST_ATTS['root']['perms']
assert website.root.owner == TEST_ATTS['root']['owner']
assert website.root.group == TEST_ATTS['root']['group']
assert website.htdocs.path == TEST_ATTS['htdocs']['path']
assert website.log.path == TEST_ATTS['log']['path']
assert website.access_log.path == TEST_ATTS['access_log']['path']
assert website.vhost.path == TEST_ATTS['vhost']['path']
assert website.htaccess.path == TEST_ATTS['htaccess']['path']
@patch('ww.website.Vhost.exists', return_value=True)
@patch('ww.website.Vhost.parse', return_value={})
def test_website_init_existing_vhost(mock_parse, mock_exists):
"""Test initialize Website."""
with patch(_INPUT, return_value='y'):
Website(TEST_DOMAIN, TEST_ATTS)
mock_exists.assert_called_once_with()
@patch('ww.website.Website.verify', return_value=True)
def test_website_repair(mock_verify):
"""Tests Website class verify method."""
website = Website(TEST_DOMAIN, TEST_ATTS)
website.repair()
mock_verify.assert_called_once_with(True)
def test_website_install_verify_remove():
"""Integration test: initializes, installs, verifies, and removes website."""
with patch(_INPUT, return_value='y'):
website = Website('example.com')
assert not website.is_installed(), "Website 'example.com' should not exist on this server."
assert not website.verify(), "Verification on a non-existing website should fail."
with patch(_INPUT, return_value='y'):
website.install()
@localhost
def installed_website_tests(website):
"""Function testing installed website. Wrapped by localhost() decorator."""
print('test_domain *should* point to this server inside the decorated function.')
assert website.is_installed()
assert website.verify(), "Freshly installed website should verify as true."
installed_website_tests(website)
website.remove(ask=False)
assert not website.is_installed()
assert not website.verify(), "Verification on a non-existing website should fail."
# Repeat the test for good measure:
with patch(_INPUT, return_value='y'):
website.install()
installed_website_tests(website)
website.remove(ask=False)
assert not website.is_installed()
assert not website.verify(), "Verification on a non-existing website should fail."
def test_website_pack_unpack():
"""Integration test: initializes, installs, verifies, packs, removes,
unpacks and removes website."""
with patch(_INPUT, return_value='y'):
website = Website('example.com')
assert not website.is_installed(), "Website 'example.com' should not exist on this server."
assert not website.verify(), "Verification on a non-existing website should fail."
with patch(_INPUT, return_value='y'):
website.install()
# Create a test file in htdocs
the_file = File({'path' : website.htdocs + '/index.html'})
the_file.data = 'Test file.'
the_file.create()
# Pack the site
website.pack()
website.remove(ask=False)
assert not website.is_installed()
assert not website.verify(), "Verification on a non-existing website should fail."
assert not the_file.exists()
with patch(_INPUT, return_value='y'):
website.unpack()
assert website.is_installed()
def verify(website):
"""Verify function to wrap with localhost decorator."""
assert website.verify()
localhost(verify)(website)
assert the_file.exists()
# Remove and check again for good measure
website.remove(ask=False)
assert not website.is_installed()
assert not website.verify(), "Verification on a non-existing website should fail."
assert not the_file.exists()
def test_website_migrate():
"""TODO:"""
pass
|
|
# Copyright 2013 Violin Memory, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Violin Memory iSCSI Driver for OpenStack Cinder
Provides iSCSI specific LUN services for V6000 series flash arrays.
This driver requires VMOS v6.3.0.4 or newer software on the array.
You will need to install the Violin Memory REST client library:
sudo pip install vmemclient
Set the following in the cinder.conf file to enable the VMEM V6000
ISCSI Driver along with the required flags:
volume_driver=cinder.volume.drivers.violin.v6000_iscsi.V6000ISCSIDriver
NOTE: this driver file requires the use of synchronization points for
certain types of backend operations, and as a result may not work
properly in an active-active HA configuration. See OpenStack Cinder
driver documentation for more information.
"""
import random
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import units
from cinder import context
from cinder.db.sqlalchemy import models
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder import utils
from cinder.volume import driver
from cinder.volume.drivers.san import san
from cinder.volume.drivers.violin import v6000_common
LOG = logging.getLogger(__name__)
class V6000ISCSIDriver(driver.ISCSIDriver):
"""Executes commands relating to iSCSI-based Violin Memory Arrays.
Version history:
1.0 - Initial driver
1.0.1 - Fixes polling for export completion
"""
VERSION = '1.0.1'
TARGET_GROUP_NAME = 'openstack'
def __init__(self, *args, **kwargs):
super(V6000ISCSIDriver, self).__init__(*args, **kwargs)
self.array_info = []
self.gateway_iscsi_ip_addresses_mga = []
self.gateway_iscsi_ip_addresses_mgb = []
self.stats = {}
self.configuration.append_config_values(v6000_common.violin_opts)
self.configuration.append_config_values(san.san_opts)
self.common = v6000_common.V6000Common(self.configuration)
LOG.info(_LI("Initialized driver %(name)s version: %(vers)s."),
{'name': self.__class__.__name__, 'vers': self.VERSION})
def do_setup(self, context):
"""Any initialization the driver does while starting."""
super(V6000ISCSIDriver, self).do_setup(context)
self.common.do_setup(context)
self.gateway_iscsi_ip_addresses_mga = self._get_active_iscsi_ips(
self.common.mga)
for ip in self.gateway_iscsi_ip_addresses_mga:
self.array_info.append({"node": self._get_hostname('mga'),
"addr": ip,
"conn": self.common.mga})
self.gateway_iscsi_ip_addresses_mgb = self._get_active_iscsi_ips(
self.common.mgb)
for ip in self.gateway_iscsi_ip_addresses_mgb:
self.array_info.append({"node": self._get_hostname('mgb'),
"addr": ip,
"conn": self.common.mgb})
# setup global target group for exports to use
self._create_iscsi_target_group()
def check_for_setup_error(self):
"""Returns an error if prerequisites aren't met."""
self.common.check_for_setup_error()
bn = "/vshare/config/iscsi/enable"
resp = self.common.vip.basic.get_node_values(bn)
if resp[bn] is not True:
raise exception.ViolinInvalidBackendConfig(
reason=_('iSCSI is not enabled'))
if len(self.gateway_iscsi_ip_addresses_mga) == 0:
raise exception.ViolinInvalidBackendConfig(
reason=_('no available iSCSI IPs on mga'))
if len(self.gateway_iscsi_ip_addresses_mgb) == 0:
raise exception.ViolinInvalidBackendConfig(
reason=_('no available iSCSI IPs on mgb'))
def create_volume(self, volume):
"""Creates a volume."""
self.common._create_lun(volume)
def delete_volume(self, volume):
"""Deletes a volume."""
self.common._delete_lun(volume)
def extend_volume(self, volume, new_size):
"""Deletes a volume."""
self.common._extend_lun(volume, new_size)
def create_snapshot(self, snapshot):
"""Creates a snapshot from an existing volume."""
self.common._create_lun_snapshot(snapshot)
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
self.common._delete_lun_snapshot(snapshot)
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
ctxt = context.get_admin_context()
snapshot['size'] = snapshot['volume']['size']
self.common._create_lun(volume)
self.copy_volume_data(ctxt, snapshot, volume)
def create_cloned_volume(self, volume, src_vref):
"""Creates a full clone of the specified volume."""
ctxt = context.get_admin_context()
self.common._create_lun(volume)
self.copy_volume_data(ctxt, src_vref, volume)
def ensure_export(self, context, volume):
"""Synchronously checks and re-exports volumes at cinder start time."""
pass
def create_export(self, context, volume, connector):
"""Exports the volume."""
pass
def remove_export(self, context, volume):
"""Removes an export for a logical volume."""
pass
def initialize_connection(self, volume, connector):
"""Initializes the connection (target<-->initiator)."""
igroup = None
if self.configuration.use_igroups:
#
# Most drivers don't use igroups, because there are a
# number of issues with multipathing and iscsi/fcp where
# lun devices either aren't cleaned up properly or are
# stale (from previous scans).
#
# If the customer really wants igroups for whatever
# reason, we create a new igroup for each host/hypervisor.
# Every lun that is exported to the particular
# hypervisor/host will be contained in this igroup. This
# should prevent other hosts from seeing luns they aren't
# using when they perform scans.
#
igroup = self.common._get_igroup(volume, connector)
self._add_igroup_member(connector, igroup)
tgt = self._get_iscsi_target()
target_name = self.TARGET_GROUP_NAME
if isinstance(volume, models.Volume):
lun = self._export_lun(volume, connector, igroup)
else:
lun = self._export_snapshot(volume, connector, igroup)
iqn = "%s%s:%s" % (self.configuration.iscsi_target_prefix,
tgt['node'], target_name)
self.common.vip.basic.save_config()
properties = {}
properties['target_discovered'] = False
properties['target_portal'] = '%s:%d' \
% (tgt['addr'], self.configuration.iscsi_port)
properties['target_iqn'] = iqn
properties['target_lun'] = lun
properties['volume_id'] = volume['id']
properties['auth_method'] = 'CHAP'
properties['auth_username'] = ''
properties['auth_password'] = ''
return {'driver_volume_type': 'iscsi', 'data': properties}
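    # Example (placeholder values) of the connection info returned above, as
    # consumed by the iSCSI connector on the compute host:
    #
    #   {'driver_volume_type': 'iscsi',
    #    'data': {'target_discovered': False,
    #             'target_portal': '<gateway-ip>:3260',
    #             'target_iqn': '<iscsi_target_prefix><node>:openstack',
    #             'target_lun': <lun-id>,
    #             'volume_id': '<volume-uuid>',
    #             'auth_method': 'CHAP', 'auth_username': '', 'auth_password': ''}}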
def terminate_connection(self, volume, connector, force=False, **kwargs):
"""Terminates the connection (target<-->initiator)."""
if isinstance(volume, models.Volume):
self._unexport_lun(volume)
else:
self._unexport_snapshot(volume)
self.common.vip.basic.save_config()
def get_volume_stats(self, refresh=False):
"""Get volume stats."""
if refresh or not self.stats:
self._update_stats()
return self.stats
def _create_iscsi_target_group(self):
"""Creates a new target for use in exporting a lun.
Create an HA target on the backend that will be used for all
lun exports made via this driver.
The equivalent CLI commands are "iscsi target create
<target_name>" and "iscsi target bind <target_name> to
<ip_of_mg_eth_intf>".
"""
v = self.common.vip
target_name = self.TARGET_GROUP_NAME
bn = "/vshare/config/iscsi/target/%s" % target_name
resp = self.common.vip.basic.get_node_values(bn)
if resp:
LOG.debug("iscsi target group %s already exists.", target_name)
return
LOG.debug("Creating iscsi target %s.", target_name)
try:
self.common._send_cmd_and_verify(v.iscsi.create_iscsi_target,
self._wait_for_target_state,
'', [target_name], [target_name])
except Exception:
LOG.exception(_LE("Failed to create iscsi target!"))
raise
try:
self.common._send_cmd(self.common.mga.iscsi.bind_ip_to_target,
'', target_name,
self.gateway_iscsi_ip_addresses_mga)
self.common._send_cmd(self.common.mgb.iscsi.bind_ip_to_target,
'', target_name,
self.gateway_iscsi_ip_addresses_mgb)
except Exception:
LOG.exception(_LE("Failed to bind iSCSI targets!"))
raise
def _get_iscsi_target(self):
"""Get a random target IP for OpenStack to connect to.
For the non-multipath case we pick a single random target for
        the OpenStack infrastructure to use. This at least allows us
to evenly distribute LUN connections across the storage
cluster.
"""
return self.array_info[random.randint(0, len(self.array_info) - 1)]
@utils.synchronized('vmem-export')
def _export_lun(self, volume, connector=None, igroup=None):
"""Generates the export configuration for the given volume.
The equivalent CLI command is "lun export container
<container_name> name <lun_name>"
Arguments:
volume -- volume object provided by the Manager
connector -- connector object provided by the Manager
igroup -- name of igroup to use for exporting
Returns:
lun_id -- the LUN ID assigned by the backend
"""
lun_id = -1
export_to = ''
v = self.common.vip
if igroup:
export_to = igroup
elif connector:
export_to = connector['initiator']
else:
raise exception.Error(_("No initiators found, cannot proceed"))
target_name = self.TARGET_GROUP_NAME
LOG.debug("Exporting lun %s.", volume['id'])
try:
self.common._send_cmd_and_verify(
v.lun.export_lun, self.common._wait_for_export_state, '',
[self.common.container, volume['id'], target_name,
export_to, 'auto'], [volume['id'], None, True])
except Exception:
LOG.exception(_LE("LUN export for %s failed!"), volume['id'])
raise
lun_id = self.common._get_lun_id(volume['id'])
return lun_id
@utils.synchronized('vmem-export')
def _unexport_lun(self, volume):
"""Removes the export configuration for the given volume.
The equivalent CLI command is "no lun export container
<container_name> name <lun_name>"
Arguments:
volume -- volume object provided by the Manager
"""
v = self.common.vip
LOG.debug("Unexporting lun %s.", volume['id'])
try:
self.common._send_cmd_and_verify(
v.lun.unexport_lun, self.common._wait_for_export_state, '',
[self.common.container, volume['id'], 'all', 'all', 'auto'],
[volume['id'], None, False])
except exception.ViolinBackendErrNotFound:
LOG.debug("Lun %s already unexported, continuing.", volume['id'])
except Exception:
LOG.exception(_LE("LUN unexport for %s failed!"), volume['id'])
raise
@utils.synchronized('vmem-export')
def _export_snapshot(self, snapshot, connector=None, igroup=None):
"""Generates the export configuration for the given snapshot.
The equivalent CLI command is "snapshot export container
PROD08 lun <snapshot_name> name <volume_name>"
Arguments:
snapshot -- snapshot object provided by the Manager
connector -- connector object provided by the Manager
igroup -- name of igroup to use for exporting
Returns:
lun_id -- the LUN ID assigned by the backend
"""
lun_id = -1
export_to = ''
v = self.common.vip
target_name = self.TARGET_GROUP_NAME
LOG.debug("Exporting snapshot %s.", snapshot['id'])
if igroup:
export_to = igroup
elif connector:
export_to = connector['initiator']
else:
raise exception.Error(_("No initiators found, cannot proceed"))
try:
self.common._send_cmd(v.snapshot.export_lun_snapshot, '',
self.common.container, snapshot['volume_id'],
snapshot['id'], export_to, target_name,
'auto')
except Exception:
LOG.exception(_LE("Snapshot export for %s failed!"),
snapshot['id'])
raise
else:
self.common._wait_for_export_state(snapshot['volume_id'],
snapshot['id'], state=True)
lun_id = self.common._get_snapshot_id(snapshot['volume_id'],
snapshot['id'])
return lun_id
@utils.synchronized('vmem-export')
def _unexport_snapshot(self, snapshot):
"""Removes the export configuration for the given snapshot.
The equivalent CLI command is "no snapshot export container
PROD08 lun <snapshot_name> name <volume_name>"
Arguments:
snapshot -- snapshot object provided by the Manager
"""
v = self.common.vip
LOG.debug("Unexporting snapshot %s.", snapshot['id'])
try:
self.common._send_cmd(v.snapshot.unexport_lun_snapshot, '',
self.common.container, snapshot['volume_id'],
snapshot['id'], 'all', 'all', 'auto', False)
except Exception:
LOG.exception(_LE("Snapshot unexport for %s failed!"),
snapshot['id'])
raise
else:
self.common._wait_for_export_state(snapshot['volume_id'],
snapshot['id'], state=False)
def _add_igroup_member(self, connector, igroup):
"""Add an initiator to an igroup so it can see exports.
The equivalent CLI command is "igroup addto name <igroup_name>
initiators <initiator_name>"
Arguments:
connector -- connector object provided by the Manager
"""
v = self.common.vip
LOG.debug("Adding initiator %s to igroup.", connector['initiator'])
resp = v.igroup.add_initiators(igroup, connector['initiator'])
if resp['code'] != 0:
raise exception.Error(
_('Failed to add igroup member: %(code)d, %(message)s') % resp)
def _update_stats(self):
"""Update array stats.
Gathers array stats from the backend and converts them to GB values.
"""
data = {}
total_gb = 0
free_gb = 0
v = self.common.vip
master_cluster_id = v.basic.get_node_values(
'/cluster/state/master_id').values()[0]
bn1 = "/vshare/state/global/%s/container/%s/total_bytes" \
% (master_cluster_id, self.common.container)
bn2 = "/vshare/state/global/%s/container/%s/free_bytes" \
% (master_cluster_id, self.common.container)
resp = v.basic.get_node_values([bn1, bn2])
if bn1 in resp:
total_gb = resp[bn1] / units.Gi
else:
LOG.warning(_LW("Failed to receive update for total_gb stat!"))
if 'total_capacity_gb' in self.stats:
total_gb = self.stats['total_capacity_gb']
if bn2 in resp:
free_gb = resp[bn2] / units.Gi
else:
LOG.warning(_LW("Failed to receive update for free_gb stat!"))
if 'free_capacity_gb' in self.stats:
free_gb = self.stats['free_capacity_gb']
backend_name = self.configuration.volume_backend_name
data['volume_backend_name'] = backend_name or self.__class__.__name__
data['vendor_name'] = 'Violin Memory, Inc.'
data['driver_version'] = self.VERSION
data['storage_protocol'] = 'iSCSI'
data['reserved_percentage'] = 0
data['QoS_support'] = False
data['total_capacity_gb'] = total_gb
data['free_capacity_gb'] = free_gb
for i in data:
LOG.debug("stat update: %(name)s=%(data)s.",
{'name': i, 'data': data[i]})
self.stats = data
def _get_short_name(self, volume_name):
"""Creates a vSHARE-compatible iSCSI target name.
The Folsom-style volume names are prefix(7) + uuid(36), which
is too long for vSHARE target names. To keep things
simple we can just truncate the name to 32 chars.
Arguments:
volume_name -- name of volume/lun
Returns:
Shortened volume name as a string.
"""
return volume_name[:32]
def _get_active_iscsi_ips(self, mg_conn):
"""Get a list of gateway IP addresses that can be used for iSCSI.
Arguments:
mg_conn -- active XG connection to one of the gateways
Returns:
active_gw_iscsi_ips -- list of IP addresses
"""
active_gw_iscsi_ips = []
interfaces_to_skip = ['lo', 'vlan10', 'eth1', 'eth2', 'eth3']
bn = "/net/interface/config/*"
intf_list = mg_conn.basic.get_node_values(bn)
for i in intf_list:
if intf_list[i] in interfaces_to_skip:
continue
bn1 = "/net/interface/state/%s/addr/ipv4/1/ip" % intf_list[i]
bn2 = "/net/interface/state/%s/flags/link_up" % intf_list[i]
resp = mg_conn.basic.get_node_values([bn1, bn2])
if len(resp.keys()) == 2 and resp[bn2] is True:
active_gw_iscsi_ips.append(resp[bn1])
return active_gw_iscsi_ips
def _get_hostname(self, mg_to_query=None):
"""Get the hostname of one of the mgs (hostname is used in IQN).
If the remote query fails then fall back to using the hostname
provided in the cinder configuration file.
Arguments:
mg_to_query -- name of gateway to query 'mga' or 'mgb'
Returns: hostname -- hostname as a string
"""
hostname = self.configuration.san_ip
conn = self.common.vip
if mg_to_query == "mga":
hostname = self.configuration.gateway_mga
conn = self.common.mga
elif mg_to_query == "mgb":
hostname = self.configuration.gateway_mgb
conn = self.common.mgb
ret_dict = conn.basic.get_node_values("/system/hostname")
if ret_dict:
hostname = ret_dict.items()[0][1]
else:
LOG.debug("Unable to fetch gateway hostname for %s.", mg_to_query)
return hostname
def _wait_for_target_state(self, target_name):
"""Polls backend to verify an iscsi target configuration.
This function will try to verify the creation of an iscsi
target on both gateway nodes of the array every 5 seconds.
Arguments:
target_name -- name of iscsi target to be polled
Returns:
True if the target state was correctly added
"""
bn = "/vshare/state/local/target/iscsi/%s" % (target_name)
def _loop_func():
status = [False, False]
mg_conns = [self.common.mga, self.common.mgb]
LOG.debug("Entering _wait_for_target_state loop: target=%s.",
target_name)
for node_id in range(2):
resp = mg_conns[node_id].basic.get_node_values(bn)
if len(resp.keys()):
status[node_id] = True
if status[0] and status[1]:
raise loopingcall.LoopingCallDone(retvalue=True)
timer = loopingcall.FixedIntervalLoopingCall(_loop_func)
success = timer.start(interval=5).wait()
return success
|
|
'''
Created on 26 May 2013
@author: lukasz.forynski
@brief: Implementation of the multi-key dictionary.
https://github.com/formiaczek/python_data_structures
___________________________________
Copyright (c) 2013 Lukasz Forynski <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify, merge,
publish, distribute, sub-license, and/or sell copies of the Software, and to permit persons
to whom the Software is furnished to do so, subject to the following conditions:
- The above copyright notice and this permission notice shall be included in all copies
or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
'''
class multi_key_dict(object):
""" Purpose of this type is to provie a multi-key dictionary.
This kind of dictionary has a similar interface to the standard dictionary, and indeed if used
with single key key elements - it's behaviour is the same as for a standard dict().
However it also allows for creation elements using multiple keys (using tuples/lists).
Such elements can be accessed using either of those keys (e.g read/updated/deleted).
Dictionary provides also extended interface for iterating over items and keys by the key type.
This can be useful e.g.: when creating dictionaries with (index,name) allowing to iterate over
items using either: names or indexes. It can be useful for many many other similar use-cases,
and there is no limit to the number of keys used to map to the value.
There are also methods to find other keys mapping to the same value as the specified keys etc.
Refer to examples and test code to see it in action.
simple example:
k = multi_key_dict()
k[100] = 'hundred' # add item to the dictionary (as for normal dictionary)
# but also:
# below creates entry with two possible key types: int and str,
# mapping all keys to the assigned value
k[1000, 'kilo', 'k'] = 'kilo (x1000)'
print k[1000] # will print 'kilo (x1000)'
print k['k'] # will also print 'kilo (x1000)'
# Objects can be updated the same way, and if an object is updated using one key, the new value will
# be accessible using any of its other keys, e.g. for the example above:
k['kilo'] = 'kilo'
print k[1000] # will print 'kilo' as value was updated
"""
def __getitem__(self, key):
""" Return the value at index specified as key."""
if self.has_key(key):
return self.items_dict[self.__dict__[str(type(key))][key]]
else:
raise KeyError(key)
def __setitem__(self, keys, value):
""" Set the value at index (or list of indexes) specified as keys.
Note, that if multiple key list is specified, either:
- none of keys should map to an existing item already (item creation), or
- all of keys should map to exactly the same item (as previously created)
(item update)
If this is not the case - KeyError is raised. """
if(type(keys) in [tuple, list]):
num_of_keys_we_have = reduce(lambda x, y: x+y, map(lambda x : self.has_key(x), keys))
if num_of_keys_we_have:
all_select_same_item = True
direct_key = None
for key in keys:
key_type = str(type(key))
try:
if not direct_key:
direct_key = self.__dict__[key_type][key]
else:
new = self.__dict__[key_type][key]
if new != direct_key:
all_select_same_item = False
break
except Exception, err:
all_select_same_item = False
break
if not all_select_same_item:
raise KeyError(', '.join(str(key) for key in keys))
first_key = keys[0] # a combination of keys is allowed; simply use the first one
else:
first_key = keys
key_type = str(type(first_key)) # find the intermediate dictionary..
if self.has_key(first_key):
self.items_dict[self.__dict__[key_type][first_key]] = value # .. and update the object if it exists..
else:
if(type(keys) not in [tuple, list]):
key = keys
keys = [keys]
self.__add_item(value, keys) # .. or create it - if it doesn't
def __delitem__(self, key):
""" Called to implement deletion of self[key]."""
key_type = str(type(key))
if (self.has_key(key) and
self.items_dict and
self.items_dict.has_key(self.__dict__[key_type][key])):
intermediate_key = self.__dict__[key_type][key]
# remove the item in main dictionary
del self.items_dict[intermediate_key]
# remove all references (also pointed by other types of keys)
# for the item that this key pointed to.
for name, reference_dict in self.__dict__.iteritems():
if(type(name) == str and name.find('<type') == 0):
ref_key = None
for temp_key, value in reference_dict.iteritems():
if value == intermediate_key:
ref_key = temp_key
break
if ref_key:
del reference_dict[ref_key]
else:
raise KeyError(key)
def has_key(self, key):
""" Returns True if this object contains an item referenced by the key."""
key_type = str(type(key))
if self.__dict__.has_key(key_type):
if self.__dict__[key_type].has_key(key):
return True
return False
def get_other_keys(self, key, including_current=False):
""" Returns list of other keys that are mapped to the same value as specified key.
@param key - key for which other keys should be returned.
@param including_current if set to True - key will also appear on this list."""
other_keys = []
if self.has_key(key):
intermediate_key = self.__dict__[str(type(key))][key]
other_keys.extend(self.__all_keys_from_intermediate_key(intermediate_key))
if not including_current:
other_keys.remove(key)
return other_keys
def iteritems(self, key_type=None, return_all_keys=False):
""" Returns an iterator over the dictionary's (key, value) pairs.
@param key_type if specified, iterator will be returning only (key,value) pairs for this type of key.
Otherwise (if not specified) ((keys,...), value)
i.e. (tuple of keys, values) pairs for all items in this dictionary will be generated.
@param return_all_keys if set to True - tuple of keys is returned instead of a key of this type."""
if key_type is not None:
key = str(key_type)
if self.__dict__.has_key(key):
for key, intermediate_key in self.__dict__[key].iteritems():
if return_all_keys:
keys = self.__all_keys_from_intermediate_key(intermediate_key)
yield keys, self.items_dict[intermediate_key]
else:
yield key, self.items_dict[intermediate_key]
else:
for multi_key_type, value in self.items_dict.iteritems():
keys = self.__all_keys_from_intermediate_key(multi_key_type)
yield keys, value
def iterkeys(self, key_type=None, return_all_keys=False):
""" Returns an iterator over the dictionary's keys.
@param key_type if specified, iterator for a dictionary of this type will be used.
Otherwise (if not specified) tuples containing all (multiple) keys
for this dictionary will be generated.
@param return_all_keys if set to True - tuple of keys is returned instead of a key of this type."""
if(key_type is not None):
the_key = str(key_type)
if self.__dict__.has_key(the_key):
for key in self.__dict__[the_key].iterkeys():
if return_all_keys:
intermediate_key = self.__dict__[the_key][key]
keys = self.__all_keys_from_intermediate_key(intermediate_key)
yield keys
else:
yield key
else:
for multi_key_type in self.items_dict.keys():
yield self.__all_keys_from_intermediate_key(multi_key_type)
def itervalues(self, key_type=None):
""" Returns an iterator over the dictionary's values.
@param key_type if specified, iterator will be returning only values pointed by keys of this type.
Otherwise (if not specified) all values in this dictionary will be generated."""
if(key_type is not None):
intermediate_key = str(key_type)
if self.__dict__.has_key(intermediate_key):
for direct_key in self.__dict__[intermediate_key].itervalues():
yield self.items_dict[direct_key]
else:
for value in self.items_dict.itervalues():
yield value
def items(self, key_type=None, return_all_keys=False):
""" Return a copy of the dictionary's list of (key, value) pairs.
@param key_type if specified, (key, value) pairs for keys of this type will be returned.
Otherwise list of pairs: ((keys), value) for all items will be returned.
@param return_all_keys if set to True - tuple of keys is returned instead of a key of this type."""
all_items = []
if key_type is not None:
keys_used_so_far = set()
direct_key = str(key_type)
if self.__dict__.has_key(direct_key):
for key, intermediate_key in self.__dict__[direct_key].iteritems():
if not intermediate_key in keys_used_so_far:
keys_used_so_far.add(intermediate_key)
if return_all_keys:
keys = self.__all_keys_from_intermediate_key(intermediate_key)
all_items.append((keys, self.items_dict[intermediate_key]))
else:
all_items.append((key, self.items_dict[intermediate_key]))
else:
for multi_key_type, value in self.items_dict.iteritems():
all_items.append((self.__all_keys_from_intermediate_key(multi_key_type), value))
return all_items
def keys(self, key_type=None):
""" Returns a copy of the dictionary's keys.
@param key_type if specified, only keys for this type will be returned.
Otherwise list of tuples containing all (multiple) keys will be returned."""
if key_type is not None:
intermediate_key = str(key_type)
if self.__dict__.has_key(intermediate_key):
return self.__dict__[intermediate_key].keys()
else:
# keys will contain lists of keys
all_keys = []
for multi_key_type in self.items_dict.keys():
all_keys.append(self.__all_keys_from_intermediate_key(multi_key_type))
return all_keys
def values(self, key_type=None):
""" Returns a copy of the dictionary's values.
@param key_type if specified, only values pointed by keys of this type will be returned.
Otherwise list of all values contained in this dictionary will be returned."""
if(key_type is not None):
all_items = []
keys_used = set()
direct_key = str(key_type)
if self.__dict__.has_key(direct_key):
for intermediate_key in self.__dict__[direct_key].itervalues():
if not intermediate_key in keys_used:
all_items.append(self.items_dict[intermediate_key])
keys_used.add(intermediate_key)
return all_items
else:
return self.items_dict.values()
def __len__(self):
""" Returns number of objects in dictionary."""
length = 0
if self.__dict__.has_key('items_dict'):
length = len(self.items_dict)
return length
def __add_item(self, item, keys=None):
""" Internal method to add an item to the multi-key dictionary"""
if(not keys or not len(keys)):
raise Exception('Error in %s.__add_item(%s, keys=tuple/list of items): need to specify a tuple/list containing at least one key!'
% (self.__class__.__name__, str(item)))
# joined values of keys will be used as a direct key. We'll encode type and key too..
direct_key = '`'.join([key.__class__.__name__+':' +str(key) for key in keys])
for key in keys:
key_type = str(type(key))
# store direct key as a value in an intermediate dictionary
if(not self.__dict__.has_key(key_type)):
self.__setattr__(key_type, dict())
self.__dict__[key_type][key] = direct_key
# store the value in the actual dictionary
if(not self.__dict__.has_key('items_dict')):
self.items_dict = dict()
self.items_dict[direct_key] = item
def __all_keys_from_intermediate_key(self, intermediate_key):
""" Internal method to find the tuple containing multiple keys"""
keys = []
# since we're trying to reverse-find keys for a value in number of dicts,
# (which is far from optimal, but re-creating objects from the intermediate keys
# doesn't work for more complex types loaded from sub-modules) - at least we'll
# try to do that only for the correct dictionary (and not all of them)
key_types = set([tv.split(':', 1)[0] for tv in intermediate_key.split('`')])
is_correct_dict = lambda key: True in [str(key).startswith('<type \'%s' % k) for k in key_types]
for key, val in self.__dict__.items():
if type(val) == dict and is_correct_dict(key):
keys.extend([k for k, v in val.items() if v == intermediate_key])
return(tuple(keys))
def get(self, key, default=None):
""" Return the value at index specified as key."""
if self.has_key(key):
return self.items_dict[self.__dict__[str(type(key))][key]]
else:
return default
def __str__(self):
items = []
str_repr = lambda x: '\'%s\'' % x if type(x) == str else str(x)
if hasattr(self, 'items_dict'):
for (keys, value) in self.items():
keys_str = [str_repr(k) for k in keys]
items.append('(%s): %s' % (', '.join(keys_str),
str_repr(value)))
dict_str = '{%s}' % ( ', '.join(items))
return dict_str
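# Illustrative usage sketch (added commentary, not part of the original module): a minimal
# example of the type-scoped iteration described in the class docstring. The keys and values
# below ('weight', 'w', 75.5, ...) are made up purely for illustration.
def _example_key_type_iteration():
    d = multi_key_dict()
    d['weight', 'w'] = 75.5
    d['height', 'h'] = 1.82
    # iterate only over the str keys registered in the dictionary
    for key, value in d.iteritems(str):
        print key, value  # e.g. 'weight' 75.5, 'w' 75.5, 'height' 1.82, 'h' 1.82
    # find every key that aliases the same value as 'w'
    return d.get_other_keys('w', including_current=True)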
def test_multi_key_dict():
contains_all = lambda cont, in_items: not (False in [c in cont for c in in_items])
m = multi_key_dict()
assert( len(m) == 0 ), 'expected len(m) == 0'
all_keys = list()
m['aa', 12, 32, 'mmm'] = 123 # create a value with multiple keys..
assert( len(m) == 1 ), 'expected len(m) == 1'
all_keys.append(('aa', 'mmm', 32, 12)) # store it for later
# try retrieving other keys mapped to the same value using one of them
res = m.get_other_keys('aa')
assert(contains_all(res, ['mmm', 32, 12])), 'get_other_keys(\'aa\'): %s other than expected: %s ' % (m, ['mmm', 32, 12])
# try retrieving other keys mapped to the same value using one of them: also include this key
res = m.get_other_keys(32, True)
assert(contains_all(res, ['aa', 'mmm', 32, 12])), 'get_other_keys(32): %s other than expected: %s ' % (res, ['aa', 'mmm', 32, 12])
assert( m.has_key('aa') == True ), 'expected m.has_key(\'aa\') == True'
assert( m.has_key('aab') == False ), 'expected m.has_key(\'aab\') == False'
assert( m.has_key(12) == True ), 'expected m.has_key(12) == True'
assert( m.has_key(13) == False ), 'expected m.has_key(13) == False'
assert( m.has_key(32) == True ), 'expected m.has_key(32) == True'
m['something else'] = 'abcd'
assert( len(m) == 2 ), 'expected len(m) == 2'
all_keys.append(('something else',)) # store for later
m[23] = 0
assert( len(m) == 3 ), 'expected len(m) == 3'
all_keys.append((23,)) # store for later
# check if it's possible to read this value back using either of keys
assert( m['aa'] == 123 ), 'expected m[\'aa\'] == 123'
assert( m[12] == 123 ), 'expected m[12] == 123'
assert( m[32] == 123 ), 'expected m[32] == 123'
assert( m['mmm'] == 123 ), 'expected m[\'mmm\'] == 123'
# now update value and again - confirm it back - using different keys..
m['aa'] = 45
assert( m['aa'] == 45 ), 'expected m[\'aa\'] == 45'
assert( m[12] == 45 ), 'expected m[12] == 45'
assert( m[32] == 45 ), 'expected m[32] == 45'
assert( m['mmm'] == 45 ), 'expected m[\'mmm\'] == 45'
m[12] = '4'
assert( m['aa'] == '4' ), 'expected m[\'aa\'] == \'4\''
assert( m[12] == '4' ), 'expected m[12] == \'4\''
# test __str__
m_str_exp = '{(23): 0, (\'aa\', \'mmm\', 32, 12): \'4\', (\'something else\'): \'abcd\'}'
m_str = str(m)
assert (len(m_str) > 0), 'str(m) should not be empty!'
assert (m_str[0] == '{'), 'str(m) should start with \'{\', but does with \'%c\'' % m_str[0]
assert (m_str[-1] == '}'), 'str(m) should end with \'}\', but does with \'%c\'' % m_str[-1]
# check if all key-values are there as expected. They might be sorted differently
def get_values_from_str(dict_str):
sorted_keys_and_value = []
for k in dict_str.split(', ('):
keys, val = k.strip('{}() ').replace(')', '').split(':')
keys = sorted([k.strip() for k in keys.split(',')])
sorted_keys_and_value.append((keys, val))
return sorted_keys_and_value
exp = get_values_from_str(m_str_exp)
act = get_values_from_str(m_str)
assert (contains_all(act, exp)), 'str(m) values: \'{0}\' are not {1} '.format(act, exp)
# try accessing / creating new (keys)-> value mapping whilst one of these
# keys already maps to a value in this dictionary
try:
m['aa', 'bb'] = 'something new'
assert(False), 'Should not allow adding multiple-keys when one of keys (\'aa\') already exists!'
except KeyError, err:
pass
# now check if we can get all possible keys (formed in a list of tuples
# each tuple containing all keys)
res = sorted([sorted(k) for k in m.keys()])
all_keys = sorted([sorted(k) for k in all_keys])
assert(contains_all(res, all_keys)), 'unexpected values from m.keys(), got:\n%s\n expected:\n%s)' %(res, all_keys)
# check default iteritems (which will unpack a tuple with key(s) and value)
all_keys = [sorted(k) for k in all_keys]
num_of_elements = 0
for keys, value in m.iteritems():
num_of_elements += 1
assert(sorted(keys) in all_keys), 'm.iteritems(): unexpected keys: %s' % (keys)
assert(m[keys[0]] == value), 'm.iteritems(): unexpected value: %s (keys: %s)' % (value, keys)
assert(num_of_elements > 0), 'm.iteritems() returned generator that did not produce anything'
# test default iterkeys()
num_of_elements = 0
for keys in m.iterkeys():
num_of_elements += 1
assert(sorted(keys) in all_keys), 'm.iterkeys(): unexpected keys: %s' % (keys)
assert(num_of_elements > 0), 'm.iterkeys() returned generator that did not produce anything'
# test iterkeys(int, True): useful to get all info from the dictionary
# dictionary is iterated over the type specified, but all keys are returned.
num_of_elements = 0
for keys in m.iterkeys(int, True):
num_of_elements += 1
assert(sorted(keys) in all_keys), 'm.iterkeys(int, True): unexpected keys: %s' % (keys)
assert(num_of_elements > 0), 'm.iterkeys(int, True) returned generator that did not produce anything'
# test values for different types of keys()
values_for_int_keys = sorted([0, '4'])
assert (sorted(m.values(int)) == values_for_int_keys), 'm.values(int) are %s, but expected: %s.' % (sorted(m.values(int)),
values_for_int_keys)
values_for_str_keys = sorted(['4', 'abcd'])
assert (sorted(m.values(str)) == values_for_str_keys), 'm.values(str) are %s, but expected: %s.' % (sorted(m.values(str)),
values_for_str_keys)
current_values = sorted([0, '4', 'abcd']) # default (should give all values)
assert (sorted(m.values()) == current_values), 'm.values() are %s, but expected: %s.' % (sorted(m.values()),
current_values)
#test itervalues() (default) - should return all values. (Itervalues for other types are tested below)
vals = []
for value in m.itervalues():
vals.append(value)
assert (current_values == sorted(vals)), 'itervalues(): expected %s, but collected %s' % (current_values, sorted(vals))
#test items(int)
items_for_int = sorted([(32, '4'), (23, 0)])
assert (items_for_int == sorted(m.items(int))), 'items(int): expected %s, but collected %s' % (items_for_int,
sorted(m.items(int)))
# test items(str)
items_for_str = sorted([('aa', '4'), ('something else', 'abcd')])
assert (items_for_str == sorted(m.items(str))), 'items(str): expected %s, but collected %s' % (items_for_str,
sorted(m.items(str)))
# test items() (default - all items)
all_items = [((('aa', 'mmm', 32, 12), '4')), (('something else',), 'abcd'), ((23,), 0)]
all_items = sorted([sorted(k) for k in [sorted(kk) for kk in all_items]])
res = sorted([sorted(k) for k in m.items()])
assert (all_items == res), 'items() (all items): expected %s,\n\t\t\t\tbut collected %s' % (all_items, res)
# now test deletion..
curr_len = len(m)
del m[12]
assert( len(m) == curr_len - 1 ), 'expected len(m) == %d' % (curr_len - 1)
# try again
try:
del m['aa']
assert(False), 'can\'t remove again: item m[\'aa\'] should not exist!'
except KeyError, err:
pass
# try to access non-existing
try:
k = m['aa']
assert(False), 'removed item m[\'aa\'] should not exist!'
except KeyError, err:
pass
# try to access non-existing with a different key
try:
k = m[12]
assert(False), 'removed item m[12] should not exist!'
except KeyError, err:
pass
# prepare for other tests (also testing creation of new items)
tst_range = range(10, 40) + range(50, 70)
for i in tst_range:
m[i] = i # will create a dictionary, where keys are same as items
# test iteritems()
for key, value in m.iteritems(int):
assert(key == value), 'iteritems(int): expected %d, but received %d' % (key, value)
# test iterkeys()
num_of_elements = 0
curr_index_in_range = 0
for key in m.iterkeys(int):
expected = tst_range[curr_index_in_range]
assert (key == expected), 'iterkeys(int): expected %d, but received %d' % (expected, key)
curr_index_in_range += 1
num_of_elements += 1
assert(num_of_elements > 0), 'm.iteritems(int) returned generator that did not produce anything'
#test itervalues(int)
curr_index_in_range = 0
num_of_elements = 0
for value in m.itervalues(int):
expected = tst_range[curr_index_in_range]
assert (value == expected), 'itervalues(int): expected %d, but received %d' % (expected, value)
curr_index_in_range += 1
num_of_elements += 1
assert(num_of_elements > 0), 'm.itervalues(int) returned generator that did not produce anything'
# test values(int)
assert (m.values(int) == tst_range), 'm.values(int) is not as expected.'
# test keys()
assert (m.keys(int) == tst_range), 'm.keys(int) is not as expected.'
# test setitem with multiple keys
m['xy', 999, 'abcd'] = 'teststr'
try:
m['xy', 998] = 'otherstr'
assert(False), 'creating / updating m[\'xy\', 998] should fail!'
except KeyError, err:
pass
# test setitem where one of the keys already maps to a different item
m['cd'] = 'somethingelse'
try:
m['cd', 999] = 'otherstr'
assert(False), 'creating / updating m[\'cd\', 999] should fail!'
except KeyError, err:
pass
m['xy', 999] = 'otherstr'
assert (m['xy'] == 'otherstr'), 'm[\'xy\'] is not as expected.'
assert (m[999] == 'otherstr'), 'm[999] is not as expected.'
assert (m['abcd'] == 'otherstr'), 'm[\'abcd\'] is not as expected.'
m['abcd', 'xy'] = 'another'
assert (m['xy'] == 'another'), 'm[\'xy\'] is not == \'another\'.'
assert (m[999] == 'another'), 'm[999] is not == \'another\''
assert (m['abcd'] == 'another'), 'm[\'abcd\'] is not == \'another\'.'
# test get functionality of basic dictionaries
m['CanIGet'] = 'yes'
assert (m.get('CanIGet') == 'yes')
assert (m.get('ICantGet') == None)
assert (m.get('ICantGet', "Ok") == "Ok")
k = multi_key_dict()
k['1:12', 1] = 'key_has_:'
k.items() # should not cause any problems to have : in key
assert (k[1] == 'key_has_:'), 'k[1] is not equal to \'key_has_:\''
import datetime
n = datetime.datetime.now()
l = multi_key_dict()
l[n] = 'now' # use datetime obj as a key
#test keys..
r = l.keys()[0]
assert(r == (n,)), 'Expected {0} (tuple with all key types) as a 1st key, but got: {1}'.format((n,), r)
r = l.keys(datetime.datetime)[0]
assert(r == n), 'Expected {0} as a key, but got: {1}'.format(n, r)
assert(l.values() == ['now']), 'Expected values: {0}, but got: {1}'.format(['now'], l.values())
# test items..
exp_items = [((n,), 'now')]
r = l.items()
assert(r == exp_items), 'Expected for items(): tuple of keys: {0}, but got: {1}'.format(exp_items, r)
assert(r[0][1] == 'now'), 'Expected for items(): value: {0}, but got: {1}'.format('now',
r[0][1])
print 'All test passed OK!'
if __name__ == '__main__':
try:
test_multi_key_dict()
except KeyboardInterrupt:
print '\n(interrupted by user)'
|
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
import numpy as np
import torch
from torch import nn
from ..structures.bounding_box import BoxList
from .utils import meshgrid
import maskrcnn_benchmark._C as C
# meshgrid = torch.meshgrid
class AnchorGenerator(nn.Module):
"""
For a set of image sizes and feature maps, computes a set
of anchors
"""
def __init__(
self,
scales=(0.5, 1.0, 2.0),
aspect_ratios=(0.5, 1.0, 2.0),
base_anchor_size=256,
anchor_stride=16,
straddle_thresh=0,
*args,
**kwargs
):
super(AnchorGenerator, self).__init__()
# TODO complete and fix
sizes = tuple(i * base_anchor_size for i in scales)
cell_anchors = generate_anchors(anchor_stride, sizes, aspect_ratios).float()
self.stride = anchor_stride
self.cell_anchors = cell_anchors
self.straddle_thresh = straddle_thresh
def num_anchors_per_location(self):
return [len(self.cell_anchors)]
def forward_single_image(self, image_sizes, feature_map):
device = feature_map.device
grid_height, grid_width = feature_map.shape[-2:]
stride = self.stride
shifts_x = torch.arange(
0, grid_width * stride, step=stride, dtype=torch.float32, device=device
)
shifts_y = torch.arange(
0, grid_height * stride, step=stride, dtype=torch.float32, device=device
)
shift_x, shift_y = meshgrid(shifts_x, shifts_y)
shift_x = shift_x.view(-1)
shift_y = shift_y.view(-1)
shifts = torch.stack((shift_x, shift_y, shift_x, shift_y), dim=1)
self.cell_anchors = self.cell_anchors.to(device)
A = self.cell_anchors.size(0)
K = shifts.size(0)
field_of_anchors = self.cell_anchors.view(1, A, 4) + shifts.view(
1, K, 4
).permute(1, 0, 2)
anchors = field_of_anchors.view(K * A, 4)
# add visibility information to anchors
image_height, image_width = image_sizes
if self.straddle_thresh >= 0:
inds_inside = (
(anchors[..., 0] >= -self.straddle_thresh)
& (anchors[..., 1] >= -self.straddle_thresh)
& (anchors[..., 2] < image_width + self.straddle_thresh)
& (anchors[..., 3] < image_height + self.straddle_thresh)
)
else:
inds_inside = torch.ones(anchors.shape[0], dtype=torch.uint8, device=device)
anchors = BoxList(anchors, (image_width, image_height), mode="xyxy")
anchors.add_field("visibility", inds_inside)
# TODO check if we want to return a list or not
# return [anchors]
return anchors
def forward(self, images_sizes, feature_maps):
"""
Arguments:
images_sizes (list(tuple(int, int)))
feature_maps (list(list(tensor)))
"""
assert len(feature_maps) == 1, "only single feature maps allowed"
anchors = []
for image_sizes, feature_map in zip(images_sizes, feature_maps[0]):
anchors.append(self.forward_single_image(image_sizes, feature_map))
return [anchors]
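# Illustrative sketch (added commentary, not part of the original file): exercising
# AnchorGenerator on one dummy feature map. With the default scales and aspect ratios the
# generator builds 9 cell anchors, so a 2x3 feature map yields 2*3*9 boxes. The image size
# (800, 600) and feature-map shape below are made up for the example.
def _example_anchor_generator():
    generator = AnchorGenerator(anchor_stride=16)
    feature_map = torch.zeros(1, 256, 2, 3)
    boxlists = generator([(800, 600)], [[feature_map]])
    anchors = boxlists[0][0]
    assert anchors.bbox.shape[0] == 2 * 3 * 9
    return anchors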
# TODO deduplicate with AnchorGenerator
class FPNAnchorGenerator(nn.Module):
"""
For a set of image sizes and feature maps, computes a set
of anchors
"""
def __init__(
self,
scales=(0.5, 1.0, 2.0),
aspect_ratios=(0.5, 1.0, 2.0),
base_anchor_size=256,
anchor_strides=(8, 16, 32),
straddle_thresh=0,
*args,
**kwargs
):
super(FPNAnchorGenerator, self).__init__()
# TODO complete and fix
sizes = tuple(i * base_anchor_size for i in scales)
cell_anchors = [
generate_anchors(anchor_stride, (size,), aspect_ratios).float()
for anchor_stride, size in zip(anchor_strides, sizes)
]
self.strides = anchor_strides
self.cell_anchors = cell_anchors
self.straddle_thresh = straddle_thresh
def num_anchors_per_location(self):
return [len(cell_anchors) for cell_anchors in self.cell_anchors]
def forward_single_image(self, image_sizes, feature_map, cell_anchors, stride):
device = feature_map.device
grid_height, grid_width = feature_map.shape[-2:]
# add visibility information to anchors
image_height, image_width = image_sizes
# The Python reference implementation below is kept under `if False:` for documentation;
# in practice the whole chunk is replaced with a single fused kernel call (C.anchor_generator)
if False:
shifts_x = torch.arange(
0, grid_width * stride, step=stride, dtype=torch.float32, device=device
)
shifts_y = torch.arange(
0, grid_height * stride, step=stride, dtype=torch.float32, device=device
)
shift_x, shift_y = meshgrid(shifts_x, shifts_y)
shift_x = shift_x.view(-1)
shift_y = shift_y.view(-1)
shifts = torch.stack((shift_x, shift_y, shift_x, shift_y), dim=1)
A = cell_anchors.size(0)
K = shifts.size(0)
field_of_anchors = cell_anchors.view(1, A, 4) + shifts.view(1, K, 4).permute(
1, 0, 2
)
anchors = field_of_anchors.view(K * A, 4)
if self.straddle_thresh >= 0:
inds_inside = (
(anchors[..., 0] >= -self.straddle_thresh)
& (anchors[..., 1] >= -self.straddle_thresh)
& (anchors[..., 2] < image_width + self.straddle_thresh)
& (anchors[..., 3] < image_height + self.straddle_thresh)
)
else:
inds_inside = torch.ones(anchors.shape[0], dtype=torch.uint8, device=device)
else:
anchors, inds_inside = C.anchor_generator(image_sizes, (grid_height, grid_width), cell_anchors, stride, self.straddle_thresh)
anchors = BoxList(anchors, (image_width, image_height), mode="xyxy")
anchors.add_field("visibility", inds_inside)
# TODO check if we want to return a list or not
# return [anchors]
return anchors
def forward(self, images_sizes, feature_maps):
"""
Arguments:
images_sizes (list(tuple(int, int)))
feature_maps (list(list(tensor)))
"""
device = feature_maps[0][0].device
self.cell_anchors = [anchor.to(device) for anchor in self.cell_anchors]
anchors = []
for feature_map_level, stride, cell_anchor in zip(
feature_maps, self.strides, self.cell_anchors
):
per_level_anchors = []
for image_sizes, feature_map in zip(images_sizes, feature_map_level):
per_level_anchors.append(
self.forward_single_image(
image_sizes, feature_map, cell_anchor, stride
)
)
anchors.append(per_level_anchors)
return anchors
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
#
# Based on:
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick and Sean Bell
# --------------------------------------------------------
# Verify that we compute the same anchors as Shaoqing's matlab implementation:
#
# >> load output/rpn_cachedir/faster_rcnn_VOC2007_ZF_stage1_rpn/anchors.mat
# >> anchors
#
# anchors =
#
# -83 -39 100 56
# -175 -87 192 104
# -359 -183 376 200
# -55 -55 72 72
# -119 -119 136 136
# -247 -247 264 264
# -35 -79 52 96
# -79 -167 96 184
# -167 -343 184 360
# array([[ -83., -39., 100., 56.],
# [-175., -87., 192., 104.],
# [-359., -183., 376., 200.],
# [ -55., -55., 72., 72.],
# [-119., -119., 136., 136.],
# [-247., -247., 264., 264.],
# [ -35., -79., 52., 96.],
# [ -79., -167., 96., 184.],
# [-167., -343., 184., 360.]])
def generate_anchors(
stride=16, sizes=(32, 64, 128, 256, 512), aspect_ratios=(0.5, 1, 2)
):
"""Generates a matrix of anchor boxes in (x1, y1, x2, y2) format. Anchors
are centered on stride / 2, have (approximate) sqrt areas of the specified
sizes, and aspect ratios as given.
"""
return _generate_anchors(
stride,
np.array(sizes, dtype=np.float) / stride,
np.array(aspect_ratios, dtype=np.float),
)
def _generate_anchors(base_size, scales, aspect_ratios):
"""Generate anchor (reference) windows by enumerating aspect ratios X
scales wrt a reference (0, 0, base_size - 1, base_size - 1) window.
"""
anchor = np.array([1, 1, base_size, base_size], dtype=np.float) - 1
anchors = _ratio_enum(anchor, aspect_ratios)
anchors = np.vstack(
[_scale_enum(anchors[i, :], scales) for i in range(anchors.shape[0])]
)
return torch.from_numpy(anchors)
def _whctrs(anchor):
"""Return width, height, x center, and y center for an anchor (window)."""
w = anchor[2] - anchor[0] + 1
h = anchor[3] - anchor[1] + 1
x_ctr = anchor[0] + 0.5 * (w - 1)
y_ctr = anchor[1] + 0.5 * (h - 1)
return w, h, x_ctr, y_ctr
def _mkanchors(ws, hs, x_ctr, y_ctr):
"""Given a vector of widths (ws) and heights (hs) around a center
(x_ctr, y_ctr), output a set of anchors (windows).
"""
ws = ws[:, np.newaxis]
hs = hs[:, np.newaxis]
anchors = np.hstack(
(
x_ctr - 0.5 * (ws - 1),
y_ctr - 0.5 * (hs - 1),
x_ctr + 0.5 * (ws - 1),
y_ctr + 0.5 * (hs - 1),
)
)
return anchors
def _ratio_enum(anchor, ratios):
"""Enumerate a set of anchors for each aspect ratio wrt an anchor."""
w, h, x_ctr, y_ctr = _whctrs(anchor)
size = w * h
size_ratios = size / ratios
ws = np.round(np.sqrt(size_ratios))
hs = np.round(ws * ratios)
anchors = _mkanchors(ws, hs, x_ctr, y_ctr)
return anchors
def _scale_enum(anchor, scales):
"""Enumerate a set of anchors for each scale wrt an anchor."""
w, h, x_ctr, y_ctr = _whctrs(anchor)
ws = w * scales
hs = h * scales
anchors = _mkanchors(ws, hs, x_ctr, y_ctr)
return anchors
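# Illustrative sketch (added commentary, not part of the original file): calling
# generate_anchors with the same parameters as the matlab reference in the comment block
# above (stride 16, sizes 128/256/512, aspect ratios 0.5/1/2) should reproduce those 9
# (x1, y1, x2, y2) rows; here we only check the shape.
def _example_generate_anchors():
    anchors = generate_anchors(stride=16, sizes=(128, 256, 512), aspect_ratios=(0.5, 1, 2))
    assert anchors.shape == (9, 4)  # 3 sizes x 3 aspect ratios
    return anchors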
# TODO doesn't match exactly Detectron
# heavily inspired from tensorflow
class AnchorGenerator_v0(nn.Module):
def __init__(
self,
scales=(0.5, 1.0, 2.0),
aspect_ratios=(0.5, 1.0, 2.0),
base_anchor_size=None,
anchor_stride=None,
anchor_offset=None,
straddle_thresh=0,
):
super(AnchorGenerator_v0, self).__init__()
# Handle argument defaults
if base_anchor_size is None:
base_anchor_size = [256, 256]
base_anchor_size = torch.tensor(base_anchor_size, dtype=torch.float32)
if anchor_stride is None:
anchor_stride = [16, 16]
anchor_stride = torch.tensor(anchor_stride, dtype=torch.float32)
if anchor_offset is None:
anchor_offset = [0, 0]
anchor_offset = torch.tensor(anchor_offset, dtype=torch.float32)
scales = torch.tensor(scales, dtype=torch.float32)
aspect_ratios = torch.tensor(aspect_ratios, dtype=torch.float32)
self.register_buffer("_scales", scales)
self.register_buffer("_aspect_ratios", aspect_ratios)
self.register_buffer("_base_anchor_size", base_anchor_size)
self.register_buffer("_anchor_stride", anchor_stride)
self.register_buffer("_anchor_offset", anchor_offset)
"""
self._scales = scales
self._aspect_ratios = aspect_ratios
self._base_anchor_size = base_anchor_size
self._anchor_stride = anchor_stride
self._anchor_offset = anchor_offset
"""
self.straddle_thresh = straddle_thresh
def num_anchors_per_location(self):
return [len(self._scales) * len(self._aspect_ratios)]
# TODO we don't want image, but image shapes. We can't get individual
# images like this -- only relevant for the visibility
def forward_single_image(self, image_sizes, feature_map):
"""
only the first element of the list is taken into account
image is a list of tensors
feature_map is a list of tensors
"""
# TODO attention if we want to return a list or not
# grid_height, grid_width = feature_map[0].shape[-2:]
grid_height, grid_width = feature_map.shape[-2:]
scales_grid, aspect_ratios_grid = meshgrid(self._scales, self._aspect_ratios)
scales_grid = torch.reshape(scales_grid, [-1])
aspect_ratios_grid = torch.reshape(aspect_ratios_grid, [-1])
# for the JIT -- JIT doesn't work here
# grid_height = torch.tensor(grid_height)
# grid_width = torch.tensor(grid_width)
anchors = tile_anchors(
grid_height,
grid_width,
scales_grid,
aspect_ratios_grid,
self._base_anchor_size,
self._anchor_stride,
self._anchor_offset,
)
# add visibility information to anchors
image_height, image_width = image_sizes
inds_inside = (
(anchors[..., 0] >= -self.straddle_thresh)
& (anchors[..., 1] >= -self.straddle_thresh)
& (anchors[..., 2] < image_width + self.straddle_thresh)
& (anchors[..., 3] < image_height + self.straddle_thresh)
)
anchors = BoxList(anchors, (image_width, image_height), mode="xyxy")
anchors.add_field("visibility", inds_inside)
# TODO check if we want to return a list or not
# return [anchors]
return anchors
def forward(self, images_sizes, feature_maps):
anchors = []
for image_sizes, feature_map in zip(images_sizes, feature_maps):
anchors.append(self.forward_single_image(image_sizes, feature_map))
return anchors
# copied from tensorflow
# @torch.jit.compile(nderivs=0) # TODO JIT doesn't work
def tile_anchors(
grid_height,
grid_width,
scales,
aspect_ratios,
base_anchor_size,
anchor_stride,
anchor_offset,
):
device = scales.device
ratio_sqrts = torch.sqrt(aspect_ratios)
heights = scales / ratio_sqrts * base_anchor_size[0]
widths = scales * ratio_sqrts * base_anchor_size[1]
# heights = torch.round((widths - 1) / aspect_ratios + 1)
# widths = torch.round((heights - 1) * aspect_ratios + 1)
print(heights, widths)
# TODO extra here
# heights = heights.round()
# widths = widths.round()
# TODO replace scale + shift with a single call to arange
# Get a grid of box centers
y_centers = torch.arange(grid_height, dtype=torch.float32, device=device)
y_centers = y_centers * anchor_stride[0] + anchor_offset[0]
x_centers = torch.arange(grid_width, dtype=torch.float32, device=device)
x_centers = x_centers * anchor_stride[1] + anchor_offset[1]
x_centers, y_centers = meshgrid(x_centers, y_centers)
widths_grid, x_centers_grid = meshgrid(widths, x_centers)
heights_grid, y_centers_grid = meshgrid(heights, y_centers)
bbox_centers = torch.stack([y_centers_grid, x_centers_grid], dim=3)
bbox_sizes = torch.stack([heights_grid, widths_grid], dim=3)
bbox_centers = torch.reshape(bbox_centers, [-1, 2])
bbox_sizes = torch.reshape(bbox_sizes, [-1, 2])
# bbox_corners = torch.cat([bbox_centers - .5 * bbox_sizes, bbox_centers + .5 * bbox_sizes], 1)
bbox_corners = torch.cat(
[bbox_centers - .5 * (bbox_sizes - 1), bbox_centers + .5 * (bbox_sizes - 1)], 1
)
return bbox_corners
if __name__ == "__main__":
g = AnchorGenerator(anchor_offset=(8.5, 8.5))
# g = AnchorGenerator(anchor_offset=(9., 9.))
tt = g([[10, 10]], [torch.rand(1, 3, 1, 1)])
t = torch.tensor(
[
[-83., -39., 100., 56.],
[-175., -87., 192., 104.],
[-359., -183., 376., 200.],
[-55., -55., 72., 72.],
[-119., -119., 136., 136.],
[-247., -247., 264., 264.],
[-35., -79., 52., 96.],
[-79., -167., 96., 184.],
[-167., -343., 184., 360.],
]
)
print(t - tt[0].bbox)
# from IPython import embed; embed()
|
|
#
# Jasy - Web Tooling Framework
# Copyright 2010-2012 Zynga Inc.
# Copyright 2013-2014 Sebastian Werner
#
#
# License: MPL 1.1/GPL 2.0/LGPL 2.1
# Authors:
# - Brendan Eich <[email protected]> (Original JavaScript) (2004-2010)
# - Sebastian Werner <[email protected]> (Python Port) (2010)
#
import re
import copy
import jasy.script.tokenize.Lang as Lang
import jasy.script.api.Comment as Comment
import jasy.core.Console as Console
# Operator and punctuator mapping from token to tree node type name.
# NB: because the lexer doesn't backtrack, all token prefixes must themselves
# be valid tokens (e.g. !== is acceptable because its prefixes are the valid
# tokens != and !).
operatorNames = {
'<' : 'lt',
'>' : 'gt',
'<=' : 'le',
'>=' : 'ge',
'!=' : 'ne',
'!' : 'not',
'==' : 'eq',
'===' : 'strict_eq',
'!==' : 'strict_ne',
'>>' : 'rsh',
'<<' : 'lsh',
'>>>' : 'ursh',
'+' : 'plus',
'*' : 'mul',
'-' : 'minus',
'/' : 'div',
'%' : 'mod',
',' : 'comma',
';' : 'semicolon',
':' : 'colon',
'=' : 'assign',
'?' : 'hook',
'&&' : 'and',
'||' : 'or',
'++' : 'increment',
'--' : 'decrement',
')' : 'right_paren',
'(' : 'left_paren',
'[' : 'left_bracket',
']' : 'right_bracket',
'{' : 'left_curly',
'}' : 'right_curly',
'&' : 'bitwise_and',
'^' : 'bitwise_xor',
'|' : 'bitwise_or',
'~' : 'bitwise_not'
}
# Assignment operators
assignOperators = ["|", "^", "&", "<<", ">>", ">>>", "+", "-", "*", "/", "%"]
#
# Classes
#
class Token:
__slots__ = ["type", "start", "line", "assignOp", "end", "value"]
class ParseError(Exception):
def __init__(self, message, fileId, line):
Exception.__init__(self, "Syntax error: %s\n%s:%s" % (message, fileId, line))
class Tokenizer(object):
def __init__(self, source, fileId="", line=1):
# source: JavaScript source
# fileId: Filename (for debugging purposes)
# line: Line number (for debugging purposes)
self.cursor = 0
self.source = str(source)
self.tokens = {}
self.tokenIndex = 0
self.lookahead = 0
self.scanNewlines = False
self.fileId = fileId
self.line = line
self.comments = []
input_ = property(lambda self: self.source[self.cursor:])
token = property(lambda self: self.tokens.get(self.tokenIndex))
def done(self):
# We need to set scanOperand to true here because the first thing
# might be a regexp.
return self.peek(True) == "end"
def match(self, tokenType, scanOperand=False):
return self.get(scanOperand) == tokenType or self.unget()
def mustMatch(self, tokenType):
if not self.match(tokenType):
raise ParseError("Missing " + tokenType, self.fileId, self.line)
return self.token
def peek(self, scanOperand=False):
if self.lookahead:
next = self.tokens.get((self.tokenIndex + self.lookahead) & 3)
if self.scanNewlines and (getattr(next, "line", None) != getattr(self, "line", None)):
tokenType = "newline"
else:
tokenType = getattr(next, "type", None)
else:
tokenType = self.get(scanOperand)
self.unget()
return tokenType
def peekOnSameLine(self, scanOperand=False):
self.scanNewlines = True
tokenType = self.peek(scanOperand)
self.scanNewlines = False
return tokenType
def getComments(self):
if self.comments:
comments = self.comments
self.comments = []
return comments
return None
def skip(self):
"""Eats comments and whitespace."""
input = self.source
startLine = self.line
# Whether this is the first call, as happens when starting to parse a file (eat leading comments/whitespace)
startOfFile = self.cursor == 0
indent = ""
while True:
if len(input) > self.cursor:
ch = input[self.cursor]
else:
return
self.cursor += 1
if len(input) > self.cursor:
next = input[self.cursor]
else:
next = None
if ch == "\n" and not self.scanNewlines:
self.line += 1
indent = ""
elif ch == "/" and next == "*":
self.cursor += 1
text = "/*"
inline = startLine == self.line and startLine > 1
commentStartLine = self.line
if startLine == self.line and not startOfFile:
mode = "inline"
elif (self.line - 1) > startLine:
# distance before this comment means it is a comment block for a whole section (multiple lines of code)
mode = "section"
else:
# comment for maybe multiple following lines of code, but not that important (no visual white space divider)
mode = "block"
while True:
try:
ch = input[self.cursor]
self.cursor += 1
except IndexError:
raise ParseError("Unterminated comment", self.fileId, self.line)
if ch == "*":
next = input[self.cursor]
if next == "/":
text += "*/"
self.cursor += 1
break
elif ch == "\n":
self.line += 1
text += ch
# Filter escaping on star-slash combinations in comment text
text = text.replace("*\/", "*/")
try:
self.comments.append(Comment.Comment(text, mode, commentStartLine, indent, self.fileId))
except Comment.CommentException as commentError:
Console.error("Ignoring comment in %s: %s", self.fileId, commentError)
elif ch == "/" and next == "/":
self.cursor += 1
text = "//"
if startLine == self.line and not startOfFile:
mode = "inline"
elif (self.line - 1) > startLine:
# distance before this comment means it is a comment block for a whole section (multiple lines of code)
mode = "section"
else:
# comment for maybe multiple following lines of code, but not that important (no visual white space divider)
mode = "block"
while True:
try:
ch = input[self.cursor]
self.cursor += 1
except IndexError:
# end of file etc.
break
if ch == "\n":
self.line += 1
break
text += ch
try:
self.comments.append(Comment.Comment(text, mode, self.line - 1, "", self.fileId))
except Comment.CommentException as commentError:
Console.error("Ignoring comment in %s: %s", self.fileId, commentError)
# check for whitespace, also for special cases like 0xA0
elif ch in "\xA0 \t":
indent += ch
else:
self.cursor -= 1
return
# Lexes the exponential part of a number, if present. Returns True if an
# exponential part was found.
def lexExponent(self):
input = self.source
next = input[self.cursor]
if next == "e" or next == "E":
self.cursor += 1
ch = input[self.cursor]
self.cursor += 1
if ch == "+" or ch == "-":
ch = input[self.cursor]
self.cursor += 1
if ch < "0" or ch > "9":
raise ParseError("Missing exponent", self.fileId, self.line)
while(True):
ch = input[self.cursor]
self.cursor += 1
if not (ch >= "0" and ch <= "9"):
break
self.cursor -= 1
return True
return False
def lexZeroNumber(self, ch):
token = self.token
input = self.source
token.type = "number"
ch = input[self.cursor]
self.cursor += 1
if ch == ".":
while(True):
ch = input[self.cursor]
self.cursor += 1
if not (ch >= "0" and ch <= "9"):
break
self.cursor -= 1
self.lexExponent()
token.value = input[token.start:self.cursor]
elif ch == "x" or ch == "X":
while(True):
ch = input[self.cursor]
self.cursor += 1
if not ((ch >= "0" and ch <= "9") or (ch >= "a" and ch <= "f") or (ch >= "A" and ch <= "F")):
break
self.cursor -= 1
token.value = input[token.start:self.cursor]
elif ch >= "0" and ch <= "7":
while(True):
ch = input[self.cursor]
self.cursor += 1
if not (ch >= "0" and ch <= "7"):
break
self.cursor -= 1
token.value = input[token.start:self.cursor]
else:
self.cursor -= 1
self.lexExponent() # 0E1, &c.
token.value = 0
def lexNumber(self, ch):
token = self.token
input = self.source
token.type = "number"
floating = False
while(True):
ch = input[self.cursor]
self.cursor += 1
if ch == "." and not floating:
floating = True
ch = input[self.cursor]
self.cursor += 1
if not (ch >= "0" and ch <= "9"):
break
self.cursor -= 1
exponent = self.lexExponent()
segment = input[token.start:self.cursor]
# Protect float or exponent numbers
if floating or exponent:
token.value = segment
else:
token.value = int(segment)
def lexDot(self, ch):
token = self.token
input = self.source
next = input[self.cursor]
if next >= "0" and next <= "9":
while True:
ch = input[self.cursor]
self.cursor += 1
if not (ch >= "0" and ch <= "9"):
break
self.cursor -= 1
self.lexExponent()
token.type = "number"
token.value = input[token.start:self.cursor]
else:
token.type = "dot"
def lexString(self, ch):
token = self.token
input = self.source
token.type = "string"
hasEscapes = False
delim = ch
ch = input[self.cursor]
self.cursor += 1
while ch != delim:
if ch == "\\":
hasEscapes = True
self.cursor += 1
ch = input[self.cursor]
self.cursor += 1
if hasEscapes:
token.value = eval(input[token.start:self.cursor])
else:
token.value = input[token.start + 1:self.cursor - 1]
def lexRegExp(self, ch):
token = self.token
input = self.source
token.type = "regexp"
while True:
try:
ch = input[self.cursor]
self.cursor += 1
except IndexError:
raise ParseError("Unterminated regex", self.fileId, self.line)
if ch == "\\":
self.cursor += 1
elif ch == "[":
while True:
if ch == "\\":
self.cursor += 1
try:
ch = input[self.cursor]
self.cursor += 1
except IndexError:
raise ParseError("Unterminated character class", self.fileId, self.line)
if ch == "]":
break
if ch == "/":
break
while(True):
ch = input[self.cursor]
self.cursor += 1
if not (ch >= "a" and ch <= "z"):
break
self.cursor -= 1
token.value = input[token.start:self.cursor]
def lexOp(self, ch):
token = self.token
input = self.source
op = ch
while(True):
try:
next = input[self.cursor]
except IndexError:
break
if (op + next) in operatorNames:
self.cursor += 1
op += next
else:
break
try:
next = input[self.cursor]
except IndexError:
next = None
if next == "=" and op in assignOperators:
self.cursor += 1
token.type = "assign"
token.assignOp = operatorNames[op]
op += "="
else:
token.type = operatorNames[op]
token.assignOp = None
# FIXME: Unicode escape sequences
# FIXME: Unicode identifiers
def lexIdent(self, ch):
token = self.token
input = self.source
try:
while True:
ch = input[self.cursor]
self.cursor += 1
if not ((ch >= "a" and ch <= "z") or (ch >= "A" and ch <= "Z") or (ch >= "0" and ch <= "9") or ch == "$" or ch == "_"):
break
except IndexError:
self.cursor += 1
pass
# Put the non-word character back.
self.cursor -= 1
identifier = input[token.start:self.cursor]
if identifier in Lang.keywords:
token.type = identifier
else:
token.type = "identifier"
token.value = identifier
def get(self, scanOperand=False):
"""
It consumes input *only* if there is no lookahead.
Dispatches to the appropriate lexing function depending on the input.
"""
while self.lookahead:
self.lookahead -= 1
self.tokenIndex = (self.tokenIndex + 1) & 3
token = self.tokens[self.tokenIndex]
if token.type != "newline" or self.scanNewlines:
return token.type
self.skip()
self.tokenIndex = (self.tokenIndex + 1) & 3
self.tokens[self.tokenIndex] = token = Token()
token.start = self.cursor
token.line = self.line
input = self.source
if self.cursor == len(input):
token.end = token.start
token.type = "end"
return token.type
ch = input[self.cursor]
self.cursor += 1
if (ch >= "a" and ch <= "z") or (ch >= "A" and ch <= "Z") or ch == "$" or ch == "_":
self.lexIdent(ch)
elif scanOperand and ch == "/":
self.lexRegExp(ch)
elif ch == ".":
self.lexDot(ch)
elif self.scanNewlines and ch == "\n":
token.type = "newline"
self.line += 1
elif ch in operatorNames:
self.lexOp(ch)
elif ch >= "1" and ch <= "9":
self.lexNumber(ch)
elif ch == "0":
self.lexZeroNumber(ch)
elif ch == '"' or ch == "'":
self.lexString(ch)
else:
raise ParseError("Illegal token: %s (Code: %s)" % (ch, ord(ch)), self.fileId, self.line)
token.end = self.cursor
return token.type
def unget(self):
"""Match depends on unget returning undefined."""
self.lookahead += 1
if self.lookahead == 4:
raise ParseError("PANIC: too much lookahead!", self.fileId, self.line)
self.tokenIndex = (self.tokenIndex - 1) & 3
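# Illustrative sketch (added commentary, not part of the original module): a minimal walk
# over a tiny JavaScript snippet using the Tokenizer above. The expected token types assume
# that 'var' is declared as a keyword in jasy.script.tokenize.Lang.
def _example_tokenize(source="var answer = 42;"):
    tokenizer = Tokenizer(source, fileId="example.js")
    types = []
    while not tokenizer.done():
        types.append(tokenizer.get())
    return types  # expected: ['var', 'identifier', 'assign', 'number', 'semicolon']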
|
|
#!/usr/bin/env python
import sys
import os
import threading
import traceback
import json
import multiprocessing
import subprocess
import http
import html
import urllib
import argparse
from .aserver import AsyncCache, AsyncTCPServer, AsyncHTTPRequestHandler
from ..fpbench import fpcparser
from ..arithmetic import native, np
from ..arithmetic import softfloat, softposit
from ..arithmetic import ieee754, posit
from ..arithmetic import sinking
from ..arithmetic import canonicalize
from ..arithmetic import evalctx
here = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(here, 'index.html'), 'rb') as f:
index = f.read()
with open(os.path.join(here, 'evaluate.html'), 'rb') as f:
evaluate_page = f.read()
with open(os.path.join(here, 'translate.html'), 'rb') as f:
translate_page = f.read()
with open(os.path.join(here, 'titanic.css'), 'rb') as f:
css = f.read()
with open(os.path.join(here, 'titanfp.min.js'), 'rb') as f:
bundle = f.read()
with open(os.path.join(here, '../../../www/favicon.ico'), 'rb') as f:
favicon = f.read()
with open(os.path.join(here, '../../../www/piceberg_round.png'), 'rb') as f:
logo = f.read()
fpbench_root = '/home/bill/private/research/origin-FPBench'
fpbench_tools = os.path.join(fpbench_root, 'tools')
fpbench_benchmarks = os.path.join(fpbench_root, 'benchmarks')
def run_tool(toolname, core, *args):
tool = subprocess.Popen(
args=['racket', os.path.join(fpbench_tools, toolname), *args],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout_data, stderr_data = tool.communicate(input=core.sexp.encode('utf-8'))
success = True
retval = tool.wait()
if retval != 0:
success = False
print('subprocess:\n {}\nreturned {:d}'.format(' '.join(tool.args), retval),
file=sys.stderr, flush=True)
if stderr_data:
print(stderr_data, file=sys.stderr, flush=True)
return success, stdout_data.decode('utf-8')
def filter_cores(*args, benchmark_dir = fpbench_benchmarks):
if not os.path.isdir(benchmark_dir):
raise ValueError('{}: not a directory'.format(benchmark_dir))
names = os.listdir(benchmark_dir)
benchmark_files = [name for name in names
if name.lower().endswith('.fpcore')
and os.path.isfile(os.path.join(benchmark_dir, name))]
cat = subprocess.Popen(
cwd=benchmark_dir,
args=['cat', *benchmark_files],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
cat.stdin.close()
tool = subprocess.Popen(
args=['racket', os.path.join(fpbench_tools, 'filter.rkt'), *args],
stdin=cat.stdout,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout_data, stderr_data = tool.communicate()
# cleanup
success = True
for proc in [cat, tool]:
retval = proc.wait()
if retval != 0:
success = False
print('subprocess:\n {}\nreturned {:d}'.format(' '.join(proc.args), retval),
file=sys.stderr, flush=True)
cat_stderr_data = cat.stderr.read()
cat.stderr.close()
if cat_stderr_data:
print(cat_stderr_data, file=sys.stderr, flush=True)
if stderr_data:
print(stderr_data, file=sys.stderr, flush=True)
return success, stdout_data.decode('utf-8')
def demo_tool(success, output):
if success:
return output
else:
return 'Error - tool subprocess returned nonzero value'
def demo_arith(evaluator, arguments, core, ctx=None):
if arguments is None:
try:
return str(evaluator(core))
except Exception:
print('Exception in FPCore evaluation\n evaluator={}\n args={}\n core={}'
.format(repr(evaluator), repr(arguments), core.sexp))
traceback.print_exc()
return 'Error evaluating FPCore.'
else:
inputs = arguments.strip().split()
if len(inputs) != len(core.inputs):
return 'Error - wrong number of arguments (core expects {:d})'.format(len(core.inputs))
try:
return str(evaluator(core, inputs, ctx))
except Exception:
print('Exception in FPCore evaluation\n evaluator={}\n args={}\n core={}'
.format(repr(evaluator), repr(arguments), core.sexp))
traceback.print_exc()
return 'Error evaluating FPCore.'
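# Illustrative call (assumes `core` was parsed with fpcparser and takes two
# arguments; the context mirrors the dispatch in construct_content below,
# where w=11, p=53 corresponds to IEEE double precision):
#
#     ctx = ieee754.ieee_ctx(11, 53)
#     result = demo_arith(ieee754.Interpreter.interpret, '1.0 2.0', core, ctx)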
class RaisingArgumentParser(argparse.ArgumentParser):
def error(self, message):
raise ValueError('unable to parse inputs')
DEFAULT_PROPAGATE = {'precision', 'round', 'math-library'}
DEFAULT_RECURSE = {'pre', 'spec'}
def parse_canon_args(args):
parser = RaisingArgumentParser(add_help=False)
parser.add_argument('--default', action='store_true')
parser.add_argument('--recurse', type=str, nargs='*')
parser.add_argument('--propagate', type=str, nargs='*')
ns = parser.parse_args(args.strip().split())
if ns.recurse is None and ns.propagate is None:
return DEFAULT_RECURSE, DEFAULT_PROPAGATE
if ns.recurse is None:
recurse = set()
else:
recurse = set(ns.recurse)
if ns.propagate is None:
propagate = set()
else:
propagate = set(ns.propagate)
if ns.default:
recurse.update(DEFAULT_RECURSE)
propagate.update(DEFAULT_PROPAGATE)
return recurse, propagate
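# Examples of how the flags combine (derived from the logic above):
#
#     parse_canon_args('')                      # -> (DEFAULT_RECURSE, DEFAULT_PROPAGATE)
#     parse_canon_args('--recurse pre')         # -> ({'pre'}, set())
#     parse_canon_args('--default --recurse x') # -> ({'x', 'pre', 'spec'}, DEFAULT_PROPAGATE)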
def demo_canon(evaluator, arguments, core, use_prop=False):
try:
recurse, propagate = parse_canon_args(arguments)
except Exception:
print('Exception parsing arguments for canonicalizer: {}'.format(repr(arguments)))
traceback.print_exc()
return 'Error parsing arguments.'
try:
if use_prop:
return evaluator(core, recurse=recurse, propagate=propagate).sexp
else:
return evaluator(core, recurse=recurse).sexp
except Exception:
print('Exception in FPCore translation\n translator={}\n recurse={}\n propagate={}\n use_prop={}\n core={}'
.format(repr(evaluator), repr(recurse), repr(propagate), repr(use_prop), core.sexp))
traceback.print_exc()
return 'Error translating FPCore.'
class TitanfpHTTPRequestHandler(AsyncHTTPRequestHandler):
def import_core_from_query(self, content, query):
qd = urllib.parse.parse_qs(query)
return content.decode('utf-8').format(qd.get('core', [''])[-1]).encode('utf-8')
def construct_content(self, data):
pr = self.translate_path()
if pr.path == '/titanfp.min.js':
response = http.server.HTTPStatus.OK
msg = None
headers = (
('Content-Type', 'text/javascript'),
)
content = bundle
elif pr.path == '/titanic.css':
response = http.server.HTTPStatus.OK
msg = None
headers = (
('Content-Type', 'text/css'),
)
content = css
else:
response = http.server.HTTPStatus.OK
msg = None
if data is None:
if pr.path == '/favicon.ico':
headers = (
('Content-Type', 'image/x-icon'),
)
content = favicon
elif pr.path == '/piceberg_round.png':
headers = (
('Content-Type', 'image/png'),
)
content = logo
# elif pr.path == '/evaluate':
else:
headers = (
('Content-Type', 'text/html'),
)
content = self.import_core_from_query(evaluate_page, pr.query)
# elif pr.path == '/translate':
# headers = (
# ('Content-Type', 'text/html'),
# )
# content = self.import_core_from_query(translate_page, pr.query)
# else:
# print(pr)
# headers = (
# ('Content-Type', 'text/html'),
# )
# content = index
else:
try:
payload = json.loads(data.decode('utf-8'))
                except Exception:
                    print('Malformed data payload:\n{}'.format(repr(data)))
                    traceback.print_exc()
                    # Fall back to an empty core so the error path below can
                    # still report cleanly instead of raising a NameError.
                    payload = {'core': ''}
try:
core = fpcparser.compile(payload['core'])[0]
except Exception:
print('Exception parsing FPCore {}'.format(repr(payload['core'])))
traceback.print_exc()
core = None
output = 'Error - unable to parse FPCore'
try:
if core is not None:
backend = payload['backend']
if backend == 'sink':
ctx = ieee754.ieee_ctx(int(payload['w']), int(payload['p']))
output = demo_arith(sinking.Interpreter.interpret, payload['inputs'], core, ctx)
elif backend == 'ieee754':
ctx = ieee754.ieee_ctx(int(payload['w']), int(payload['p']))
output = demo_arith(ieee754.Interpreter.interpret, payload['inputs'], core, ctx)
elif backend == 'posit':
ctx = posit.posit_ctx(int(payload['es']), int(payload['nbits']))
output = demo_arith(posit.Interpreter.interpret, payload['inputs'], core, ctx)
elif backend == 'native':
output = demo_arith(native.Interpreter.interpret, payload['inputs'], core)
elif backend == 'np':
output = demo_arith(np.Interpreter.interpret, payload['inputs'], core)
elif backend == 'softfloat':
output = demo_arith(softfloat.Interpreter.interpret, payload['inputs'], core)
elif backend == 'softposit':
output = demo_arith(softposit.Interpreter.interpret, payload['inputs'], core)
elif backend == 'canonicalize':
output = demo_canon(canonicalize.Canonicalizer.translate, payload['inputs'], core, use_prop=True)
elif backend == 'condense':
output = demo_canon(canonicalize.Condenser.translate, payload['inputs'], core, use_prop=False)
elif backend == 'minimize':
output = demo_canon(canonicalize.Minimizer.translate, payload['inputs'], core, use_prop=False)
elif backend == 'fpcore':
inputs = payload['inputs'].strip().split()
if len(inputs) != len(core.inputs):
output = 'Error - wrong number of arguments (core expects {:d})'.format(len(core.inputs))
else:
output = demo_tool(*run_tool('fpcore.rkt', core, *inputs))
elif backend == 'core2c':
output = demo_tool(*run_tool('core2c.rkt', core))
elif backend == 'core2js':
output = demo_tool(*run_tool('core2js.rkt', core))
elif backend == 'core2smtlib2':
output = demo_tool(*run_tool('core2smtlib2.rkt', core))
# elif backend == 'filter':
# inputs = payload['inputs'].strip().split()
# output = demo_tool(*filter_cores(*inputs))
else:
output = 'Unknown backend ' + repr(backend)
except Exception as e:
print('Exception running backend\n payload={}'.format(repr(payload)))
traceback.print_exc()
output = 'Error running backend.'
headers = (
('Content-Type', 'text/plain'),
)
content = html.escape(str(output)).encode('utf-8')
return response, msg, headers, content
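# Example request payload handled by construct_content above (illustrative
# values; only the keys required by the chosen backend need to be present):
#
#     {"core": "(FPCore (x) (+ x 1))", "backend": "ieee754",
#      "w": "11", "p": "53", "inputs": "1.0"}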
def run():
import argparse
ncores = os.cpu_count()
#default_pool_size = max(1, min(ncores - 1, (ncores // 2) + 1))
default_pool_size = 2
parser = argparse.ArgumentParser()
parser.add_argument('--cache', type=int, default=1,
help='number of requests to cache')
parser.add_argument('--workers', type=int, default=default_pool_size,
help='number of worker processes to run in parallel')
parser.add_argument('--host', type=str, default='localhost',
help='server host')
parser.add_argument('--port', type=int, default=8000,
help='server port')
args = parser.parse_args()
cache = AsyncCache(args.cache)
with multiprocessing.Pool(args.workers, maxtasksperchild=100) as pool:
class CustomHTTPRequestHandler(TitanfpHTTPRequestHandler):
the_cache = cache
the_pool = pool
print('caching {:d} requests'.format(args.cache))
print('{:d} worker processes'.format(args.workers))
with AsyncTCPServer((args.host, args.port,), CustomHTTPRequestHandler) as server:
server_thread = threading.Thread(target=server.serve_forever)
server_thread.daemon = True
server_thread.start()
print('server on thread:', server_thread.name)
print('close stdin to stop.')
for line in sys.stdin:
pass
print('stdin closed, stopping.')
pool.close()
print('workers closing...')
pool.join()
print('workers joined successfully.')
server.shutdown()
print('goodbye!')
if __name__ == '__main__':
run()
|
|
#!/usr/bin/env python3
from cum import config, exceptions, output
from functools import wraps
import click
import concurrent.futures
import requests
class CumGroup(click.Group):
def command(self, check_db=True, *args, **kwargs):
def decorator(f):
@wraps(f)
def wrapper(*args, **kwargs):
if check_db:
db.test_database()
return f(*args, **kwargs)
return super(CumGroup, self).command(*args, **kwargs)(wrapper)
return decorator
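# Note: commands registered through this group call db.test_database() before
# running their body unless declared with check_db=False (as repair-db does
# below), so most commands can assume the database passed its sanity check.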
def list_new():
items = {}
for chapter in db.Chapter.find_new():
try:
items[chapter.alias].append(chapter.chapter)
except KeyError:
items[chapter.alias] = [chapter.chapter]
for series in sorted(items):
if config.get().compact_new:
name = click.style(series, bold=True)
chapters = ' '.join([x for x in items[series]])
line = click.wrap_text(' '.join([name, chapters]),
subsequent_indent=' ' * (len(series) + 1),
width=click.get_terminal_size()[0])
click.echo(line)
else:
click.secho(series, bold=True)
click.echo(click.wrap_text(' '.join([x for x in items[series]]),
width=click.get_terminal_size()[0]))
@click.command(cls=CumGroup)
@click.option('--cum-directory',
help='Directory used by cum to store application files.')
def cli(cum_directory=None):
global db, chapter_by_url, output, sanity, series_by_url
config.initialize(directory=cum_directory)
from cum import db, output, sanity
from cum.utility import chapter_by_url, series_by_url
db.initialize()
@cli.command()
@click.argument('alias')
@click.argument('new_alias')
def alias(alias, new_alias):
"""Assign a new alias to series."""
s = db.Series.alias_lookup(alias)
s.alias = new_alias
try:
db.session.commit()
    except Exception:
db.session.rollback()
else:
output.chapter('Changing alias "{}" to "{}"'.format(alias, new_alias))
@cli.command()
@click.argument('alias')
def chapters(alias):
"""List all chapters for a manga series.
Chapter listing will contain the flag value for the chapter ('n' for new,
'i' for ignored and blank for downloaded), the chapter identifier ("chapter
number") and the possible chapter title and group.
"""
s = db.Series.alias_lookup(alias)
if s.chapters:
click.secho('f chapter title [group]', bold=True)
for chapter in s.ordered_chapters:
name_len = click.get_terminal_size()[0] - 11
name = '{} {}'.format(chapter.title, chapter.group_tag)[:name_len]
row = '{} {:>7} {}'.format(chapter.status, chapter.chapter, name)
if row[0] == 'n':
style = {'fg': 'white', 'bold': True}
elif row[0] == ' ':
style = {'bold': True}
else:
style = {}
click.secho(row, **style)
@cli.command()
@click.argument('aliases', required=False, nargs=-1)
def download(aliases):
"""Download all available chapters.
If an optional alias is specified, the command will only download new
chapters for that alias.
"""
chapters = []
if not aliases:
chapters = db.Chapter.find_new()
for alias in aliases:
chapters += db.Chapter.find_new(alias=alias)
output.chapter('Downloading {} chapters'.format(len(chapters)))
for chapter in chapters:
chapter.get()
@cli.command()
@click.argument('urls', required=True, nargs=-1)
@click.option('--directory',
              help='Directory to download the series chapters into.')
@click.option('--download', is_flag=True,
help='Downloads the chapters for the added follows.')
@click.option('--ignore', is_flag=True,
help='Ignores the chapters for the added follows.')
def follow(urls, directory, download, ignore):
"""Follow a series."""
chapters = []
for url in urls:
try:
series = series_by_url(url)
except exceptions.ScrapingError:
output.warning('Scraping error ({})'.format(url))
continue
if not series:
output.warning('Invalid URL "{}"'.format(url))
continue
series.directory = directory
if ignore:
series.follow(ignore=True)
output.chapter('Ignoring {} chapters'.format(len(series.chapters)))
else:
series.follow()
chapters += db.Chapter.find_new(alias=series.alias)
if download:
output.chapter('Downloading {} chapters'.format(len(chapters)))
for chapter in chapters:
chapter.get()
@cli.command()
def follows():
"""List all follows.
Will list all of the active follows in the database as a list of aliases.
To find out more information on an alias, use the info command.
"""
query = (db.session.query(db.Series)
.filter_by(following=True)
.order_by(db.Series.alias)
.all())
output.list([x.alias for x in query])
@cli.command()
@click.argument('input', required=True, nargs=-1)
@click.option('--directory',
              help='Directory to download chapters into.')
def get(input, directory):
"""Download chapters by URL or by alias:chapter.
    The command accepts input as either the chapter URL or the
alias:chapter combination (e.g. 'bakuon:11'), if the chapter is already
found in the database through a follow. The command will not enter the
downloads in the database in case of URLs and ignores downloaded status
in case of alias:chapter, so it can be used to download one-shots that
don't require follows or for redownloading already downloaded chapters.
"""
chapter_list = []
for i in input:
try:
series = series_by_url(i)
except exceptions.ScrapingError:
output.warning('Scraping error ({})'.format(i))
continue
if series:
chapter_list += series.chapters
try:
chapter = chapter_by_url(i)
except exceptions.ScrapingError:
output.warning('Scraping error ({})'.format(i))
continue
if chapter:
chapter_list.append(chapter)
        if not series and not chapter:
try:
a, c = i.split(':')
except ValueError:
output.warning('Invalid selection "{}"'.format(i))
else:
chapters = (db.session.query(db.Chapter)
.join(db.Series)
.filter(db.Series.alias == a,
db.Chapter.chapter == c)
.all())
for chapter in chapters:
chapter_list.append(chapter.to_object())
for chapter in chapter_list:
chapter.directory = directory
chapter.get(use_db=False)
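# Illustrative invocations of the command above (alias and URL are
# hypothetical examples):
#
#     cum get bakuon:11
#     cum get https://example.com/reader/some-chapter --directory /tmp/manga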
@cli.command()
@click.argument('alias')
@click.argument('chapters', required=True, nargs=-1)
def ignore(alias, chapters):
"""Ignore chapters for a series.
Enter one or more chapters after the alias to ignore them. Enter the
chapter identifiers as they are listed when using the chapters command. To
ignore all of the chapters for a particular series, use the word "all" in
place of the chapters.
"""
s = db.Series.alias_lookup(alias)
query = db.session.query(db.Chapter).filter(db.Chapter.series == s,
db.Chapter.downloaded == 0)
if len(chapters) == 1 and chapters[0].lower() == 'all':
click.echo('Ignoring {} chapters for {}'.format(len(s.chapters),
s.name))
click.confirm('Do you want to continue',
prompt_suffix='? ', abort=True)
else:
query = query.filter(db.Chapter.chapter.in_(chapters))
chapters = [x.to_object() for x in query.all()]
for chapter in chapters:
chapter.ignore()
if len(chapters) == 1:
output.chapter('Ignored chapter {} for {}'.format(chapters[0].chapter,
s.name))
else:
output.series('Ignored {} chapters for {}'.format(len(chapters),
s.name))
@cli.command()
@click.argument('alias')
def open(alias):
"""Open the series URL in a browser."""
s = db.Series.alias_lookup(alias)
click.launch(s.url)
@cli.command()
def new():
"""List all new chapters."""
list_new()
@cli.command(check_db=False, name='repair-db')
def repair_db():
"""Runs an automated database repair."""
sanity_tester = sanity.DatabaseSanity(db.Base, db.engine)
sanity_tester.test()
if sanity_tester.errors:
output.series('Backing up database to cum.db.bak')
db.backup_database()
output.series('Running database repair')
for error in sanity_tester.errors:
error.fix()
@cli.command()
@click.argument('alias')
def unfollow(alias):
"""Unfollow manga.
Will mark a series as unfollowed. In order not to lose history of
downloaded chapters, the series is merely marked as unfollowed in the
database rather than removed.
"""
s = db.Series.alias_lookup(alias)
s.following = False
db.session.commit()
output.series('Removing follow for {}'.format(s.name))
@cli.command()
@click.argument('alias')
@click.argument('chapters', required=True, nargs=-1)
def unignore(alias, chapters):
"""Unignore chapters for a series.
Enter one or more chapters after the alias to mark them as new. Enter the
chapter identifiers as they are listed when using the chapters command. To
unignore all of the chapters for a particular series, use the word "all" in
place of the chapters.
"""
s = db.Series.alias_lookup(alias)
query = db.session.query(db.Chapter).filter(db.Chapter.series == s,
db.Chapter.downloaded == -1)
if len(chapters) == 1 and chapters[0].lower() == 'all':
click.echo('Unignoring {} chapters for {}'.format(len(s.chapters),
s.name))
click.confirm('Do you want to continue',
prompt_suffix='? ', abort=True)
else:
query = query.filter(db.Chapter.chapter.in_(chapters))
chapters = [x.to_object() for x in query.all()]
for chapter in chapters:
chapter.mark_new()
if len(chapters) == 1:
output.chapter('Unignored chapter {} for {}'.format(
chapters[0].chapter, s.name
))
else:
output.series('Unignored {} chapters for {}'.format(
len(chapters), s.name
))
@cli.command()
def update():
"""Gather new chapters from followed series."""
pool = concurrent.futures.ThreadPoolExecutor(config.get().download_threads)
futures = []
warnings = []
aliases = {}
query = db.session.query(db.Series).filter_by(following=True).all()
output.series('Updating {} series'.format(len(query)))
for follow in query:
fut = pool.submit(series_by_url, follow.url)
futures.append(fut)
aliases[fut] = follow.alias
with click.progressbar(length=len(futures), show_pos=True,
fill_char='>', empty_char=' ') as bar:
for future in concurrent.futures.as_completed(futures):
try:
series = future.result()
            except requests.exceptions.ConnectionError:
                warnings.append('Unable to update {} (connection error)'
                                .format(aliases[future]))
            except exceptions.ScrapingError:
                warnings.append('Unable to update {} (scraping error)'
                                .format(aliases[future]))
else:
series.update()
bar.update(1)
for w in warnings:
output.warning(w)
list_new()
if __name__ == '__main__':
cli()
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the distributed values library."""
import copy
import os
from absl.testing import parameterized
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import tf2
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.distribute import test_util as ds_test_util
from tensorflow.python.distribute import tpu_strategy
from tensorflow.python.distribute import tpu_values
from tensorflow.python.distribute import values as values_lib
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.training import saver as saver_lib
def _device_str(d):
return "/device:GPU:" + str(d)
def _nested_value(d):
return ("a" + d, ["b" + d, {"c": "d" + d, "e": "f" + d}, "g" + d], "h" + d)
def mirrored_and_tpu_strategy_combinations():
return combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.mirrored_strategy_with_two_gpus_no_merge_call,
strategy_combinations.tpu_strategy,
strategy_combinations.tpu_strategy_packed_var,
strategy_combinations.tpu_strategy_spmd,
],
mode=["graph", "eager"])
class DistributedValuesTest(test.TestCase, parameterized.TestCase):
@combinations.generate(
combinations.combine(
distribution=(strategy_combinations.all_strategies_minus_default +
strategy_combinations.multiworker_strategies),
mode=["eager"]
))
def testMakeDistributedValueFromTensor(self, distribution):
if not tf2.enabled():
self.skipTest("Only V2 is supported.")
single_value = constant_op.constant(1)
def value_fn(ctx):
del ctx
return single_value
distributed_values = (
distribution.experimental_distribute_values_from_function(value_fn))
self.assertAllEqual(
ds_test_util.gather(distribution, distributed_values),
constant_op.constant(1., shape=(distribution.num_replicas_in_sync)))
@combinations.generate(
combinations.combine(
distribution=(strategy_combinations.all_strategies_minus_default +
strategy_combinations.multiworker_strategies),
mode=["eager"]
))
def testMakeDistributedValueSingleNumpyArrayConstant(self, distribution):
if not tf2.enabled():
self.skipTest("Only V2 is supported.")
array_value = np.array([1., 2., 3.])
def value_fn(ctx):
del ctx
return array_value
distributed_values = (
distribution.experimental_distribute_values_from_function(value_fn))
self.assertAllEqual(
ds_test_util.gather(distribution, distributed_values).numpy(),
[[1., 2., 3.]] * distribution.num_replicas_in_sync)
@combinations.generate(
combinations.combine(
distribution=(strategy_combinations.all_strategies_minus_default +
strategy_combinations.multiworker_strategies),
mode=["eager"]
))
def testMakeDistributedValueTupleConstant(self, distribution):
if not tf2.enabled():
self.skipTest("Only V2 is supported.")
tuple_value = (1., 2., 3.)
def value_fn(ctx):
del ctx
return tuple_value
distributed_values = (
distribution.experimental_distribute_values_from_function(value_fn))
distributed_values = ds_test_util.gather(distribution, distributed_values)
# Expected output for 2 replicas:
# ([1.0, 1.0], [2.0, 2.0], [3.0, 3.0])
expected = tuple([v for i in range(distribution.num_replicas_in_sync)]
for v in tuple_value)
self.assertAllEqual(distributed_values, expected)
@combinations.generate(
combinations.combine(
distribution=(strategy_combinations.all_strategies_minus_default +
strategy_combinations.multiworker_strategies),
mode=["eager"]
))
def testMakeDistributedValueNestedStructurePerReplica(self, distribution):
if not tf2.enabled():
self.skipTest("Only V2 is supported.")
tuple_value = (1., 2., 3.)
def value_fn(ctx):
per_replica = []
for val in tuple_value:
per_replica.append(val * ctx.replica_id_in_sync_group)
return tuple(per_replica)
distributed_values = (
distribution.experimental_distribute_values_from_function(value_fn))
distributed_values = ds_test_util.gather(distribution, distributed_values)
# Expected output for 2 replicas:
# ([0.0, 1.0], [0.0, 2.0], [0.0, 3.0])
expected = tuple([v * i for i in range(distribution.num_replicas_in_sync)]
for v in tuple_value)
self.assertAllEqual(distributed_values, expected)
# NOTE(priyag): Cannot test this with MultiWorkerMirroredStrategy because
# collective ops do not support SparseTensors.
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.all_strategies_minus_default,
mode=["eager"]
))
def testMakeDistributedValueSpareTensor(self, distribution):
if not tf2.enabled():
self.skipTest("Only V2 is supported.")
def value_fn(ctx):
del ctx
return sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
distributed_values = (
distribution.experimental_distribute_values_from_function(value_fn))
local_results = distribution.experimental_local_results(distributed_values)
for i in range(distribution.num_replicas_in_sync):
self.assertAllEqual(
sparse_ops.sparse_tensor_to_dense(local_results[i]),
[[1, 0, 0, 0], [0, 0, 2, 0], [0, 0, 0, 0]])
@combinations.generate(
combinations.combine(
distribution=(strategy_combinations.all_strategies_minus_default +
strategy_combinations.multiworker_strategies),
mode=["eager"]
))
def testMakeDistributedValueExtractFromArray(self, distribution):
if not tf2.enabled():
self.skipTest("Only V2 is supported.")
multiple_values = range(distribution.num_replicas_in_sync)
def value_fn(ctx):
return multiple_values[ctx.replica_id_in_sync_group]
distributed_values = (
distribution.experimental_distribute_values_from_function(value_fn))
distributed_values = ds_test_util.gather(distribution, distributed_values)
expected = range(distribution.num_replicas_in_sync)
self.assertAllEqual(distributed_values, expected)
@combinations.generate(
combinations.combine(
distribution=(strategy_combinations.all_strategies_minus_default +
strategy_combinations.multiworker_strategies),
mode=["eager"]
))
def testMakeDistributedValueAndRun(self, distribution):
if not tf2.enabled():
self.skipTest("Only V2 is supported.")
@def_function.function
def run():
multiple_values = range(distribution.num_replicas_in_sync)
def value_fn(ctx):
return multiple_values[ctx.replica_id_in_sync_group]
distributed_values = (
distribution.experimental_distribute_values_from_function(value_fn))
def computation(x):
return math_ops.square(x)
outputs = ds_test_util.gather(
distribution,
distribution.run(computation, args=(distributed_values,)))
return outputs
results = run()
expected = [i**2 for i in range(distribution.num_replicas_in_sync)]
self.assertAllEqual(results, expected)
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations
.mirrored_strategy_with_two_gpus_no_merge_call,
strategy_combinations.tpu_strategy,
strategy_combinations.tpu_strategy_packed_var,
strategy_combinations.central_storage_strategy_with_two_gpus,
] + strategy_combinations.multiworker_strategies,
mode=["eager"]))
def testMakeDistributedValueDefaultDevicePlacement(self, distribution):
if not tf2.enabled():
self.skipTest("Only V2 is supported.")
def value_fn(ctx):
del ctx
return constant_op.constant(1.0)
distributed_values = (
distribution.experimental_distribute_values_from_function(value_fn))
default_device = array_ops.identity(constant_op.constant(1.0)).device
for i in range(len(distribution.extended.worker_devices)):
self.assertAllEqual(distributed_values._values[i].device, default_device)
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations
.mirrored_strategy_with_two_gpus_no_merge_call,
strategy_combinations.tpu_strategy,
strategy_combinations.tpu_strategy_packed_var,
strategy_combinations.central_storage_strategy_with_two_gpus,
] + strategy_combinations.multiworker_strategies,
mode=["eager"],
op_type=[constant_op.constant, array_ops.identity]))
def testMakeDistributedValueExplicitDevicePlacement(self, distribution,
op_type):
if not tf2.enabled():
self.skipTest("Only V2 is supported.")
worker_devices = distribution.extended.worker_devices
def value_fn(ctx):
# In multi client setup, worker_devices is just the devices on that
# worker.
worker_device_id = ctx.replica_id_in_sync_group % len(worker_devices)
with ops.device(worker_devices[worker_device_id]):
return op_type(1.0)
distributed_values = (
distribution.experimental_distribute_values_from_function(value_fn))
for i in range(len(distribution.extended.worker_devices)):
self.assertAllEqual(distributed_values._values[i].device,
worker_devices[i])
class PerWorkerResourceTest(test.TestCase, parameterized.TestCase):
@combinations.generate(
combinations.combine(dataset_fn_as_tf_function=[True, False]))
def testMapFnTracing(self, dataset_fn_as_tf_function):
# For a PerWorkerResource to correctly behave when used in dataset.map,
# it has to be that the map_fn is not traced only once such that
# PerWorkerResource.local_table can return the correct resource. This test
# can detect the potential breakage of this behavior on TAP.
self._traced_once = 0
def map_fn(x):
self._traced_once += 1
return x
def dataset_fn():
dataset = dataset_ops.DatasetV2.from_tensors([0, 1, 2]).repeat().batch(
2, drop_remainder=True)
dataset = dataset.map(map_fn)
return dataset
datasets = []
number_of_input_pipelines = 5
if dataset_fn_as_tf_function:
dataset_fn = def_function.function(dataset_fn)
expected_tracing_times = 1
else:
expected_tracing_times = number_of_input_pipelines
for _ in range(number_of_input_pipelines):
datasets.append(dataset_fn())
self.assertEqual(self._traced_once, expected_tracing_times)
class DistributedDelegateTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testGetAttr(self):
class Foo(object):
def __init__(self, x):
self.x = x
v = values_lib.DistributedDelegate((Foo(7), Foo(8)))
self.assertEqual(7, v.x)
with self.assertRaises(AttributeError):
_ = v.y
@test_util.run_in_graph_and_eager_modes
def testOperatorOverride(self):
v = values_lib.DistributedDelegate((7, 8))
# v should act like int(7).
self.assertEqual(8, v + 1)
self.assertEqual(10, 3 + v)
self.assertEqual(14, v + v)
self.assertEqual(5, v - 2)
self.assertEqual(6, 13 - v)
self.assertEqual(0, v - v)
self.assertEqual(14, v * 2)
self.assertEqual(21, 3 * v)
self.assertEqual(49, v * v)
self.assertEqual(3.5, v / 2)
self.assertEqual(1.5, 10.5 / v)
self.assertEqual(3, v // 2)
self.assertEqual(2, 15 // v)
self.assertEqual(1, v % 2)
self.assertEqual(2, 16 % v)
# pylint: disable=g-generic-assert
self.assertTrue(v < 12)
self.assertTrue(v <= 12)
self.assertFalse(v > 12)
self.assertFalse(v >= 12)
self.assertFalse(12 < v)
self.assertFalse(12 <= v)
self.assertTrue(12 > v)
self.assertTrue(12 >= v)
# pylint: enable=g-generic-assert
self.assertEqual(3, v & 3)
self.assertEqual(3, 11 & v)
self.assertEqual(15, v | 8)
self.assertEqual(23, 16 | v)
self.assertEqual(4, v ^ 3)
self.assertEqual(12, 11 ^ v)
self.assertEqual(343, pow(v, 3))
self.assertEqual(3, pow(v, 3, 10))
self.assertEqual(128, pow(2, v))
self.assertEqual(-7, -v)
self.assertEqual(~7, ~v)
self.assertEqual(7, abs(v))
with self.assertRaises(TypeError):
_ = v[2]
@test_util.run_in_graph_and_eager_modes
def testCopy(self):
class Foo(object):
def __init__(self, x):
self.x = x
v = values_lib.DistributedDelegate((Foo(7), Foo(8)))
v_shallow_copy = copy.copy(v)
self.assertEqual(v.x, v_shallow_copy.x)
v_deep_copy = copy.deepcopy(v)
self.assertEqual(v.x, v_deep_copy.x)
_TPU_STRATEGIES = (tpu_strategy.TPUStrategy, tpu_strategy.TPUStrategyV1)
def _make_replica_local(method, strategy=None):
if strategy is None:
devices = ("/device:GPU:0", "/device:CPU:0")
else:
devices = strategy.extended.worker_devices
v = []
for d, n, init in zip(devices, ["v", "v/replica"], [1., 2.]):
with ops.device(d):
v.append(variable_scope.get_variable(
name=n, initializer=init, use_resource=True))
if (strategy is not None) and isinstance(strategy, _TPU_STRATEGIES):
var_cls = tpu_values.TPUSyncOnReadVariable
else:
var_cls = values_lib.SyncOnReadVariable
replica_local = var_cls(strategy, v, method)
return v, replica_local
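# _make_replica_local returns both the underlying per-device variables and the
# SyncOnReadVariable (or TPUSyncOnReadVariable) that wraps them with the given
# aggregation method; the tests below assign through the former and
# save/restore through the latter.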
class SyncOnReadVariableTest(test.TestCase, parameterized.TestCase):
def _assign_replica_local(self, v, new):
for var, n in zip(v, new):
with ops.device(var.device):
self.evaluate(var.assign(n))
def _save_return_saver(self, sess, var):
saver = saver_lib.Saver(var_list=[var])
test_dir = self.get_temp_dir()
prefix = os.path.join(test_dir, "ckpt")
return saver.save(sess, prefix), saver
def _save(self, sess, var):
save_path, _ = self._save_return_saver(sess, var)
return save_path
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
@test_util.run_in_graph_and_eager_modes(config=config)
def testProperties(self):
if context.num_gpus() < 1 and context.executing_eagerly():
self.skipTest("A GPU is not available for this test in eager mode.")
v, replica_local = _make_replica_local(
variable_scope.VariableAggregation.SUM)
self.assertEqual(v[0].constraint, replica_local.constraint)
self.assertEqual(v[0].name, replica_local.name)
self.assertEqual(v[0].dtype, replica_local.dtype)
self.assertEqual(v[0].shape, replica_local.shape)
self.assertEqual(variable_scope.VariableAggregation.SUM,
replica_local.aggregation)
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu
],
mode=["eager"]))
def testCanPassToDefFun(self, distribution):
@def_function.function
def add1(x):
return x + 1.
with distribution.scope():
v = variables_lib.Variable(
1.,
aggregation=variables_lib.VariableAggregation.MEAN,
synchronization=variables_lib.VariableSynchronization.ON_READ)
self.assertEqual(2., self.evaluate(add1(v)))
@combinations.generate(mirrored_and_tpu_strategy_combinations())
def testTensorConversion(self, distribution):
with context.graph_mode():
_, replica_local = _make_replica_local(
variable_scope.VariableAggregation.SUM, distribution)
converted = ops.convert_to_tensor(replica_local, as_ref=False)
self.assertIsInstance(converted, ops.Tensor)
self.assertEqual(converted.dtype, replica_local.dtype)
converted = ops.convert_to_tensor(replica_local, as_ref=True)
# Resources variable are converted to tensors as well when as_ref is True.
self.assertIsInstance(converted, ops.Tensor)
self.assertEqual(converted.dtype, replica_local.dtype)
@combinations.generate(combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.mirrored_strategy_with_two_gpus_no_merge_call,
strategy_combinations.tpu_strategy,
strategy_combinations.tpu_strategy_packed_var,
], mode=["eager"]))
def testValueInCrossReplicaContext(self, distribution):
value_list, replica_local = _make_replica_local(
variable_scope.VariableAggregation.ONLY_FIRST_REPLICA, distribution)
self.assertIsInstance(replica_local.value(), ops.Tensor)
self.assertEqual(self.evaluate(replica_local.value()),
self.evaluate(value_list[0].value()))
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.tpu_strategy_packed_var,
],
mode=["eager"]))
def testValueInDefaultReplicaContext(self, distribution):
with distribution.scope():
v1 = variables_lib.Variable(
0.0,
aggregation=variables_lib.VariableAggregation.SUM,
synchronization=variables_lib.VariableSynchronization.ON_READ)
v2 = variables_lib.Variable(
0.0,
aggregation=variables_lib.VariableAggregation.SUM,
synchronization=variables_lib.VariableSynchronization.ON_READ)
@def_function.function
def replica_fn():
v1.assign_add(1.0)
v2.assign_add(2.0)
distribution.run(replica_fn)
sum_v = v1 + v2
self.assertEqual(sum_v, 6.0)
@combinations.generate(mirrored_and_tpu_strategy_combinations())
def testSaveAndRestoreReplicaLocalSumOneGraph(self, distribution):
with self.cached_session() as sess:
v, replica_local = _make_replica_local(
variable_scope.VariableAggregation.SUM, distribution)
# Overwrite the initial values.
self._assign_replica_local(v, [3., 4.])
with distribution.scope():
# Saves the current value of v[0] + v[1], 7.
save_path, saver = self._save_return_saver(sess, replica_local)
# Change the values between save and restore.
self._assign_replica_local(v, [5., 6.])
# Restores the saved value of 7. which gets divided equally
# between the variables.
saver.restore(sess, save_path)
self.assertEqual([3.5, 3.5], self.evaluate([v[0], v[1]]))
@combinations.generate(mirrored_and_tpu_strategy_combinations())
def testSaveAndRestoreReplicaLocalMeanOneGraph(self, distribution):
if context.num_gpus() < 1 and context.executing_eagerly():
self.skipTest("A GPU is not available for this test in eager mode.")
with self.cached_session() as sess:
v, replica_local = _make_replica_local(
variable_scope.VariableAggregation.MEAN, distribution)
# Overwrite the initial values.
self._assign_replica_local(v, [3., 4.])
with distribution.scope():
# Saves the current value of (v[0] + v[1])/2, 3.5.
save_path, saver = self._save_return_saver(sess, replica_local)
# Change the values between save and restore.
self._assign_replica_local(v, [5., 6.])
# Restores the saved value of 3.5 to both variables.
saver.restore(sess, save_path)
self.assertEqual([3.5, 3.5], self.evaluate([v[0], v[1]]))
def _save_replica_local_mean(self, distribution):
"""Save variables with mirroring, returns save_path."""
with self.session(graph=ops.Graph()) as sess:
v, replica_local = _make_replica_local(
variable_scope.VariableAggregation.MEAN, distribution)
# Overwrite the initial values.
self._assign_replica_local(v, [3., 4.])
with distribution.scope():
# Saves the current value of (v[0] + v[1])/2, 3.5
save_path = self._save(sess, replica_local)
# Change the values between save and restore.
self._assign_replica_local(v, [5., 6.])
return save_path
def _save_replica_local_sum(self, distribution):
"""Save variables with mirroring, returns save_path."""
with self.session(graph=ops.Graph()) as sess:
v, replica_local = _make_replica_local(
variable_scope.VariableAggregation.SUM, distribution)
# Overwrite the initial values.
self._assign_replica_local(v, [1.5, 2.])
with distribution.scope():
# Saves the current value of v[0] + v[1], 3.5
save_path = self._save(sess, replica_local)
# Change the values between save and restore.
self._assign_replica_local(v, [5., 6.])
return save_path
def _save_normal(self):
"""Save variables without mirroring, returns save_path."""
with self.session(graph=ops.Graph()) as sess:
var = variable_scope.get_variable(
name="v", initializer=1., use_resource=True)
# Overwrite the initial value.
self.evaluate(var.assign(3.5))
# Saves the current value of var, 3.5.
save_path = self._save(sess, var)
# Change the values between save and restore.
self.evaluate(var.assign(5.))
return save_path
def _restore_normal(self, save_path):
"""Restore to variables without mirroring in a fresh graph."""
with self.session(graph=ops.Graph()) as sess:
var = variable_scope.get_variable(
name="v", initializer=7., use_resource=True)
# Overwrite the initial value.
self.evaluate(var.assign(8.))
# Restores the saved value of 3.5 to `var`.
saver = saver_lib.Saver(var_list=[var])
saver.restore(sess, save_path)
self.assertEqual(3.5, self.evaluate(var))
def _restore_replica_local_mean(self, save_path, distribution):
"""Restore to variables with mirroring in a fresh graph."""
with self.session(graph=ops.Graph()) as sess:
v, replica_local = _make_replica_local(
variable_scope.VariableAggregation.MEAN, distribution)
# Overwrite the initial values.
self._assign_replica_local(v, [7., 8.])
with distribution.scope():
# Restores the saved value of 3.5 to both variables.
saver = saver_lib.Saver(var_list=[replica_local])
saver.restore(sess, save_path)
self.assertEqual([3.5, 3.5], self.evaluate([v[0], v[1]]))
def _restore_replica_local_sum(self, save_path, distribution):
"""Restore to variables with mirroring in a fresh graph."""
with self.session(graph=ops.Graph()) as sess:
v, replica_local = _make_replica_local(
variable_scope.VariableAggregation.SUM, distribution)
# Overwrite the initial values.
self._assign_replica_local(v, [7., 8.])
with distribution.scope():
# Restores the saved value of 3.5 to both variables.
saver = saver_lib.Saver(var_list=[replica_local])
saver.restore(sess, save_path)
self.assertEqual([1.75, 1.75], self.evaluate([v[0], v[1]]))
@combinations.generate(mirrored_and_tpu_strategy_combinations())
def testSaveReplicaLocalRestoreReplicaLocalMean(self, distribution):
save_path = self._save_replica_local_mean(distribution)
self._restore_replica_local_mean(save_path, distribution)
@combinations.generate(mirrored_and_tpu_strategy_combinations())
def testSaveReplicaLocalRestoreReplicaLocalSum(self, distribution):
save_path = self._save_replica_local_sum(distribution)
self._restore_replica_local_sum(save_path, distribution)
@combinations.generate(mirrored_and_tpu_strategy_combinations())
def testSaveReplicaLocalMeanRestoreNormal(self, distribution):
save_path = self._save_replica_local_mean(distribution)
self._restore_normal(save_path)
@combinations.generate(mirrored_and_tpu_strategy_combinations())
def testSaveReplicaLocalSumRestoreNormal(self, distribution):
save_path = self._save_replica_local_sum(distribution)
self._restore_normal(save_path)
@combinations.generate(mirrored_and_tpu_strategy_combinations())
def testSaveNormalRestoreReplicaLocalMean(self, distribution):
save_path = self._save_normal()
self._restore_replica_local_mean(save_path, distribution)
@combinations.generate(mirrored_and_tpu_strategy_combinations())
def testSaveNormalRestoreReplicaLocalSum(self, distribution):
save_path = self._save_normal()
self._restore_replica_local_sum(save_path, distribution)
if __name__ == "__main__":
ds_test_util.main()
|
|
data = (
'jjyim', # 0x00
'jjyib', # 0x01
'jjyibs', # 0x02
'jjyis', # 0x03
'jjyiss', # 0x04
'jjying', # 0x05
'jjyij', # 0x06
'jjyic', # 0x07
'jjyik', # 0x08
'jjyit', # 0x09
'jjyip', # 0x0a
'jjyih', # 0x0b
'jji', # 0x0c
'jjig', # 0x0d
'jjigg', # 0x0e
'jjigs', # 0x0f
'jjin', # 0x10
'jjinj', # 0x11
'jjinh', # 0x12
'jjid', # 0x13
'jjil', # 0x14
'jjilg', # 0x15
'jjilm', # 0x16
'jjilb', # 0x17
'jjils', # 0x18
'jjilt', # 0x19
'jjilp', # 0x1a
'jjilh', # 0x1b
'jjim', # 0x1c
'jjib', # 0x1d
'jjibs', # 0x1e
'jjis', # 0x1f
'jjiss', # 0x20
'jjing', # 0x21
'jjij', # 0x22
'jjic', # 0x23
'jjik', # 0x24
'jjit', # 0x25
'jjip', # 0x26
'jjih', # 0x27
'ca', # 0x28
'cag', # 0x29
'cagg', # 0x2a
'cags', # 0x2b
'can', # 0x2c
'canj', # 0x2d
'canh', # 0x2e
'cad', # 0x2f
'cal', # 0x30
'calg', # 0x31
'calm', # 0x32
'calb', # 0x33
'cals', # 0x34
'calt', # 0x35
'calp', # 0x36
'calh', # 0x37
'cam', # 0x38
'cab', # 0x39
'cabs', # 0x3a
'cas', # 0x3b
'cass', # 0x3c
'cang', # 0x3d
'caj', # 0x3e
'cac', # 0x3f
'cak', # 0x40
'cat', # 0x41
'cap', # 0x42
'cah', # 0x43
'cae', # 0x44
'caeg', # 0x45
'caegg', # 0x46
'caegs', # 0x47
'caen', # 0x48
'caenj', # 0x49
'caenh', # 0x4a
'caed', # 0x4b
'cael', # 0x4c
'caelg', # 0x4d
'caelm', # 0x4e
'caelb', # 0x4f
'caels', # 0x50
'caelt', # 0x51
'caelp', # 0x52
'caelh', # 0x53
'caem', # 0x54
'caeb', # 0x55
'caebs', # 0x56
'caes', # 0x57
'caess', # 0x58
'caeng', # 0x59
'caej', # 0x5a
'caec', # 0x5b
'caek', # 0x5c
'caet', # 0x5d
'caep', # 0x5e
'caeh', # 0x5f
'cya', # 0x60
'cyag', # 0x61
'cyagg', # 0x62
'cyags', # 0x63
'cyan', # 0x64
'cyanj', # 0x65
'cyanh', # 0x66
'cyad', # 0x67
'cyal', # 0x68
'cyalg', # 0x69
'cyalm', # 0x6a
'cyalb', # 0x6b
'cyals', # 0x6c
'cyalt', # 0x6d
'cyalp', # 0x6e
'cyalh', # 0x6f
'cyam', # 0x70
'cyab', # 0x71
'cyabs', # 0x72
'cyas', # 0x73
'cyass', # 0x74
'cyang', # 0x75
'cyaj', # 0x76
'cyac', # 0x77
'cyak', # 0x78
'cyat', # 0x79
'cyap', # 0x7a
'cyah', # 0x7b
'cyae', # 0x7c
'cyaeg', # 0x7d
'cyaegg', # 0x7e
'cyaegs', # 0x7f
'cyaen', # 0x80
'cyaenj', # 0x81
'cyaenh', # 0x82
'cyaed', # 0x83
'cyael', # 0x84
'cyaelg', # 0x85
'cyaelm', # 0x86
'cyaelb', # 0x87
'cyaels', # 0x88
'cyaelt', # 0x89
'cyaelp', # 0x8a
'cyaelh', # 0x8b
'cyaem', # 0x8c
'cyaeb', # 0x8d
'cyaebs', # 0x8e
'cyaes', # 0x8f
'cyaess', # 0x90
'cyaeng', # 0x91
'cyaej', # 0x92
'cyaec', # 0x93
'cyaek', # 0x94
'cyaet', # 0x95
'cyaep', # 0x96
'cyaeh', # 0x97
'ceo', # 0x98
'ceog', # 0x99
'ceogg', # 0x9a
'ceogs', # 0x9b
'ceon', # 0x9c
'ceonj', # 0x9d
'ceonh', # 0x9e
'ceod', # 0x9f
'ceol', # 0xa0
'ceolg', # 0xa1
'ceolm', # 0xa2
'ceolb', # 0xa3
'ceols', # 0xa4
'ceolt', # 0xa5
'ceolp', # 0xa6
'ceolh', # 0xa7
'ceom', # 0xa8
'ceob', # 0xa9
'ceobs', # 0xaa
'ceos', # 0xab
'ceoss', # 0xac
'ceong', # 0xad
'ceoj', # 0xae
'ceoc', # 0xaf
'ceok', # 0xb0
'ceot', # 0xb1
'ceop', # 0xb2
'ceoh', # 0xb3
'ce', # 0xb4
'ceg', # 0xb5
'cegg', # 0xb6
'cegs', # 0xb7
'cen', # 0xb8
'cenj', # 0xb9
'cenh', # 0xba
'ced', # 0xbb
'cel', # 0xbc
'celg', # 0xbd
'celm', # 0xbe
'celb', # 0xbf
'cels', # 0xc0
'celt', # 0xc1
'celp', # 0xc2
'celh', # 0xc3
'cem', # 0xc4
'ceb', # 0xc5
'cebs', # 0xc6
'ces', # 0xc7
'cess', # 0xc8
'ceng', # 0xc9
'cej', # 0xca
'cec', # 0xcb
'cek', # 0xcc
'cet', # 0xcd
'cep', # 0xce
'ceh', # 0xcf
'cyeo', # 0xd0
'cyeog', # 0xd1
'cyeogg', # 0xd2
'cyeogs', # 0xd3
'cyeon', # 0xd4
'cyeonj', # 0xd5
'cyeonh', # 0xd6
'cyeod', # 0xd7
'cyeol', # 0xd8
'cyeolg', # 0xd9
'cyeolm', # 0xda
'cyeolb', # 0xdb
'cyeols', # 0xdc
'cyeolt', # 0xdd
'cyeolp', # 0xde
'cyeolh', # 0xdf
'cyeom', # 0xe0
'cyeob', # 0xe1
'cyeobs', # 0xe2
'cyeos', # 0xe3
'cyeoss', # 0xe4
'cyeong', # 0xe5
'cyeoj', # 0xe6
'cyeoc', # 0xe7
'cyeok', # 0xe8
'cyeot', # 0xe9
'cyeop', # 0xea
'cyeoh', # 0xeb
'cye', # 0xec
'cyeg', # 0xed
'cyegg', # 0xee
'cyegs', # 0xef
'cyen', # 0xf0
'cyenj', # 0xf1
'cyenh', # 0xf2
'cyed', # 0xf3
'cyel', # 0xf4
'cyelg', # 0xf5
'cyelm', # 0xf6
'cyelb', # 0xf7
'cyels', # 0xf8
'cyelt', # 0xf9
'cyelp', # 0xfa
'cyelh', # 0xfb
'cyem', # 0xfc
'cyeb', # 0xfd
'cyebs', # 0xfe
'cyes', # 0xff
)
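# Usage sketch (assumption: as in similar transliteration tables, this tuple
# is indexed by the low byte of a code point in the corresponding block):
#
#     data[0x28]  # -> 'ca'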
|
|
#!/usr/bin/env python
"""WAL-E is a program to assist in performing PostgreSQL continuous
archiving on S3 or Windows Azure Blob Service (WABS): it handles pushing
and fetching of WAL segments and base backups of the PostgreSQL data directory.
"""
import sys
def gevent_monkey(*args, **kwargs):
import gevent.monkey
gevent.monkey.patch_os()
gevent.monkey.patch_socket(dns=True, aggressive=True)
gevent.monkey.patch_ssl()
gevent.monkey.patch_time()
# Monkey-patch procedures early. If it doesn't work with gevent,
# sadly it cannot be used (easily) in WAL-E.
gevent_monkey()
def ssl_monkey():
import ssl
original = ssl.wrap_socket
def wrap_socket_monkey(*args, **kwargs):
# Set up an OpenSSL cipher string.
#
# Rationale behind each part:
#
# * HIGH: only use the most secure class of ciphers and
# key lengths, generally being 128 bits and larger.
#
# * !aNULL: exclude cipher suites that contain anonymous
# key exchange, making man in the middle attacks much
# more tractable.
#
# * !SSLv2: exclude any SSLv2 cipher suite, as this
# category has security weaknesses. There is only one
# OpenSSL cipher suite that is in the "HIGH" category
# but uses SSLv2 protocols: DES_192_EDE3_CBC_WITH_MD5
# (see s2_lib.c)
#
# Technically redundant given "!3DES", but the intent in
# listing it here is more apparent.
#
        # * !RC4: exclude because it's a weak stream cipher.
#
# * !3DES: exclude because it's very CPU intensive and
# most peers support another reputable block cipher.
#
        # * !MD5: known flaws in MD5 do not appear to be enough to
        #   compromise an SSL session, but SHA-family functions are so
        #   widely deployed that the compatibility benefit of allowing
        #   MD5 is slim to none, so disable it until someone produces
        #   a material complaint.
kwargs['ciphers'] = 'HIGH:!aNULL:!SSLv2:!RC4:!3DES:!MD5'
return original(*args, **kwargs)
ssl.wrap_socket = wrap_socket_monkey
ssl_monkey()
import argparse
import logging
import os
import re
import subprocess
import textwrap
import traceback
from wal_e import log_help
from wal_e.exception import UserCritical
from wal_e.exception import UserException
from wal_e import storage
from wal_e.piper import popen_sp
from wal_e.worker.pg import PSQL_BIN, psql_csv_run
from wal_e.pipeline import LZOP_BIN, PV_BIN, GPG_BIN
from wal_e.worker.pg import CONFIG_BIN, PgControlDataParser
log_help.configure(
format='%(name)-12s %(levelname)-8s %(message)s')
logger = log_help.WalELogger('wal_e.main')
def external_program_check(
to_check=frozenset([PSQL_BIN, LZOP_BIN, PV_BIN])):
"""
Validates the existence and basic working-ness of other programs
    Implemented because a missing dependency can produce confusing
    error output: the fork-worker model is necessary for throughput
    but obscures the cause of failures. This is intended to be a
    time- and frustration-saving measure. This problem has confused
    The Author in practice when switching rapidly between machines.
"""
could_not_run = []
error_msgs = []
def psql_err_handler(popen):
assert popen.returncode != 0
error_msgs.append(textwrap.fill(
'Could not get a connection to the database: '
'note that superuser access is required'))
# Bogus error message that is re-caught and re-raised
raise EnvironmentError('INTERNAL: Had problems running psql '
'from external_program_check')
with open(os.devnull, 'wb') as nullf:
for program in to_check:
try:
if program is PSQL_BIN:
psql_csv_run('SELECT 1', error_handler=psql_err_handler)
else:
if program is PV_BIN:
extra_args = ['--quiet']
else:
extra_args = []
proc = popen_sp([program] + extra_args,
stdout=nullf, stderr=nullf,
stdin=subprocess.PIPE)
                    # Close stdin for processes that default to
                    # reading from the pipe; the programs of this
                    # kind that WAL-E uses will terminate in this case.
proc.stdin.close()
proc.wait()
except EnvironmentError:
could_not_run.append(program)
if could_not_run:
error_msgs.append(
'Could not run the following programs, are they installed? ' +
', '.join(could_not_run))
if error_msgs:
raise UserException(
'could not run one or more external programs WAL-E depends upon',
'\n'.join(error_msgs))
return None
def extract_segment(text_with_extractable_segment):
from wal_e.storage import BASE_BACKUP_REGEXP
from wal_e.storage.base import SegmentNumber
match = re.match(BASE_BACKUP_REGEXP, text_with_extractable_segment)
if match is None:
return None
else:
groupdict = match.groupdict()
return SegmentNumber(log=groupdict['log'], seg=groupdict['seg'])
def build_parser():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=__doc__)
aws_group = parser.add_mutually_exclusive_group()
    aws_group.add_argument('-k', '--aws-access-key-id',
                           help='public AWS access key. Can also be defined '
                           'in an environment variable. If both are defined, '
                           "the one defined in the program's arguments takes "
                           'precedence.')
aws_group.add_argument('--aws-instance-profile', action='store_true',
help='Use the IAM Instance Profile associated '
'with this instance to authenticate with the S3 '
'API.')
gs_group = parser.add_mutually_exclusive_group()
    gs_group.add_argument('--gs-access-key-id',
                          help='public GS access key. Can also be defined '
                          'in an environment variable. If both are defined, '
                          "the one defined in the program's arguments takes "
                          'precedence.')
    gs_group.add_argument('--gs-instance-metadata', action='store_true',
                          help='Use the instance metadata on this machine '
                          'to authenticate with the GS API '
                          '(referenced by gs_creds below).')
    parser.add_argument('-a', '--wabs-account-name',
                        help='Account name of Windows Azure Blob Service '
                        'account. Can also be defined in an environment '
                        'variable. If both are defined, the one defined '
                        "in the program's arguments takes precedence.")
parser.add_argument('--s3-prefix',
help='S3 prefix to run all commands against. '
'Can also be defined via environment variable '
'WALE_S3_PREFIX.')
parser.add_argument('--wabs-prefix',
help='Storage prefix to run all commands against. '
'Can also be defined via environment variable '
'WALE_WABS_PREFIX.')
parser.add_argument('--gs-prefix',
help='Storage prefix to run all commands against. '
'Can also be defined via environment variable '
'WALE_GS_PREFIX.')
parser.add_argument(
'--gpg-key-id',
help='GPG key ID to encrypt to. (Also needed when decrypting.) '
'Can also be defined via environment variable '
'WALE_GPG_KEY_ID')
parser.add_argument(
'--terse', action='store_true',
help='Only log messages as or more severe than a warning.')
subparsers = parser.add_subparsers(title='subcommands',
dest='subcommand')
# Common arguments for backup-fetch and backup-push
backup_fetchpush_parent = argparse.ArgumentParser(add_help=False)
backup_fetchpush_parent.add_argument('PG_CLUSTER_DIRECTORY',
help="Postgres cluster path, "
"such as '/var/lib/database'")
backup_fetchpush_parent.add_argument(
'--pool-size', '-p', type=int, default=4,
help='Set the maximum number of concurrent transfers')
# operator to print the wal-e version
subparsers.add_parser('version', help='print the wal-e version')
# Common arguments for backup-list and backup-fetch
#
# NB: This does not include the --detail options because some
# other commands use backup listing functionality in a way where
# --detail is never required.
backup_list_nodetail_parent = argparse.ArgumentParser(add_help=False)
# Common arguments between wal-push and wal-fetch
wal_fetchpush_parent = argparse.ArgumentParser(add_help=False)
wal_fetchpush_parent.add_argument('WAL_SEGMENT',
help='Path to a WAL segment to upload')
backup_fetch_parser = subparsers.add_parser(
'backup-fetch', help='fetch a hot backup from S3 or WABS',
parents=[backup_fetchpush_parent, backup_list_nodetail_parent])
backup_list_parser = subparsers.add_parser(
'backup-list', parents=[backup_list_nodetail_parent],
help='list backups in S3 or WABS')
backup_push_parser = subparsers.add_parser(
'backup-push', help='pushing a fresh hot backup to S3 or WABS',
parents=[backup_fetchpush_parent])
backup_push_parser.add_argument(
'--cluster-read-rate-limit',
help='Rate limit reading the PostgreSQL cluster directory to a '
'tunable number of bytes per second', dest='rate_limit',
metavar='BYTES_PER_SECOND',
type=int, default=None)
backup_push_parser.add_argument(
'--while-offline',
help=('Backup a Postgres cluster that is in a stopped state '
'(for example, a replica that you stop and restart '
'when taking a backup)'),
dest='while_offline',
action='store_true',
default=False)
# wal-push operator section
wal_push_parser = subparsers.add_parser(
'wal-push', help='push a WAL file to S3 or WABS',
parents=[wal_fetchpush_parent])
wal_push_parser.add_argument(
'--pool-size', '-p', type=int, default=32,
help='Set the maximum number of concurrent transfers')
# backup-fetch operator section
backup_fetch_parser.add_argument('BACKUP_NAME',
help='the name of the backup to fetch')
backup_fetch_parser.add_argument(
'--blind-restore',
help='Restore from backup without verification of tablespace symlinks',
dest='blind_restore',
action='store_true',
default=False)
backup_fetch_parser.add_argument(
'--restore-spec',
help=('Specification for the directory structure of the database '
'restoration (optional, see README for more information).'),
type=str,
default=None)
# backup-list operator section
backup_list_parser.add_argument(
'QUERY', nargs='?', default=None,
help='a string qualifying backups to list')
backup_list_parser.add_argument(
'--detail', default=False, action='store_true',
help='show more detailed information about every backup')
# wal-fetch operator section
wal_fetch_parser = subparsers.add_parser(
'wal-fetch', help='fetch a WAL file from S3 or WABS',
parents=[wal_fetchpush_parent])
wal_fetch_parser.add_argument('WAL_DESTINATION',
help='Path to download the WAL segment to')
wal_fetch_parser.add_argument(
'--prefetch', '-p', type=int, default=8,
help='Set the maximum number of WAL segments to prefetch.')
wal_prefetch_parser = subparsers.add_parser('wal-prefetch',
help='Prefetch WAL')
wal_prefetch_parser.add_argument(
'BASE_DIRECTORY',
help='Contains writable directory to place ".wal-e" directory in.')
wal_prefetch_parser.add_argument('SEGMENT',
help='Segment by name to download.')
# delete subparser section
delete_parser = subparsers.add_parser(
'delete', help='operators to destroy specified data in S3 or WABS')
delete_parser.add_argument('--dry-run', '-n', action='store_true',
help=('Only print what would be deleted, '
'do not actually delete anything'))
delete_parser.add_argument('--confirm', action='store_true',
help=('Actually delete data. '
'By default, a dry run is performed. '
'Overridden by --dry-run.'))
delete_subparsers = delete_parser.add_subparsers(
title='delete subcommands',
description=('All operators that may delete data are contained '
'in this subcommand.'),
dest='delete_subcommand')
# delete 'before' operator
delete_before_parser = delete_subparsers.add_parser(
'before', help=('Delete all backups and WAL segments strictly before '
'the given base backup name or WAL segment number. '
'The passed backup is *not* deleted.'))
delete_before_parser.add_argument(
'BEFORE_SEGMENT_EXCLUSIVE',
help='A WAL segment number or base backup name')
# delete 'retain' operator
delete_retain_parser = delete_subparsers.add_parser(
'retain', help=('Delete backups and WAL segments older than the '
'NUM_TO_RETAIN oldest base backup. This will leave '
'NUM_TO_RETAIN working backups in place.'))
delete_retain_parser.add_argument(
'NUM_TO_RETAIN', type=int,
help='The number of base backups to retain')
# delete old versions operator
delete_subparsers.add_parser(
'old-versions',
help=('Delete all old versions of WAL-E backup files. One probably '
'wants to ensure that they take a new backup with the new '
'format first. '
'This is useful after a WAL-E major release upgrade.'))
# delete *everything* operator
delete_subparsers.add_parser(
'everything',
help=('Delete all data in the current WAL-E context. '
'Typically this is only appropriate when decommissioning an '
'entire WAL-E archive.'))
return parser
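# Minimal sketch of driving the parser above (the cluster path is a
# placeholder):
#
#     parser = build_parser()
#     ns = parser.parse_args(['backup-push', '/var/lib/postgresql/data'])
#     assert ns.subcommand == 'backup-push' and ns.pool_size == 4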
def _config_hint_generate(optname, both_env_and_param):
"""Generate HINT language for missing configuration"""
env = optname.replace('-', '_').upper()
if both_env_and_param:
option = '--' + optname.lower()
return ('Pass "{0}" or set the environment variable "{1}".'
.format(option, env))
else:
return 'Set the environment variable {0}.'.format(env)
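# Illustrative outputs of the helper above:
#   _config_hint_generate('aws-access-key-id', True)
#     -> 'Pass "--aws-access-key-id" or set the environment variable
#         "AWS_ACCESS_KEY_ID".'
#   _config_hint_generate('aws-secret-access-key', False)
#     -> 'Set the environment variable AWS_SECRET_ACCESS_KEY.'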
def s3_explicit_creds(args):
access_key = args.aws_access_key_id or os.getenv('AWS_ACCESS_KEY_ID')
if access_key is None:
raise UserException(
msg='AWS Access Key credential is required but not provided',
hint=(_config_hint_generate('aws-access-key-id', True)))
secret_key = os.getenv('AWS_SECRET_ACCESS_KEY')
if secret_key is None:
raise UserException(
msg='AWS Secret Key credential is required but not provided',
hint=_config_hint_generate('aws-secret-access-key', False))
security_token = os.getenv('AWS_SECURITY_TOKEN')
from wal_e.blobstore import s3
return s3.Credentials(access_key, secret_key, security_token)
def s3_instance_profile(args):
from wal_e.blobstore import s3
assert args.aws_instance_profile
return s3.InstanceProfileCredentials()
def gs_creds(args):
from wal_e.blobstore import gs
if args.gs_instance_metadata:
access_key, secret_key = None, None
else:
access_key = args.gs_access_key_id or os.getenv('GS_ACCESS_KEY_ID')
if access_key is None:
raise UserException(
msg='GS Access Key credential is required but not provided',
hint=(_config_hint_generate('gs-access-key-id', True)))
secret_key = os.getenv('GS_SECRET_ACCESS_KEY')
if secret_key is None:
raise UserException(
msg='GS Secret Key credential is required but not provided',
hint=_config_hint_generate('gs-secret-access-key', False))
return gs.Credentials(access_key, secret_key)
def configure_backup_cxt(args):
# Try to find some WAL-E prefix to store data in.
prefix = (args.s3_prefix or args.wabs_prefix or args.gs_prefix
or os.getenv('WALE_S3_PREFIX') or os.getenv('WALE_WABS_PREFIX')
or os.getenv('WALE_GS_PREFIX') or os.getenv('WALE_SWIFT_PREFIX'))
if prefix is None:
raise UserException(
msg='no storage prefix defined',
hint=(
'Either set one of the --wabs-prefix, --s3-prefix or '
'--gs-prefix options or define one of the WALE_WABS_PREFIX, '
'WALE_S3_PREFIX, WALE_SWIFT_PREFIX or WALE_GS_PREFIX '
'environment variables.'
)
)
store = storage.StorageLayout(prefix)
    # GPG can be optionally layered atop every backend, so a common
    # code path suffices.
gpg_key_id = args.gpg_key_id or os.getenv('WALE_GPG_KEY_ID')
if gpg_key_id is not None:
external_program_check([GPG_BIN])
    # Read in the configuration for each supported backend data store,
    # yielding a value adhering to the 'operator.Backup' protocol.
if store.is_s3:
if args.aws_instance_profile:
creds = s3_instance_profile(args)
else:
creds = s3_explicit_creds(args)
from wal_e.blobstore import s3
s3.sigv4_check_apply()
from wal_e.operator import s3_operator
return s3_operator.S3Backup(store, creds, gpg_key_id)
elif store.is_wabs:
account_name = args.wabs_account_name or os.getenv('WABS_ACCOUNT_NAME')
if account_name is None:
raise UserException(
msg='WABS account name is undefined',
hint=_config_hint_generate('wabs-account-name', True))
access_key = os.getenv('WABS_ACCESS_KEY')
access_token = os.getenv('WABS_SAS_TOKEN')
if not (access_key or access_token):
raise UserException(
                msg='WABS access credentials are required but not provided',
hint=(
'Define one of the WABS_ACCESS_KEY or '
'WABS_SAS_TOKEN environment variables.'
))
from wal_e.blobstore import wabs
from wal_e.operator.wabs_operator import WABSBackup
creds = wabs.Credentials(account_name, access_key, access_token)
return WABSBackup(store, creds, gpg_key_id)
elif store.is_swift:
from wal_e.blobstore import swift
from wal_e.operator.swift_operator import SwiftBackup
creds = swift.Credentials(
os.getenv('SWIFT_AUTHURL'),
os.getenv('SWIFT_USER'),
os.getenv('SWIFT_PASSWORD'),
os.getenv('SWIFT_TENANT'),
os.getenv('SWIFT_REGION'),
os.getenv('SWIFT_ENDPOINT_TYPE', 'publicURL'),
os.getenv('SWIFT_AUTH_VERSION', '2'),
)
return SwiftBackup(store, creds, gpg_key_id)
elif store.is_gs:
from wal_e.operator.gs_operator import GSBackup
return GSBackup(store, gpg_key_id)
else:
raise UserCritical(
            msg='unsupported blob store; this code path should be unreachable',
hint='Report a bug.')
def monkeypatch_tarfile_copyfileobj():
"""Monkey-patch tarfile.copyfileobj to exploit large buffers"""
import tarfile
from wal_e import copyfileobj
tarfile.copyfileobj = copyfileobj.copyfileobj
def render_subcommand(args):
"""Render a subcommand for human-centric viewing"""
if args.subcommand == 'delete':
return 'delete ' + args.delete_subcommand
if args.subcommand in ('wal-prefetch', 'wal-push', 'wal-fetch'):
return None
return args.subcommand
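# Illustrative behavior of render_subcommand:
#   subcommand='delete', delete_subcommand='before'  -> 'delete before'
#   subcommand in ('wal-prefetch', 'wal-push', 'wal-fetch') -> None, which
#       suppresses the "starting WAL-E" log line emitted in main()
#   any other subcommand -> returned unchanged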
def main():
parser = build_parser()
args = parser.parse_args()
subcommand = args.subcommand
# Adjust logging level if terse output is set.
if args.terse:
log_help.set_level(logging.WARNING)
# Handle version printing specially, because it doesn't need
# credentials.
if subcommand == 'version':
import pkgutil
print(pkgutil.get_data('wal_e', 'VERSION').decode('ascii').strip())
sys.exit(0)
# Print a start-up message right away.
#
    # Otherwise, it is hard to tell when and how WAL-E started in the logs,
    # because it often emits status output too late.
rendered = render_subcommand(args)
if rendered is not None:
logger.info(msg='starting WAL-E',
detail='The subcommand is "{0}".'.format(rendered))
try:
backup_cxt = configure_backup_cxt(args)
if subcommand == 'backup-fetch':
monkeypatch_tarfile_copyfileobj()
external_program_check([LZOP_BIN])
backup_cxt.database_fetch(
args.PG_CLUSTER_DIRECTORY,
args.BACKUP_NAME,
blind_restore=args.blind_restore,
restore_spec=args.restore_spec,
pool_size=args.pool_size)
elif subcommand == 'backup-list':
backup_cxt.backup_list(query=args.QUERY, detail=args.detail)
elif subcommand == 'backup-push':
monkeypatch_tarfile_copyfileobj()
if args.while_offline:
# we need to query pg_config first for the
# pg_controldata's bin location
external_program_check([CONFIG_BIN])
parser = PgControlDataParser(args.PG_CLUSTER_DIRECTORY)
controldata_bin = parser.controldata_bin()
external_programs = [
LZOP_BIN,
PV_BIN,
controldata_bin]
else:
external_programs = [LZOP_BIN, PSQL_BIN, PV_BIN]
external_program_check(external_programs)
rate_limit = args.rate_limit
while_offline = args.while_offline
backup_cxt.database_backup(
args.PG_CLUSTER_DIRECTORY,
rate_limit=rate_limit,
while_offline=while_offline,
pool_size=args.pool_size)
elif subcommand == 'wal-fetch':
external_program_check([LZOP_BIN])
res = backup_cxt.wal_restore(args.WAL_SEGMENT,
args.WAL_DESTINATION,
args.prefetch)
if not res:
sys.exit(1)
elif subcommand == 'wal-prefetch':
external_program_check([LZOP_BIN])
backup_cxt.wal_prefetch(args.BASE_DIRECTORY, args.SEGMENT)
elif subcommand == 'wal-push':
external_program_check([LZOP_BIN])
backup_cxt.wal_archive(args.WAL_SEGMENT,
concurrency=args.pool_size)
elif subcommand == 'delete':
# Set up pruning precedence, optimizing for *not* deleting data
#
# Canonicalize the passed arguments into the value
# "is_dry_run_really"
if args.dry_run is False and args.confirm is True:
# Actually delete data *only* if there are *no* --dry-runs
# present and --confirm is present.
logger.info(msg='deleting data in the store')
is_dry_run_really = False
else:
logger.info(msg='performing dry run of data deletion')
is_dry_run_really = True
# This is not necessary, but "just in case" to find bugs.
def just_error(*args, **kwargs):
assert False, ('About to delete something in '
'dry-run mode. Please report a bug.')
# Handle the subcommands and route them to the right
# implementations.
if args.delete_subcommand == 'old-versions':
backup_cxt.delete_old_versions(is_dry_run_really)
elif args.delete_subcommand == 'everything':
backup_cxt.delete_all(is_dry_run_really)
elif args.delete_subcommand == 'retain':
backup_cxt.delete_with_retention(is_dry_run_really,
args.NUM_TO_RETAIN)
elif args.delete_subcommand == 'before':
segment_info = extract_segment(args.BEFORE_SEGMENT_EXCLUSIVE)
assert segment_info is not None
backup_cxt.delete_before(is_dry_run_really, segment_info)
else:
assert False, 'Should be rejected by argument parsing.'
else:
logger.error(msg='subcommand not implemented',
detail=('The submitted subcommand was {0}.'
.format(subcommand)),
hint='Check for typos or consult wal-e --help.')
sys.exit(127)
# Report on all encountered exceptions, and raise the last one
# to take advantage of the final catch-all reporting and exit
# code management.
if backup_cxt.exceptions:
for exc in backup_cxt.exceptions[:-1]:
if isinstance(exc, UserException):
logger.log(level=exc.severity,
msg=exc.msg, detail=exc.detail, hint=exc.hint)
else:
logger.error(msg=exc)
raise backup_cxt.exceptions[-1]
except UserException as e:
logger.log(level=e.severity,
msg=e.msg, detail=e.detail, hint=e.hint)
sys.exit(1)
except Exception as e:
logger.critical(
msg='An unprocessed exception has avoided all error handling',
detail=''.join(traceback.format_exception(*sys.exc_info())))
sys.exit(2)
|
|
#!/usr/bin/python -u
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from hashlib import md5
import sys
import itertools
import time
import unittest
import uuid
from optparse import OptionParser
from urlparse import urlparse
import random
from nose import SkipTest
from swift.common.manager import Manager
from swift.common.internal_client import InternalClient
from swift.common import utils, direct_client, ring
from swift.common.storage_policy import POLICIES
from swift.common.http import HTTP_NOT_FOUND
from test.probe.common import reset_environment, get_to_final_state
from swiftclient import client, get_auth, ClientException
TIMEOUT = 60
def meta_command(name, bases, attrs):
"""
Look for attrs with a truthy attribute __command__ and add them to an
attribute __commands__ on the type that maps names to decorated methods.
The decorated methods' doc strings also get mapped in __docs__.
Also adds a method run(command_name, *args, **kwargs) that will
execute the method mapped to the name in __commands__.
"""
commands = {}
docs = {}
for attr, value in attrs.items():
if getattr(value, '__command__', False):
commands[attr] = value
            # methods always have a __doc__ attribute, sometimes empty
docs[attr] = (getattr(value, '__doc__', None) or
'perform the %s command' % attr).strip()
attrs['__commands__'] = commands
attrs['__docs__'] = docs
def run(self, command, *args, **kwargs):
return self.__commands__[command](self, *args, **kwargs)
attrs.setdefault('run', run)
return type(name, bases, attrs)
def command(f):
f.__command__ = True
return f
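# A minimal illustrative sketch of the meta_command/@command machinery above;
# _ExampleSplitter is a hypothetical class used only for illustration.
class _ExampleSplitter(object):
    __metaclass__ = meta_command
    @command
    def ping(self):
        """answer with pong"""
        return 'pong'
# _ExampleSplitter.__commands__ maps 'ping' to the method,
# _ExampleSplitter.__docs__['ping'] == 'answer with pong', and
# _ExampleSplitter().run('ping') dispatches through the generated run().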
class BrainSplitter(object):
__metaclass__ = meta_command
def __init__(self, url, token, container_name='test', object_name='test'):
self.url = url
self.token = token
self.account = utils.split_path(urlparse(url).path, 2, 2)[1]
self.container_name = container_name
self.object_name = object_name
self.servers = Manager(['container-server'])
policies = list(POLICIES)
random.shuffle(policies)
self.policies = itertools.cycle(policies)
container_part, container_nodes = ring.Ring(
'/etc/swift/container.ring.gz').get_nodes(
self.account, self.container_name)
container_node_ids = [n['id'] for n in container_nodes]
if all(n_id in container_node_ids for n_id in (0, 1)):
self.primary_numbers = (1, 2)
self.handoff_numbers = (3, 4)
else:
self.primary_numbers = (3, 4)
self.handoff_numbers = (1, 2)
@command
def start_primary_half(self):
"""
start container servers 1 & 2
"""
tuple(self.servers.start(number=n) for n in self.primary_numbers)
@command
def stop_primary_half(self):
"""
stop container servers 1 & 2
"""
tuple(self.servers.stop(number=n) for n in self.primary_numbers)
@command
def start_handoff_half(self):
"""
start container servers 3 & 4
"""
tuple(self.servers.start(number=n) for n in self.handoff_numbers)
@command
def stop_handoff_half(self):
"""
stop container servers 3 & 4
"""
tuple(self.servers.stop(number=n) for n in self.handoff_numbers)
@command
def put_container(self, policy_index=None):
"""
put container with next storage policy
"""
policy = self.policies.next()
if policy_index is not None:
policy = POLICIES.get_by_index(int(policy_index))
if not policy:
                raise ValueError('Unknown policy with index %s' % policy_index)
headers = {'X-Storage-Policy': policy.name}
client.put_container(self.url, self.token, self.container_name,
headers=headers)
@command
def delete_container(self):
"""
delete container
"""
client.delete_container(self.url, self.token, self.container_name)
@command
def put_object(self, headers=None):
"""
issue put for zero byte test object
"""
client.put_object(self.url, self.token, self.container_name,
self.object_name, headers=headers)
@command
def delete_object(self):
"""
issue delete for test object
"""
try:
client.delete_object(self.url, self.token, self.container_name,
self.object_name)
except ClientException as err:
if err.http_status != HTTP_NOT_FOUND:
raise
parser = OptionParser('%prog split-brain [options] '
'<command>[:<args>[,<args>...]] [<command>...]')
parser.usage += '\n\nCommands:\n\t' + \
'\n\t'.join("%s - %s" % (name, doc) for name, doc in
BrainSplitter.__docs__.items())
parser.add_option('-c', '--container', default='container-%s' % uuid.uuid4(),
help='set container name')
parser.add_option('-o', '--object', default='object-%s' % uuid.uuid4(),
help='set object name')
class TestContainerMergePolicyIndex(unittest.TestCase):
def setUp(self):
if len(POLICIES) < 2:
raise SkipTest()
(self.pids, self.port2server, self.account_ring, self.container_ring,
self.object_ring, self.policy, self.url, self.token,
self.account, self.configs) = reset_environment()
self.container_name = 'container-%s' % uuid.uuid4()
self.object_name = 'object-%s' % uuid.uuid4()
self.brain = BrainSplitter(self.url, self.token, self.container_name,
self.object_name)
def test_merge_storage_policy_index(self):
# generic split brain
self.brain.stop_primary_half()
self.brain.put_container()
self.brain.start_primary_half()
self.brain.stop_handoff_half()
self.brain.put_container()
self.brain.put_object()
self.brain.start_handoff_half()
# make sure we have some manner of split brain
container_part, container_nodes = self.container_ring.get_nodes(
self.account, self.container_name)
head_responses = []
for node in container_nodes:
metadata = direct_client.direct_head_container(
node, container_part, self.account, self.container_name)
head_responses.append((node, metadata))
found_policy_indexes = \
set(metadata['X-Backend-Storage-Policy-Index'] for
node, metadata in head_responses)
self.assert_(len(found_policy_indexes) > 1,
'primary nodes did not disagree about policy index %r' %
head_responses)
# find our object
orig_policy_index = None
for policy_index in found_policy_indexes:
object_ring = POLICIES.get_object_ring(policy_index, '/etc/swift')
part, nodes = object_ring.get_nodes(
self.account, self.container_name, self.object_name)
for node in nodes:
try:
direct_client.direct_head_object(
node, part, self.account, self.container_name,
self.object_name,
headers={'X-Backend-Storage-Policy-Index':
policy_index})
except direct_client.ClientException as err:
continue
orig_policy_index = policy_index
break
if orig_policy_index is not None:
break
else:
self.fail('Unable to find /%s/%s/%s in %r' % (
self.account, self.container_name, self.object_name,
found_policy_indexes))
get_to_final_state()
Manager(['container-reconciler']).once()
# validate containers
head_responses = []
for node in container_nodes:
metadata = direct_client.direct_head_container(
node, container_part, self.account, self.container_name)
head_responses.append((node, metadata))
found_policy_indexes = \
set(metadata['X-Backend-Storage-Policy-Index'] for
node, metadata in head_responses)
self.assert_(len(found_policy_indexes) == 1,
'primary nodes disagree about policy index %r' %
head_responses)
expected_policy_index = found_policy_indexes.pop()
self.assertNotEqual(orig_policy_index, expected_policy_index)
# validate object placement
orig_policy_ring = POLICIES.get_object_ring(orig_policy_index,
'/etc/swift')
for node in orig_policy_ring.devs:
try:
direct_client.direct_head_object(
node, part, self.account, self.container_name,
self.object_name, headers={
'X-Backend-Storage-Policy-Index': orig_policy_index})
except direct_client.ClientException as err:
if err.http_status == HTTP_NOT_FOUND:
continue
raise
else:
self.fail('Found /%s/%s/%s in %s' % (
self.account, self.container_name, self.object_name,
orig_policy_index))
# use proxy to access object (bad container info might be cached...)
timeout = time.time() + TIMEOUT
while time.time() < timeout:
try:
metadata = client.head_object(self.url, self.token,
self.container_name,
self.object_name)
except ClientException as err:
if err.http_status != HTTP_NOT_FOUND:
raise
time.sleep(1)
else:
break
else:
self.fail('could not HEAD /%s/%s/%s/ from policy %s '
'after %s seconds.' % (
self.account, self.container_name, self.object_name,
expected_policy_index, TIMEOUT))
def test_reconcile_delete(self):
# generic split brain
self.brain.stop_primary_half()
self.brain.put_container()
self.brain.put_object()
self.brain.start_primary_half()
self.brain.stop_handoff_half()
self.brain.put_container()
self.brain.delete_object()
self.brain.start_handoff_half()
# make sure we have some manner of split brain
container_part, container_nodes = self.container_ring.get_nodes(
self.account, self.container_name)
head_responses = []
for node in container_nodes:
metadata = direct_client.direct_head_container(
node, container_part, self.account, self.container_name)
head_responses.append((node, metadata))
found_policy_indexes = \
set(metadata['X-Backend-Storage-Policy-Index'] for
node, metadata in head_responses)
self.assert_(len(found_policy_indexes) > 1,
'primary nodes did not disagree about policy index %r' %
head_responses)
# find our object
orig_policy_index = ts_policy_index = None
for policy_index in found_policy_indexes:
object_ring = POLICIES.get_object_ring(policy_index, '/etc/swift')
part, nodes = object_ring.get_nodes(
self.account, self.container_name, self.object_name)
for node in nodes:
try:
direct_client.direct_head_object(
node, part, self.account, self.container_name,
self.object_name,
headers={'X-Backend-Storage-Policy-Index':
policy_index})
except direct_client.ClientException as err:
if 'x-backend-timestamp' in err.http_headers:
ts_policy_index = policy_index
break
else:
orig_policy_index = policy_index
break
if not orig_policy_index:
self.fail('Unable to find /%s/%s/%s in %r' % (
self.account, self.container_name, self.object_name,
found_policy_indexes))
if not ts_policy_index:
self.fail('Unable to find tombstone /%s/%s/%s in %r' % (
self.account, self.container_name, self.object_name,
found_policy_indexes))
get_to_final_state()
Manager(['container-reconciler']).once()
# validate containers
head_responses = []
for node in container_nodes:
metadata = direct_client.direct_head_container(
node, container_part, self.account, self.container_name)
head_responses.append((node, metadata))
new_found_policy_indexes = \
set(metadata['X-Backend-Storage-Policy-Index'] for node,
metadata in head_responses)
self.assert_(len(new_found_policy_indexes) == 1,
'primary nodes disagree about policy index %r' %
dict((node['port'],
metadata['X-Backend-Storage-Policy-Index'])
for node, metadata in head_responses))
expected_policy_index = new_found_policy_indexes.pop()
self.assertEqual(orig_policy_index, expected_policy_index)
# validate object fully deleted
for policy_index in found_policy_indexes:
object_ring = POLICIES.get_object_ring(policy_index, '/etc/swift')
part, nodes = object_ring.get_nodes(
self.account, self.container_name, self.object_name)
for node in nodes:
try:
direct_client.direct_head_object(
node, part, self.account, self.container_name,
self.object_name,
headers={'X-Backend-Storage-Policy-Index':
policy_index})
except direct_client.ClientException as err:
if err.http_status == HTTP_NOT_FOUND:
continue
else:
self.fail('Found /%s/%s/%s in %s on %s' % (
self.account, self.container_name, self.object_name,
orig_policy_index, node))
def test_reconcile_manifest(self):
manifest_data = []
def write_part(i):
body = 'VERIFY%0.2d' % i + '\x00' * 1048576
part_name = 'manifest_part_%0.2d' % i
manifest_entry = {
"path": "/%s/%s" % (self.container_name, part_name),
"etag": md5(body).hexdigest(),
"size_bytes": len(body),
}
client.put_object(self.url, self.token, self.container_name,
part_name, contents=body)
manifest_data.append(manifest_entry)
# get an old container stashed
self.brain.stop_primary_half()
policy = random.choice(list(POLICIES))
self.brain.put_container(policy.idx)
self.brain.start_primary_half()
# write some parts
for i in range(10):
write_part(i)
self.brain.stop_handoff_half()
wrong_policy = random.choice([p for p in POLICIES if p is not policy])
self.brain.put_container(wrong_policy.idx)
# write some more parts
for i in range(10, 20):
write_part(i)
# write manifest
try:
client.put_object(self.url, self.token, self.container_name,
self.object_name,
contents=utils.json.dumps(manifest_data),
query_string='multipart-manifest=put')
except ClientException as err:
# so as it works out, you can't really upload a multi-part
# manifest for objects that are currently misplaced - you have to
# wait until they're all available - which is about the same as
# some other failure that causes data to be unavailable to the
# proxy at the time of upload
self.assertEqual(err.http_status, 400)
# but what the heck, we'll sneak one in just to see what happens...
direct_manifest_name = self.object_name + '-direct-test'
object_ring = POLICIES.get_object_ring(wrong_policy.idx, '/etc/swift')
part, nodes = object_ring.get_nodes(
self.account, self.container_name, direct_manifest_name)
container_part = self.container_ring.get_part(self.account,
self.container_name)
def translate_direct(data):
return {
'hash': data['etag'],
'bytes': data['size_bytes'],
'name': data['path'],
}
direct_manifest_data = map(translate_direct, manifest_data)
headers = {
'x-container-host': ','.join('%s:%s' % (n['ip'], n['port']) for n
in self.container_ring.devs),
'x-container-device': ','.join(n['device'] for n in
self.container_ring.devs),
'x-container-partition': container_part,
'X-Backend-Storage-Policy-Index': wrong_policy.idx,
'X-Static-Large-Object': 'True',
}
for node in nodes:
direct_client.direct_put_object(
node, part, self.account, self.container_name,
direct_manifest_name,
contents=utils.json.dumps(direct_manifest_data),
headers=headers)
break # one should do it...
self.brain.start_handoff_half()
get_to_final_state()
Manager(['container-reconciler']).once()
# clear proxy cache
client.post_container(self.url, self.token, self.container_name, {})
# let's see how that direct upload worked out...
metadata, body = client.get_object(
self.url, self.token, self.container_name, direct_manifest_name,
query_string='multipart-manifest=get')
self.assertEqual(metadata['x-static-large-object'].lower(), 'true')
for i, entry in enumerate(utils.json.loads(body)):
for key in ('hash', 'bytes', 'name'):
self.assertEquals(entry[key], direct_manifest_data[i][key])
metadata, body = client.get_object(
self.url, self.token, self.container_name, direct_manifest_name)
self.assertEqual(metadata['x-static-large-object'].lower(), 'true')
self.assertEqual(int(metadata['content-length']),
sum(part['size_bytes'] for part in manifest_data))
self.assertEqual(body, ''.join('VERIFY%0.2d' % i + '\x00' * 1048576
for i in range(20)))
# and regular upload should work now too
client.put_object(self.url, self.token, self.container_name,
self.object_name,
contents=utils.json.dumps(manifest_data),
query_string='multipart-manifest=put')
metadata = client.head_object(self.url, self.token,
self.container_name,
self.object_name)
self.assertEqual(int(metadata['content-length']),
sum(part['size_bytes'] for part in manifest_data))
def test_reconciler_move_object_twice(self):
# select some policies
old_policy = random.choice(list(POLICIES))
new_policy = random.choice([p for p in POLICIES if p != old_policy])
# setup a split brain
self.brain.stop_handoff_half()
# get old_policy on two primaries
self.brain.put_container(policy_index=int(old_policy))
self.brain.start_handoff_half()
self.brain.stop_primary_half()
# force a recreate on handoffs
self.brain.put_container(policy_index=int(old_policy))
self.brain.delete_container()
self.brain.put_container(policy_index=int(new_policy))
self.brain.put_object() # populate memcache with new_policy
self.brain.start_primary_half()
# at this point two primaries have old policy
container_part, container_nodes = self.container_ring.get_nodes(
self.account, self.container_name)
head_responses = []
for node in container_nodes:
metadata = direct_client.direct_head_container(
node, container_part, self.account, self.container_name)
head_responses.append((node, metadata))
old_container_node_ids = [
node['id'] for node, metadata in head_responses
if int(old_policy) ==
int(metadata['X-Backend-Storage-Policy-Index'])]
self.assertEqual(2, len(old_container_node_ids))
# hopefully memcache still has the new policy cached
self.brain.put_object()
# double-check object correctly written to new policy
conf_files = []
for server in Manager(['container-reconciler']).servers:
conf_files.extend(server.conf_files())
conf_file = conf_files[0]
client = InternalClient(conf_file, 'probe-test', 3)
client.get_object_metadata(
self.account, self.container_name, self.object_name,
headers={'X-Backend-Storage-Policy-Index': int(new_policy)})
client.get_object_metadata(
self.account, self.container_name, self.object_name,
acceptable_statuses=(4,),
headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
# shutdown the containers that know about the new policy
self.brain.stop_handoff_half()
# and get rows enqueued from old nodes
for server_type in ('container-replicator', 'container-updater'):
server = Manager([server_type])
tuple(server.once(number=n + 1) for n in old_container_node_ids)
# verify entry in the queue for the "misplaced" new_policy
for container in client.iter_containers('.misplaced_objects'):
for obj in client.iter_objects('.misplaced_objects',
container['name']):
expected = '%d:/%s/%s/%s' % (new_policy, self.account,
self.container_name,
self.object_name)
self.assertEqual(obj['name'], expected)
Manager(['container-reconciler']).once()
# verify object in old_policy
client.get_object_metadata(
self.account, self.container_name, self.object_name,
headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
# verify object is *not* in new_policy
client.get_object_metadata(
self.account, self.container_name, self.object_name,
acceptable_statuses=(4,),
headers={'X-Backend-Storage-Policy-Index': int(new_policy)})
get_to_final_state()
# verify entry in the queue
client = InternalClient(conf_file, 'probe-test', 3)
for container in client.iter_containers('.misplaced_objects'):
for obj in client.iter_objects('.misplaced_objects',
container['name']):
expected = '%d:/%s/%s/%s' % (old_policy, self.account,
self.container_name,
self.object_name)
self.assertEqual(obj['name'], expected)
Manager(['container-reconciler']).once()
# and now it flops back
client.get_object_metadata(
self.account, self.container_name, self.object_name,
headers={'X-Backend-Storage-Policy-Index': int(new_policy)})
client.get_object_metadata(
self.account, self.container_name, self.object_name,
acceptable_statuses=(4,),
headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
# make sure the queue is settled
get_to_final_state()
for container in client.iter_containers('.misplaced_objects'):
for obj in client.iter_objects('.misplaced_objects',
container['name']):
self.fail('Found unexpected object %r in the queue' % obj)
def main():
options, commands = parser.parse_args()
commands.remove('split-brain')
if not commands:
parser.print_help()
return 'ERROR: must specify at least one command'
for cmd_args in commands:
cmd = cmd_args.split(':', 1)[0]
if cmd not in BrainSplitter.__commands__:
parser.print_help()
return 'ERROR: unknown command %s' % cmd
url, token = get_auth('http://127.0.0.1:8080/auth/v1.0',
'test:tester', 'testing')
brain = BrainSplitter(url, token, options.container, options.object)
for cmd_args in commands:
parts = cmd_args.split(':', 1)
command = parts[0]
if len(parts) > 1:
args = utils.list_from_csv(parts[1])
else:
args = ()
try:
brain.run(command, *args)
except ClientException as e:
print '**WARNING**: %s raised %s' % (command, e)
print 'STATUS'.join(['*' * 25] * 2)
brain.servers.status()
sys.exit()
if __name__ == "__main__":
if any('split-brain' in arg for arg in sys.argv):
sys.exit(main())
unittest.main()
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A fake VMware VI API implementation.
"""
import collections
import pprint
import uuid
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.virt.vmwareapi import error_util
_CLASSES = ['Datacenter', 'Datastore', 'ResourcePool', 'VirtualMachine',
'Network', 'HostSystem', 'HostNetworkSystem', 'Task', 'session',
'files', 'ClusterComputeResource']
_FAKE_FILE_SIZE = 1024
_db_content = {}
LOG = logging.getLogger(__name__)
def log_db_contents(msg=None):
"""Log DB Contents."""
LOG.debug(_("%(text)s: _db_content => %(content)s"),
{'text': msg or "", 'content': pprint.pformat(_db_content)})
def reset():
"""Resets the db contents."""
for c in _CLASSES:
# We fake the datastore by keeping the file references as a list of
# names in the db
if c == 'files':
_db_content[c] = []
else:
_db_content[c] = {}
create_network()
create_host_network_system()
create_host()
create_host()
create_datacenter()
create_datastore()
create_res_pool()
create_cluster('test_cluster')
create_cluster('test_cluster2')
def cleanup():
"""Clear the db contents."""
for c in _CLASSES:
_db_content[c] = {}
def _create_object(table, table_obj):
"""Create an object in the db."""
_db_content[table][table_obj.obj] = table_obj
def _get_object(obj_ref):
"""Get object for the give reference."""
return _db_content[obj_ref.type][obj_ref]
def _get_objects(obj_type):
"""Get objects of the type."""
lst_objs = FakeRetrieveResult()
for key in _db_content[obj_type]:
lst_objs.add_object(_db_content[obj_type][key])
return lst_objs
def _convert_to_array_of_mor(mors):
"""Wraps the given array into a DataObject."""
array_of_mors = DataObject()
array_of_mors.ManagedObjectReference = mors
return array_of_mors
class FakeRetrieveResult(object):
"""Object to retrieve a ObjectContent list."""
def __init__(self):
self.objects = []
def add_object(self, object):
self.objects.append(object)
def _get_object_refs(obj_type):
"""Get object References of the type."""
lst_objs = []
for key in _db_content[obj_type]:
lst_objs.append(key)
return lst_objs
def _update_object(table, table_obj):
"""Update objects of the type."""
_db_content[table][table_obj.obj] = table_obj
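def _example_db_usage():
    """A minimal illustrative sketch (hypothetical helper): the fake DB is a
    dict of per-type dicts keyed by ManagedObjectReference and is populated
    by reset().
    """
    reset()
    host_refs = _get_object_refs("HostSystem")  # two hosts from create_host()
    host = _get_object(host_refs[0])            # resolve a ref to its object
    return host.get("name")                     # -> "ha-host"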
class Prop(object):
"""Property Object base class."""
def __init__(self, name=None, val=None):
self.name = name
self.val = val
class ManagedObjectReference(object):
"""A managed object reference is a remote identifier."""
def __init__(self, name="ManagedObject", value=None):
        super(ManagedObjectReference, self).__init__()
# Managed Object Reference value attributes
# typically have values like vm-123 or
# host-232 and not UUID.
self.value = value
# Managed Object Reference type
# attributes hold the name of the type
# of the vCenter object the value
# attribute is the identifier for
self.type = name
self._type = name
class ObjectContent(object):
"""ObjectContent array holds dynamic properties."""
# This class is a *fake* of a class sent back to us by
# SOAP. It has its own names. These names are decided
# for us by the API we are *faking* here.
def __init__(self, obj_ref, prop_list=None, missing_list=None):
self.obj = obj_ref
if not isinstance(prop_list, collections.Iterable):
prop_list = []
if not isinstance(missing_list, collections.Iterable):
missing_list = []
# propSet is the name your Python code will need to
# use since this is the name that the API will use
if prop_list:
self.propSet = prop_list
# missingSet is the name your python code will
# need to use since this is the name that the
# API we are talking to will use.
if missing_list:
self.missingSet = missing_list
class ManagedObject(object):
"""Managed Object base class."""
_counter = 0
def __init__(self, mo_id_prefix="obj"):
"""Sets the obj property which acts as a reference to the object."""
object.__setattr__(self, 'mo_id', self._generate_moid(mo_id_prefix))
object.__setattr__(self, 'propSet', [])
object.__setattr__(self, 'obj',
ManagedObjectReference(self.__class__.__name__,
self.mo_id))
def set(self, attr, val):
"""
Sets an attribute value. Not using the __setattr__ directly for we
want to set attributes of the type 'a.b.c' and using this function
class we set the same.
"""
self.__setattr__(attr, val)
def get(self, attr):
"""
Gets an attribute. Used as an intermediary to get nested
property like 'a.b.c' value.
"""
return self.__getattr__(attr)
def __setattr__(self, attr, val):
        # TODO(hartsocks): this adds unnecessary complexity to the class
for prop in self.propSet:
if prop.name == attr:
prop.val = val
return
elem = Prop()
elem.name = attr
elem.val = val
self.propSet.append(elem)
def __getattr__(self, attr):
# TODO(hartsocks): remove this
# in a real ManagedObject you have to iterate the propSet
# in a real ManagedObject, the propSet is a *set* not a list
for elem in self.propSet:
if elem.name == attr:
return elem.val
msg = _("Property %(attr)s not set for the managed object %(name)s")
raise exception.NovaException(msg % {'attr': attr,
'name': self.__class__.__name__})
def _generate_moid(self, prefix):
"""Generates a new Managed Object ID."""
self.__class__._counter += 1
return prefix + "-" + str(self.__class__._counter)
def __repr__(self):
return jsonutils.dumps(dict([(elem.name, elem.val)
for elem in self.propSet]))
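def _example_managed_object_usage():
    """A minimal illustrative sketch (hypothetical helper): dotted names are
    stored as flat Prop entries in propSet, so set()/get() round-trip the
    full path string rather than nesting objects.
    """
    mo = ManagedObject()
    mo.set("summary.config.numCpu", 2)
    return mo.get("summary.config.numCpu")  # -> 2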
class DataObject(object):
"""Data object base class."""
def __init__(self, obj_name=None):
self.obj_name = obj_name
def __repr__(self):
return str(self.__dict__)
class HostInternetScsiHba(object):
    """HostInternetScsiHba class."""
    pass
class VirtualDisk(DataObject):
"""
Virtual Disk class.
"""
def __init__(self):
super(VirtualDisk, self).__init__()
self.key = 0
self.unitNumber = 0
class VirtualDiskFlatVer2BackingInfo(DataObject):
"""VirtualDiskFlatVer2BackingInfo class."""
def __init__(self):
super(VirtualDiskFlatVer2BackingInfo, self).__init__()
self.thinProvisioned = False
self.eagerlyScrub = False
class VirtualDiskRawDiskMappingVer1BackingInfo(DataObject):
"""VirtualDiskRawDiskMappingVer1BackingInfo class."""
def __init__(self):
super(VirtualDiskRawDiskMappingVer1BackingInfo, self).__init__()
self.lunUuid = ""
class VirtualLsiLogicController(DataObject):
"""VirtualLsiLogicController class."""
pass
class VirtualLsiLogicSASController(DataObject):
"""VirtualLsiLogicSASController class."""
pass
class VirtualPCNet32(DataObject):
"""VirtualPCNet32 class."""
def __init__(self):
super(VirtualPCNet32, self).__init__()
self.key = 4000
class VirtualMachine(ManagedObject):
"""Virtual Machine class."""
def __init__(self, **kwargs):
super(VirtualMachine, self).__init__("vm")
self.set("name", kwargs.get("name", 'test-vm'))
self.set("runtime.connectionState",
kwargs.get("conn_state", "connected"))
self.set("summary.config.guestId", kwargs.get("guest", "otherGuest"))
ds_do = kwargs.get("ds", None)
self.set("datastore", _convert_to_array_of_mor(ds_do))
self.set("summary.guest.toolsStatus", kwargs.get("toolsstatus",
"toolsOk"))
self.set("summary.guest.toolsRunningStatus", kwargs.get(
"toolsrunningstate", "guestToolsRunning"))
self.set("runtime.powerState", kwargs.get("powerstate", "poweredOn"))
self.set("config.files.vmPathName", kwargs.get("vmPathName"))
self.set("summary.config.numCpu", kwargs.get("numCpu", 1))
self.set("summary.config.memorySizeMB", kwargs.get("mem", 1))
self.set("config.hardware.device", kwargs.get("virtual_device", None))
self.set("config.extraConfig", kwargs.get("extra_config", None))
self.set('runtime.host', kwargs.get("runtime_host", None))
self.device = kwargs.get("virtual_device")
def reconfig(self, factory, val):
"""
Called to reconfigure the VM. Actually customizes the property
setting of the Virtual Machine object.
"""
try:
if len(val.deviceChange) < 2:
return
# Case of Reconfig of VM to attach disk
controller_key = val.deviceChange[1].device.controllerKey
filename = val.deviceChange[1].device.backing.fileName
disk = VirtualDisk()
disk.controllerKey = controller_key
disk_backing = VirtualDiskFlatVer2BackingInfo()
disk_backing.fileName = filename
disk_backing.key = -101
disk.backing = disk_backing
controller = VirtualLsiLogicController()
controller.key = controller_key
self.set("config.hardware.device", [disk, controller,
self.device[0]])
except AttributeError:
# Case of Reconfig of VM to set extra params
self.set("config.extraConfig", val.extraConfig)
class Network(ManagedObject):
"""Network class."""
def __init__(self):
super(Network, self).__init__("network")
self.set("summary.name", "vmnet0")
class ResourcePool(ManagedObject):
"""Resource Pool class."""
def __init__(self, name="test_ResPool", value="resgroup-test"):
super(ResourcePool, self).__init__("rp")
self.set("name", name)
summary = DataObject()
runtime = DataObject()
config = DataObject()
memory = DataObject()
cpu = DataObject()
memoryAllocation = DataObject()
cpuAllocation = DataObject()
memory.maxUsage = 1000 * 1024 * 1024
memory.overallUsage = 500 * 1024 * 1024
cpu.maxUsage = 10000
cpu.overallUsage = 1000
runtime.cpu = cpu
runtime.memory = memory
summary.runtime = runtime
cpuAllocation.limit = 10000
memoryAllocation.limit = 1024
memoryAllocation.reservation = 1024
config.memoryAllocation = memoryAllocation
config.cpuAllocation = cpuAllocation
self.set("summary", summary)
self.set("summary.runtime.memory", memory)
self.set("config", config)
parent = ManagedObjectReference(value=value,
name=name)
owner = ManagedObjectReference(value=value,
name=name)
self.set("parent", parent)
self.set("owner", owner)
class DatastoreHostMount(DataObject):
def __init__(self, value='host-100'):
super(DatastoreHostMount, self).__init__()
host_ref = (_db_content["HostSystem"]
[_db_content["HostSystem"].keys()[0]].obj)
host_system = DataObject()
host_system.ManagedObjectReference = [host_ref]
host_system.value = value
self.key = host_system
class ClusterComputeResource(ManagedObject):
"""Cluster class."""
def __init__(self, name="test_cluster"):
super(ClusterComputeResource, self).__init__("domain")
self.set("name", name)
self.set("host", None)
self.set("datastore", None)
self.set("resourcePool", None)
summary = DataObject()
summary.numHosts = 0
summary.numCpuCores = 0
summary.numCpuThreads = 0
summary.numEffectiveHosts = 0
summary.totalMemory = 0
summary.effectiveMemory = 0
summary.effectiveCpu = 10000
self.set("summary", summary)
def _add_root_resource_pool(self, r_pool):
if r_pool:
self.set("resourcePool", r_pool)
def _add_host(self, host_sys):
if host_sys:
hosts = self.get("host")
if hosts is None:
hosts = DataObject()
hosts.ManagedObjectReference = []
self.set("host", hosts)
hosts.ManagedObjectReference.append(host_sys)
# Update summary every time a new host is added
self._update_summary()
def _add_datastore(self, datastore):
if datastore:
datastores = self.get("datastore")
if datastores is None:
datastores = DataObject()
datastores.ManagedObjectReference = []
self.set("datastore", datastores)
datastores.ManagedObjectReference.append(datastore)
# Method to update summary of a cluster upon host addition
def _update_summary(self):
summary = self.get("summary")
summary.numHosts = 0
summary.numCpuCores = 0
summary.numCpuThreads = 0
summary.numEffectiveHosts = 0
summary.totalMemory = 0
summary.effectiveMemory = 0
hosts = self.get("host")
# Compute the aggregate stats
summary.numHosts = len(hosts.ManagedObjectReference)
for host_ref in hosts.ManagedObjectReference:
host_sys = _get_object(host_ref)
connected = host_sys.get("connected")
host_summary = host_sys.get("summary")
summary.numCpuCores += host_summary.hardware.numCpuCores
summary.numCpuThreads += host_summary.hardware.numCpuThreads
summary.totalMemory += host_summary.hardware.memorySize
free_memory = (host_summary.hardware.memorySize / (1024 * 1024)
- host_summary.quickStats.overallMemoryUsage)
summary.effectiveMemory += free_memory if connected else 0
summary.numEffectiveHosts += 1 if connected else 0
self.set("summary", summary)
class Datastore(ManagedObject):
"""Datastore class."""
def __init__(self, name="fake-ds"):
super(Datastore, self).__init__("ds")
self.set("summary.type", "VMFS")
self.set("summary.name", name)
self.set("summary.capacity", 1024 * 1024 * 1024 * 1024)
self.set("summary.freeSpace", 500 * 1024 * 1024 * 1024)
self.set("summary.accessible", True)
class HostNetworkSystem(ManagedObject):
"""HostNetworkSystem class."""
def __init__(self, name="networkSystem"):
super(HostNetworkSystem, self).__init__("ns")
self.set("name", name)
pnic_do = DataObject()
pnic_do.device = "vmnic0"
net_info_pnic = DataObject()
net_info_pnic.PhysicalNic = [pnic_do]
self.set("networkInfo.pnic", net_info_pnic)
class HostSystem(ManagedObject):
"""Host System class."""
def __init__(self, name="ha-host", connected=True):
super(HostSystem, self).__init__("host")
self.set("name", name)
if _db_content.get("HostNetworkSystem", None) is None:
create_host_network_system()
host_net_key = _db_content["HostNetworkSystem"].keys()[0]
host_net_sys = _db_content["HostNetworkSystem"][host_net_key].obj
self.set("configManager.networkSystem", host_net_sys)
summary = DataObject()
hardware = DataObject()
hardware.numCpuCores = 8
hardware.numCpuPkgs = 2
hardware.numCpuThreads = 16
hardware.vendor = "Intel"
hardware.cpuModel = "Intel(R) Xeon(R)"
hardware.uuid = "host-uuid"
hardware.memorySize = 1024 * 1024 * 1024
summary.hardware = hardware
quickstats = DataObject()
quickstats.overallMemoryUsage = 500
summary.quickStats = quickstats
product = DataObject()
product.name = "VMware ESXi"
product.version = "5.0.0"
config = DataObject()
config.product = product
summary.config = config
pnic_do = DataObject()
pnic_do.device = "vmnic0"
net_info_pnic = DataObject()
net_info_pnic.PhysicalNic = [pnic_do]
self.set("summary", summary)
self.set("capability.maxHostSupportedVcpus", 600)
self.set("summary.runtime.inMaintenanceMode", False)
self.set("runtime.connectionState", "connected")
self.set("summary.hardware", hardware)
self.set("config.network.pnic", net_info_pnic)
self.set("connected", connected)
if _db_content.get("Network", None) is None:
create_network()
net_ref = _db_content["Network"][_db_content["Network"].keys()[0]].obj
network_do = DataObject()
network_do.ManagedObjectReference = [net_ref]
self.set("network", network_do)
vswitch_do = DataObject()
vswitch_do.pnic = ["vmnic0"]
vswitch_do.name = "vSwitch0"
vswitch_do.portgroup = ["PortGroup-vmnet0"]
        net_switch = DataObject()
        net_switch.HostVirtualSwitch = [vswitch_do]
        self.set("config.network.vswitch", net_switch)
host_pg_do = DataObject()
host_pg_do.key = "PortGroup-vmnet0"
pg_spec = DataObject()
pg_spec.vlanId = 0
pg_spec.name = "vmnet0"
host_pg_do.spec = pg_spec
host_pg = DataObject()
host_pg.HostPortGroup = [host_pg_do]
self.set("config.network.portgroup", host_pg)
config = DataObject()
storageDevice = DataObject()
hostBusAdapter = HostInternetScsiHba()
hostBusAdapter.HostHostBusAdapter = [hostBusAdapter]
hostBusAdapter.iScsiName = "iscsi-name"
storageDevice.hostBusAdapter = hostBusAdapter
config.storageDevice = storageDevice
self.set("config.storageDevice.hostBusAdapter",
config.storageDevice.hostBusAdapter)
def _add_port_group(self, spec):
"""Adds a port group to the host system object in the db."""
pg_name = spec.name
vswitch_name = spec.vswitchName
vlanid = spec.vlanId
vswitch_do = DataObject()
vswitch_do.pnic = ["vmnic0"]
vswitch_do.name = vswitch_name
vswitch_do.portgroup = ["PortGroup-%s" % pg_name]
vswitches = self.get("config.network.vswitch").HostVirtualSwitch
vswitches.append(vswitch_do)
host_pg_do = DataObject()
host_pg_do.key = "PortGroup-%s" % pg_name
pg_spec = DataObject()
pg_spec.vlanId = vlanid
pg_spec.name = pg_name
host_pg_do.spec = pg_spec
host_pgrps = self.get("config.network.portgroup").HostPortGroup
host_pgrps.append(host_pg_do)
class Datacenter(ManagedObject):
"""Datacenter class."""
def __init__(self, name="ha-datacenter"):
super(Datacenter, self).__init__("dc")
self.set("name", name)
self.set("vmFolder", "vm_folder_ref")
if _db_content.get("Network", None) is None:
create_network()
net_ref = _db_content["Network"][_db_content["Network"].keys()[0]].obj
network_do = DataObject()
network_do.ManagedObjectReference = [net_ref]
self.set("network", network_do)
class Task(ManagedObject):
"""Task class."""
def __init__(self, task_name, state="running", result=None):
super(Task, self).__init__("Task")
info = DataObject()
info.name = task_name
info.state = state
info.result = result
self.set("info", info)
def create_host_network_system():
host_net_system = HostNetworkSystem()
_create_object("HostNetworkSystem", host_net_system)
def create_host():
host_system = HostSystem()
_create_object('HostSystem', host_system)
def create_datacenter():
data_center = Datacenter()
_create_object('Datacenter', data_center)
def create_datastore():
data_store = Datastore()
_create_object('Datastore', data_store)
def create_res_pool():
res_pool = ResourcePool()
_create_object('ResourcePool', res_pool)
def create_network():
network = Network()
_create_object('Network', network)
def create_cluster(name):
cluster = ClusterComputeResource(name=name)
cluster._add_host(_get_object_refs("HostSystem")[0])
cluster._add_host(_get_object_refs("HostSystem")[1])
cluster._add_datastore(_get_object_refs("Datastore")[0])
cluster._add_root_resource_pool(_get_object_refs("ResourcePool")[0])
_create_object('ClusterComputeResource', cluster)
def create_task(task_name, state="running", result=None):
task = Task(task_name, state, result)
_create_object("Task", task)
return task
def _add_file(file_path):
"""Adds a file reference to the db."""
_db_content["files"].append(file_path)
def _remove_file(file_path):
"""Removes a file reference from the db."""
if _db_content.get("files") is None:
raise exception.NoFilesFound()
# Check if the remove is for a single file object or for a folder
if file_path.find(".vmdk") != -1:
if file_path not in _db_content.get("files"):
raise exception.FileNotFound(file_path=file_path)
_db_content.get("files").remove(file_path)
else:
# Removes the files in the folder and the folder too from the db
for file in _db_content.get("files"):
if file.find(file_path) != -1:
lst_files = _db_content.get("files")
if lst_files and lst_files.count(file):
lst_files.remove(file)
def fake_plug_vifs(*args, **kwargs):
"""Fakes plugging vifs."""
pass
def fake_get_network(*args, **kwargs):
"""Fake get network."""
return {'type': 'fake'}
def get_file(file_path):
"""Check if file exists in the db."""
if _db_content.get("files") is None:
raise exception.NoFilesFound()
return file_path in _db_content.get("files")
def fake_fetch_image(context, image, instance, **kwargs):
"""Fakes fetch image call. Just adds a reference to the db for the file."""
ds_name = kwargs.get("datastore_name")
file_path = kwargs.get("file_path")
ds_file_path = "[" + ds_name + "] " + file_path
_add_file(ds_file_path)
def fake_upload_image(context, image, instance, **kwargs):
"""Fakes the upload of an image."""
pass
def fake_get_vmdk_size_and_properties(context, image_id, instance):
"""Fakes the file size and properties fetch for the image file."""
props = {"vmware_ostype": "otherGuest",
"vmware_adaptertype": "lsiLogic"}
return _FAKE_FILE_SIZE, props
def _get_vm_mdo(vm_ref):
"""Gets the Virtual Machine with the ref from the db."""
if _db_content.get("VirtualMachine", None) is None:
raise exception.NotFound(_("There is no VM registered"))
if vm_ref not in _db_content.get("VirtualMachine"):
raise exception.NotFound(_("Virtual Machine with ref %s is not "
"there") % vm_ref)
return _db_content.get("VirtualMachine")[vm_ref]
class FakeFactory(object):
"""Fake factory class for the suds client."""
def create(self, obj_name):
"""Creates a namespace object."""
return DataObject(obj_name)
class FakeVim(object):
"""Fake VIM Class."""
def __init__(self, protocol="https", host="localhost", trace=None):
"""
        Initializes the suds client object and sets the service content
        and the session cookies.
"""
self._session = None
self.client = DataObject()
self.client.factory = FakeFactory()
transport = DataObject()
transport.cookiejar = "Fake-CookieJar"
options = DataObject()
options.transport = transport
self.client.options = options
service_content = self.client.factory.create('ns0:ServiceContent')
service_content.propertyCollector = "PropCollector"
service_content.virtualDiskManager = "VirtualDiskManager"
service_content.fileManager = "FileManager"
service_content.rootFolder = "RootFolder"
service_content.sessionManager = "SessionManager"
about_info = DataObject()
about_info.name = "VMware vCenter Server"
about_info.version = "5.1.0"
service_content.about = about_info
self._service_content = service_content
def get_service_content(self):
return self._service_content
def __repr__(self):
return "Fake VIM Object"
def __str__(self):
return "Fake VIM Object"
def _login(self):
"""Logs in and sets the session object in the db."""
self._session = str(uuid.uuid4())
session = DataObject()
session.key = self._session
_db_content['session'][self._session] = session
return session
def _logout(self):
"""Logs out and remove the session object ref from the db."""
s = self._session
self._session = None
if s not in _db_content['session']:
raise exception.NovaException(
_("Logging out a session that is invalid or already logged "
"out: %s") % s)
del _db_content['session'][s]
def _terminate_session(self, *args, **kwargs):
"""Terminates a session."""
s = kwargs.get("sessionId")[0]
if s not in _db_content['session']:
return
del _db_content['session'][s]
def _check_session(self):
"""Checks if the session is active."""
if (self._session is None or self._session not in
_db_content['session']):
LOG.debug(_("Session is faulty"))
raise error_util.VimFaultException(
[error_util.FAULT_NOT_AUTHENTICATED],
_("Session Invalid"))
def _create_vm(self, method, *args, **kwargs):
"""Creates and registers a VM object with the Host System."""
config_spec = kwargs.get("config")
ds = _db_content["Datastore"].keys()[0]
host = _db_content["HostSystem"].keys()[0]
vm_dict = {"name": config_spec.name,
"ds": [ds],
"runtime_host": host,
"powerstate": "poweredOff",
"vmPathName": config_spec.files.vmPathName,
"numCpu": config_spec.numCPUs,
"mem": config_spec.memoryMB,
"extra_config": config_spec.extraConfig,
"virtual_device": config_spec.deviceChange}
virtual_machine = VirtualMachine(**vm_dict)
_create_object("VirtualMachine", virtual_machine)
task_mdo = create_task(method, "success")
return task_mdo.obj
def _reconfig_vm(self, method, *args, **kwargs):
"""Reconfigures a VM and sets the properties supplied."""
vm_ref = args[0]
vm_mdo = _get_vm_mdo(vm_ref)
vm_mdo.reconfig(self.client.factory, kwargs.get("spec"))
task_mdo = create_task(method, "success")
return task_mdo.obj
def _create_copy_disk(self, method, vmdk_file_path):
"""Creates/copies a vmdk file object in the datastore."""
# We need to add/create both .vmdk and .-flat.vmdk files
flat_vmdk_file_path = vmdk_file_path.replace(".vmdk", "-flat.vmdk")
_add_file(vmdk_file_path)
_add_file(flat_vmdk_file_path)
task_mdo = create_task(method, "success")
return task_mdo.obj
def _extend_disk(self, method, size):
"""Extend disk size when create a instance."""
task_mdo = create_task(method, "success")
return task_mdo.obj
def _snapshot_vm(self, method):
"""Snapshots a VM. Here we do nothing for faking sake."""
task_mdo = create_task(method, "success")
return task_mdo.obj
def _delete_disk(self, method, *args, **kwargs):
"""Deletes .vmdk and -flat.vmdk files corresponding to the VM."""
vmdk_file_path = kwargs.get("name")
flat_vmdk_file_path = vmdk_file_path.replace(".vmdk", "-flat.vmdk")
_remove_file(vmdk_file_path)
_remove_file(flat_vmdk_file_path)
task_mdo = create_task(method, "success")
return task_mdo.obj
def _delete_file(self, method, *args, **kwargs):
"""Deletes a file from the datastore."""
_remove_file(kwargs.get("name"))
task_mdo = create_task(method, "success")
return task_mdo.obj
def _just_return(self):
"""Fakes a return."""
return
def _just_return_task(self, method):
"""Fakes a task return."""
task_mdo = create_task(method, "success")
return task_mdo.obj
def _unregister_vm(self, method, *args, **kwargs):
"""Unregisters a VM from the Host System."""
vm_ref = args[0]
_get_vm_mdo(vm_ref)
del _db_content["VirtualMachine"][vm_ref]
def _search_ds(self, method, *args, **kwargs):
"""Searches the datastore for a file."""
ds_path = kwargs.get("datastorePath")
if _db_content.get("files", None) is None:
raise exception.NoFilesFound()
for file in _db_content.get("files"):
if file.find(ds_path) != -1:
result = DataObject()
result.path = ds_path
task_mdo = create_task(method, state="success",
result=result)
return task_mdo.obj
task_mdo = create_task(method, "error")
return task_mdo.obj
def _make_dir(self, method, *args, **kwargs):
"""Creates a directory in the datastore."""
ds_path = kwargs.get("name")
if _db_content.get("files", None) is None:
raise exception.NoFilesFound()
_db_content["files"].append(ds_path)
def _set_power_state(self, method, vm_ref, pwr_state="poweredOn"):
"""Sets power state for the VM."""
if _db_content.get("VirtualMachine", None) is None:
raise exception.NotFound(_("No Virtual Machine has been "
"registered yet"))
if vm_ref not in _db_content.get("VirtualMachine"):
raise exception.NotFound(_("Virtual Machine with ref %s is not "
"there") % vm_ref)
vm_mdo = _db_content.get("VirtualMachine").get(vm_ref)
vm_mdo.set("runtime.powerState", pwr_state)
task_mdo = create_task(method, "success")
return task_mdo.obj
def _retrieve_properties_continue(self, method, *args, **kwargs):
"""Continues the retrieve."""
return FakeRetrieveResult()
def _retrieve_properties_cancel(self, method, *args, **kwargs):
"""Cancels the retrieve."""
return None
def _retrieve_properties(self, method, *args, **kwargs):
"""Retrieves properties based on the type."""
spec_set = kwargs.get("specSet")[0]
type = spec_set.propSet[0].type
properties = spec_set.propSet[0].pathSet
if not isinstance(properties, list):
properties = properties.split()
objs = spec_set.objectSet
lst_ret_objs = FakeRetrieveResult()
for obj in objs:
try:
obj_ref = obj.obj
# This means that we are doing a search for the managed
# data objects of the type in the inventory
if obj_ref == "RootFolder":
mdo_refs = _db_content[type]
else:
mdo_refs = [obj_ref]
for mdo_ref in mdo_refs:
mdo = _db_content[type][mdo_ref]
prop_list = []
for prop_name in properties:
prop = Prop(prop_name, mdo.get(prop_name))
prop_list.append(prop)
obj_content = ObjectContent(mdo.obj, prop_list)
lst_ret_objs.add_object(obj_content)
except Exception as exc:
LOG.exception(exc)
continue
return lst_ret_objs
def _add_port_group(self, method, *args, **kwargs):
"""Adds a port group to the host system."""
_host_sk = _db_content["HostSystem"].keys()[0]
host_mdo = _db_content["HostSystem"][_host_sk]
host_mdo._add_port_group(kwargs.get("portgrp"))
def __getattr__(self, attr_name):
if attr_name != "Login":
self._check_session()
if attr_name == "Login":
return lambda *args, **kwargs: self._login()
elif attr_name == "Logout":
            # Return a callable, like the other handlers, rather than
            # logging out as a side effect of attribute access.
            return lambda *args, **kwargs: self._logout()
elif attr_name == "TerminateSession":
return lambda *args, **kwargs: self._terminate_session(
*args, **kwargs)
elif attr_name == "CreateVM_Task":
return lambda *args, **kwargs: self._create_vm(attr_name,
*args, **kwargs)
elif attr_name == "ReconfigVM_Task":
return lambda *args, **kwargs: self._reconfig_vm(attr_name,
*args, **kwargs)
elif attr_name == "CreateVirtualDisk_Task":
return lambda *args, **kwargs: self._create_copy_disk(attr_name,
kwargs.get("name"))
elif attr_name == "DeleteDatastoreFile_Task":
return lambda *args, **kwargs: self._delete_file(attr_name,
*args, **kwargs)
elif attr_name == "PowerOnVM_Task":
return lambda *args, **kwargs: self._set_power_state(attr_name,
args[0], "poweredOn")
elif attr_name == "PowerOffVM_Task":
return lambda *args, **kwargs: self._set_power_state(attr_name,
args[0], "poweredOff")
elif attr_name == "RebootGuest":
return lambda *args, **kwargs: self._just_return()
elif attr_name == "ResetVM_Task":
return lambda *args, **kwargs: self._set_power_state(attr_name,
args[0], "poweredOn")
elif attr_name == "SuspendVM_Task":
return lambda *args, **kwargs: self._set_power_state(attr_name,
args[0], "suspended")
elif attr_name == "CreateSnapshot_Task":
return lambda *args, **kwargs: self._snapshot_vm(attr_name)
elif attr_name == "CopyVirtualDisk_Task":
return lambda *args, **kwargs: self._create_copy_disk(attr_name,
kwargs.get("destName"))
elif attr_name == "ExtendVirtualDisk_Task":
return lambda *args, **kwargs: self._extend_disk(attr_name,
kwargs.get("size"))
elif attr_name == "DeleteVirtualDisk_Task":
return lambda *args, **kwargs: self._delete_disk(attr_name,
*args, **kwargs)
elif attr_name == "Destroy_Task":
return lambda *args, **kwargs: self._unregister_vm(attr_name,
*args, **kwargs)
elif attr_name == "UnregisterVM":
return lambda *args, **kwargs: self._unregister_vm(attr_name,
*args, **kwargs)
elif attr_name == "SearchDatastore_Task":
return lambda *args, **kwargs: self._search_ds(attr_name,
*args, **kwargs)
elif attr_name == "MakeDirectory":
return lambda *args, **kwargs: self._make_dir(attr_name,
*args, **kwargs)
elif attr_name == "RetrievePropertiesEx":
return lambda *args, **kwargs: self._retrieve_properties(
attr_name, *args, **kwargs)
elif attr_name == "ContinueRetrievePropertiesEx":
return lambda *args, **kwargs: self._retrieve_properties_continue(
attr_name, *args, **kwargs)
elif attr_name == "CancelRetrievePropertiesEx":
return lambda *args, **kwargs: self._retrieve_properties_cancel(
attr_name, *args, **kwargs)
elif attr_name == "AcquireCloneTicket":
return lambda *args, **kwargs: self._just_return()
elif attr_name == "AddPortGroup":
return lambda *args, **kwargs: self._add_port_group(attr_name,
*args, **kwargs)
elif attr_name == "RebootHost_Task":
return lambda *args, **kwargs: self._just_return_task(attr_name)
elif attr_name == "ShutdownHost_Task":
return lambda *args, **kwargs: self._just_return_task(attr_name)
elif attr_name == "PowerDownHostToStandBy_Task":
return lambda *args, **kwargs: self._just_return_task(attr_name)
elif attr_name == "PowerUpHostFromStandBy_Task":
return lambda *args, **kwargs: self._just_return_task(attr_name)
elif attr_name == "EnterMaintenanceMode_Task":
return lambda *args, **kwargs: self._just_return_task(attr_name)
elif attr_name == "ExitMaintenanceMode_Task":
return lambda *args, **kwargs: self._just_return_task(attr_name)
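# Hedged usage sketch (the session class name "FakeVim" and the vm_ref value
# are hypothetical, not taken from this module): attribute access on an
# instance of this fake session dispatches to the private helpers above, e.g.
#
#     session = FakeVim()
#     task_ref = session.PowerOnVM_Task(vm_ref)
#
# routes through _set_power_state(attr_name, vm_ref, "poweredOn") and returns
# a fake task object, while attribute names not listed above return None.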
|
|
from pysnmp.hlapi import *
from enum import Enum
from pysnmp.error import PySnmpError
from pysnmp.entity.rfc3413.oneliner import cmdgen
import string
import ipaddress
Values = {
"hostForwarding" : "1.3.6.1.2.1.4.1.0",
"destRoute" : "1.3.6.1.2.1.4.24.4.1.1",
"netmask" : "1.3.6.1.2.1.4.24.4.1.2",
"nextHop" : "1.3.6.1.2.1.4.24.4.1.4",
"interfaceIndex" : "1.3.6.1.2.1.4.24.4.1.5",
"mac" : "1.3.6.1.2.1.2.2.1.6"
}
class SnmpWrapper:
def __init__(self, host, user, passwordMD5, passwordDES, port=161):
"""
Create a new snmp connection wrapper for a host.
This wrapper uses SNMPv3 with MD for authentification and DES for encryption.
:param host: Host or IP to connect to
:param host: str
:param port: Port used for the snmp connection
:type port: int
:param user: User used for the snmp connection
:type user: str
:param passwordMD5: Password used for snmp authentifications
:type passwordMD5: str
:param passwordDES: Password used for snmp encryption
:type: str
"""
self.user = user
self.auth = passwordMD5
self.enc = passwordDES
self.host = host
self.port = port
def getValue(self, oid, number=None):
"""
Execute a GET command on the host defined in the constructor.
:param oid: Value/OID to receive from the host
:type oid: str
:param number: Subelement of given OID if needed. For example interface if you want to read ips
:type number: int
:returns: Value returned by the SNMP-Agent or error code.
0 : Unknown error
1 : Connection-Timeout: Host has no installed SNMP-Agent or encryption password is wrong.
2 : Authentification failed due to wrong authentification password.
3 : Unknown username
4 : Host not reachable
:rtype: tuple on success, int on error
"""
if number:
oid += "." + str(number)
try:
errorIndication, errorStatus, errorIndex, varBinds = next(
getCmd(SnmpEngine(),
UsmUserData(self.user, self.auth, self.enc),
UdpTransportTarget((self.host, self.port)),
ContextData(),
ObjectType(ObjectIdentity(oid)))
)
except PySnmpError:
return 4
if errorIndication:
if errorIndication == "No SNMP response received before timeout":
return 1
if errorIndication == "wrongDigest":
return 2
if errorIndication == "unknownUserName":
return 3
return 0
elif errorStatus or errorIndex != 0:
return 0
else:
if len(varBinds) > 0:
return (str(varBinds[0][0]), toPythonType(varBinds[0][1]))
return None
def walkOid(self, oid):
"""
Execute a GETNEXT command on the host defined in the constructor.
Method will return all values which are subidentifiers of the fiven one.
:param oid: Value/OID to receive from the host
:type oid: str
:param number: Subelement of given OID if needed. For example interface if you want to read ips
:type number: int
:returns: List of values returned by the SNMP-Agent or error code.
0 : Unknown error
1 : Connection-Timeout: Host has no installed SNMP-Agent or encryption password is wrong.
2 : Authentification failed due to wrong authentification password.
3 : Unknown username
4 : Host not reachable
:rtype: list on success, int on error
"""
try:
cmd = nextCmd(SnmpEngine(),
UsmUserData(self.user, self.auth, self.enc),
UdpTransportTarget((self.host, self.port)),
ContextData(),
ObjectType(ObjectIdentity(oid)),
lexicographicMode = False)
vars = list()
for errorIndication, errorStatus, errorIndex, varBinds in cmd:
if errorIndication:
if errorIndication == "No SNMP response received before timeout":
return 1
if errorIndication == "wrongDigest":
return 2
if errorIndication == "unknownUserName":
return 3
return 0
elif errorStatus or errorIndex != 0:
return 0
else:
for oid, value in varBinds:
vars.append((str(oid), toPythonType(value)))
return vars
except PySnmpError:
return 4
def OidToRouteIdentifier(oid):
"""
Generate the subidentifier for one route.
The oid has the schema: <oid>.<4 dot seperated values dest-network>.<4 dot seperated values netmask>.<4 dot seperated values hop>
:param oid: OID to split
:type oid: str
:returns: sub-oid representing the route (Without leading dot)
:rtype: str
"""
parts = oid.rsplit(".", 13)
return ".".join(parts[1:])
def toPythonType(value):
    if isinstance(value, (Integer32, Integer, Gauge32, Counter32, Counter64, Unsigned32)):
        return int(value)
if isinstance(value, IpAddress):
return ".".join([str(c) for c in value._value])
if isinstance(value, ObjectIdentifier):
return str(value)
if isinstance(value, OctetString) and isinstance(value._value, bytes):
return value
if isinstance(value, OctetString):
return str(value)
return value
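# Illustrative conversions (assuming Python 3, where iterating bytes yields
# ints; the classes come from pysnmp's rfc1902 types pulled in by the wildcard
# import above):
#
#     toPythonType(Integer32(3))                          -> 3
#     toPythonType(IpAddress("10.0.0.1"))                 -> "10.0.0.1"
#     toPythonType(ObjectIdentifier("1.3.6.1.2.1.1.1.0")) -> "1.3.6.1.2.1.1.1.0"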
def checkReturnSnmp(answer, host, name, user, logger):
"""
Check the return type of SnmpWrapper functions and log if an error occured.
:param answer: Answer received from SnmapWrapper method
:type answer: list, tuple or int
:param host: Host currently processed
:type host: seealso: insalata.model.Host.Host
:param name: Name of this collector
:type name: str
:param user: User used for SNMP connection
:type user: str
:param logger: Logger used by this collector module
:type logger: seealso: insalata.Logging.Logger
:returns: answer if no error occured else None
:rtype: list, tuple or None
"""
if isinstance(answer, list) or isinstance(answer, tuple) or isinstance(answer, int) or isinstance(answer, string) or isinstance(answer, bytes):
return answer
if len(answer) == 1:
logger.error("Host '{0}' does not support SNMP or encryption password is wrong. Collector: {1}.".format(host.getID(), name))
if len(answer) == 2:
logger.error("Authentification failed on host '{0}'. Collector: {1}.".format(host.getID(), name))
if len(answer) == 3:
logger.error("Unknown SNMP user on host '{0}'. Collector: {1}. Username: {3}.".format(host.getID(), name, user))
if len(answer) == 4:
logger.error("Host '{0}' is not reachable. Collector: {1}.".format(host.getID(), name))
if len(answer) > 4:
logger.error("SNMP scanning of host '{0}' failed due to unknown reason. Collector: {1}.".format(host.getID(), name))
logger.debug("SNMP scanning of host '{0}' failed due to unknown reason. Collector: {1}; Anser-Code: {2}.".format(host.getID(), name, answer))
return None
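# Minimal usage sketch, assuming a reachable SNMPv3 agent; the address and
# credentials are placeholders, and checkReturnSnmp() is not exercised here
# because it needs a host object and a logger from the collector framework.
if __name__ == "__main__":
    wrapper = SnmpWrapper("192.0.2.10", "snmpuser", "md5-secret", "des-secret")
    # Single value: the IP forwarding flag (a (oid, value) tuple on success,
    # an int error code on failure).
    print(wrapper.getValue(Values["hostForwarding"]))
    # Table walk: every destination in the routing table column.
    print(wrapper.walkOid(Values["destRoute"]))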
|
|
"""
Words and static data
Please extend this file with more lvl=100 shibe wow.
"""
import random
from collections import deque
class DogeDeque(deque):
"""
A doge deque. A doqe, if you may.
    Because random is random, just using a random choice from the static lists
    below will always produce some repetition in the output. This collection
    instead shuffles the list upon init, and acts as a rotating deque
    whenever an item is gotten from it.
"""
def __init__(self, *args, **kwargs):
self.index = 0
args = list(args)
random.shuffle(args)
super(DogeDeque, self).__init__(args)
def get(self):
"""
Get one item. This will rotate the deque one step. Repeated gets will
return different items.
"""
self.index += 1
        # If we've gone through the entire deque once, shuffle it again to
        # simulate ever-flowing random. self.shuffle() re-inits the deque and
        # resets the index to 0.
        if self.index == len(self):
            self.shuffle()
        self.rotate(1)
        try:
            return self[0]
        except IndexError:
            return "wow"
def extend(self, iterable):
# Whenever we extend the list, make sure to shuffle in the new items!
super(DogeDeque, self).extend(iterable)
self.shuffle()
    def shuffle(self):
        """
        Shuffle the deque
        Deques themselves do not support this, so this will make all items into
        a list, shuffle that list, clear the deque, and then re-init the deque.
        """
        args = list(self)
        random.shuffle(args)
        self.clear()
        self.index = 0  # start a fresh rotation cycle after the reshuffle
        super(DogeDeque, self).__init__(args)
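# Minimal usage sketch (the returned values depend on the shuffle and are only
# illustrative):
#
#     >>> d = DogeDeque("wow", "such", "very")
#     >>> d.get()
#     'such'
#     >>> d.get()
#     'very'
#
# Each get() rotates the deque one step, so repeated calls cycle through all
# items before the deque is reshuffled.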
class FrequencyBasedDogeDeque(deque):
def __init__(self, *args, **kwargs):
self.index = 0
if "step" in kwargs:
self.step = kwargs["step"]
else:
self.step = 2
args = list(args)
# sort words by frequency
args = (sorted(set(args), key=lambda x: args.count(x)))
super(FrequencyBasedDogeDeque, self).__init__(args)
def shuffle(self):
pass
def get(self):
"""
Get one item and prepare to get an item with lower
rank on the next call.
"""
if len(self) < 1:
return "wow"
if self.index >= len(self):
self.index = 0
step = random.randint(1, min(self.step, len(self)))
res = self[0]
self.index += step
self.rotate(step)
return res
def extend(self, iterable):
existing = list(self)
merged = existing + list(iterable)
self.clear()
self.index = 0
new_to_add = (sorted(set(merged), key=lambda x: merged.count(x)))
super(FrequencyBasedDogeDeque, self).__init__(new_to_add)
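# Hedged sketch of the frequency-based variant (the words and counts are made
# up): items are ordered by how often they occur, and each get() returns the
# current head before rotating a random number of steps, at most ``step``:
#
#     >>> f = FrequencyBasedDogeDeque("wow", "wow", "such", step=2)
#     >>> f.get()
#     'such'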
PREFIXES = DogeDeque(
'wow', 'such', 'very', 'so much', 'many', 'lol', 'beautiful',
'all the', 'the', 'most', 'very much', 'pretty', 'so',
)
# Please keep in mind that this particular shibe is a terminal hax0r shibe,
# and the words added should be in that domain
WORD_LIST = ['computer', 'hax0r', 'code', 'data', 'internet', 'server',
'hacker', 'terminal', 'doge', 'shibe', 'program', 'free software',
'web scale', 'monads', 'git', 'daemon', 'loop', 'pretty',
'uptime',
'thread safe', 'posix']
WORDS = DogeDeque(*WORD_LIST)
SUFFIXES = DogeDeque(
'wow', 'lol', 'hax', 'plz', 'lvl=100'
)
# A subset of the 255 color cube with the darkest colors removed. This is
# suited for use on dark terminals. Lighter colors are still present so some
# colors might be semi-unreadable on lighter backgrounds.
#
# If you see this and use a light terminal, a pull request with a set that
# works well on a light terminal would be awesome.
COLORS = DogeDeque(
23, 24, 25, 26, 27, 29, 30, 31, 32, 33, 35, 36, 37, 38, 39, 41, 42, 43,
44, 45, 47, 48, 49, 50, 51, 58, 59, 63, 64, 65, 66, 67, 68, 69, 70, 71,
72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 94,
95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109,
110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123,
130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143,
144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157,
158, 159, 162, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176,
177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190,
191, 192, 193, 194, 195, 197, 202, 203, 204, 205, 206, 207, 208, 209,
210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223,
224, 225, 226, 227, 228
)
# Seasonal greetings by Shibe.
# Tuple for every single date is in (month, day) format (year is discarded).
# Doge checks if the current date falls between these dates and shows wow
# congratulations, so do whatever complex math you need to make sure Shibe
# celebrates with you!
SEASONS = {
'xmas': {
'dates': ((12, 14), (12, 26)),
'pic': 'doge-xmas.txt',
'words': (
'christmas', 'xmas', 'candles', 'santa', 'merry', 'reindeers',
'gifts', 'jul', 'vacation', 'carol',
)
},
# To be continued...
}
STOPWORDS = ["able", "about", "above", "abroad", "according", "accordingly",
"across", "actually", "adj", "after",
"afterwards", "again", "against", "ago", "ahead", "ain't", "all",
"allow", "allows", "almost", "alone",
"along", "alongside", "already", "also", "although", "always",
"am", "amid", "amidst", "among", "amongst",
"an", "and", "another", "any", "anybody", "anyhow", "anyone",
"anything", "anyway", "anyways", "anywhere",
"apart", "appear", "appreciate", "appropriate", "are", "aren't",
"around", "as", "a's", "aside", "ask",
"asking", "associated", "at", "available", "away", "awfully",
"back", "backward", "backwards", "be",
"became", "because", "become", "becomes", "becoming", "been",
"before", "beforehand", "begin", "behind",
"being", "believe", "below", "beside", "besides", "best",
"better", "between", "beyond", "both", "brief",
"but", "by", "came", "can", "cannot", "cant", "can't", "caption",
"cause", "causes", "certain",
"certainly", "changes", "clearly", "c'mon", "co", "co.", "com",
"come", "comes", "concerning",
"consequently", "consider", "considering", "contain",
"containing", "contains", "corresponding", "could",
"couldn't", "course", "c's", "currently", "dare", "daren't",
"definitely", "described", "despite", "did",
"didn't", "different", "directly", "do", "does", "doesn't",
"doing", "done", "don't", "down", "downwards",
"during", "each", "edu", "eg", "eight", "eighty", "either",
"else", "elsewhere", "end", "ending", "enough",
"entirely", "especially", "et", "etc", "even", "ever", "evermore",
"every", "everybody", "everyone",
"everything", "everywhere", "ex", "exactly", "example", "except",
"fairly", "far", "farther", "few",
"fewer", "fifth", "first", "five", "followed", "following",
"follows", "for", "forever", "former",
"formerly", "forth", "forward", "found", "four", "from",
"further", "furthermore", "get", "gets",
"getting", "given", "gives", "go", "goes", "going", "gone", "got",
"gotten", "greetings", "had", "hadn't",
"half", "happens", "hardly", "has", "hasn't", "have", "haven't",
"having", "he", "he'd", "he'll", "hello",
"help", "hence", "her", "here", "hereafter", "hereby", "herein",
"here's", "hereupon", "hers", "herself",
"he's", "hi", "him", "himself", "his", "hither", "hopefully",
"how", "howbeit", "however", "hundred",
"i'd", "ie", "if", "ignored", "i'll", "i'm", "immediate", "in",
"inasmuch", "inc", "inc.", "indeed",
"indicate", "indicated", "indicates", "inner", "inside",
"insofar", "instead", "into", "inward", "is",
"isn't", "it", "it'd", "it'll", "its", "it's", "itself", "i've",
"just", "k", "keep", "keeps", "kept",
"know", "known", "knows", "last", "lately", "later", "latter",
"latterly", "least", "less", "lest", "let",
"let's", "like", "liked", "likely", "likewise", "little", "look",
"looking", "looks", "low", "lower",
"ltd", "made", "mainly", "make", "makes", "many", "may", "maybe",
"mayn't", "me", "mean", "meantime",
"meanwhile", "merely", "might", "mightn't", "mine", "minus",
"miss", "more", "moreover", "most", "mostly",
"mr", "mrs", "much", "must", "mustn't", "my", "myself", "name",
"namely", "nd", "near", "nearly",
"necessary", "need", "needn't", "needs", "neither", "never",
"neverf", "neverless", "nevertheless", "new",
"next", "nine", "ninety", "no", "nobody", "non", "none",
"nonetheless", "noone", "no-one", "nor",
"normally", "not", "nothing", "notwithstanding", "novel", "now",
"nowhere", "obviously", "of", "off",
"often", "oh", "ok", "okay", "old", "on", "once", "one", "ones",
"one's", "only", "onto", "opposite", "or",
"other", "others", "otherwise", "ought", "oughtn't", "our",
"ours", "ourselves", "out", "outside", "over",
"overall", "own", "particular", "particularly", "past", "per",
"perhaps", "placed", "please", "plus",
"possible", "presumably", "probably", "provided", "provides",
"que", "quite", "qv", "rather", "rd", "re",
"really", "reasonably", "recent", "recently", "regarding",
"regardless", "regards", "relatively",
"respectively", "right", "round", "said", "same", "saw", "say",
"saying", "says", "second", "secondly",
"see", "seeing", "seem", "seemed", "seeming", "seems", "seen",
"self", "selves", "sensible", "sent",
"serious", "seriously", "seven", "several", "shall", "shan't",
"she", "she'd", "she'll", "she's", "should",
"shouldn't", "since", "six", "so", "some", "somebody", "someday",
"somehow", "someone", "something",
"sometime", "sometimes", "somewhat", "somewhere", "soon", "sorry",
"specified", "specify", "specifying",
"still", "sub", "such", "sup", "sure", "take", "taken", "taking",
"tell", "tends", "th", "than", "thank",
"thanks", "thanx", "that", "that'll", "thats", "that's",
"that've", "the", "their", "theirs", "them",
"themselves", "then", "thence", "there", "thereafter", "thereby",
"there'd", "therefore", "therein",
"there'll", "there're", "theres", "there's", "thereupon",
"there've", "these", "they", "they'd", "they'll",
"they're", "they've", "thing", "things", "think", "third",
"thirty", "this", "thorough", "thoroughly",
"those", "though", "three", "through", "throughout", "thru",
"thus", "till", "to", "together", "too",
"took", "toward", "towards", "tried", "tries", "truly", "try",
"trying", "t's", "twice", "two", "un",
"under", "underneath", "undoing", "unfortunately", "unless",
"unlike", "unlikely", "until", "unto", "up",
"upon", "upwards", "us", "use", "used", "useful", "uses", "using",
"usually", "v", "value", "various",
"versus", "very", "via", "viz", "vs", "want", "wants", "was",
"wasn't", "way", "we", "we'd", "welcome",
"well", "we'll", "went", "were", "we're", "weren't", "we've",
"what", "whatever", "what'll", "what's",
"what've", "when", "whence", "whenever", "where", "whereafter",
"whereas", "whereby", "wherein", "where's",
"whereupon", "wherever", "whether", "which", "whichever", "while",
"whilst", "whither", "who", "who'd",
"whoever", "whole", "who'll", "whom", "whomever", "who's",
"whose", "why", "will", "willing", "wish",
"with", "within", "without", "wonder", "won't", "would",
"wouldn't", "yes", "yet", "you", "you'd",
"you'll", "your", "you're", "yours", "yourself", "yourselves",
"you've", "zero", "a", "about", "above",
"after", "again", "against", "all", "am", "an", "and", "any",
"are", "aren't", "as", "at", "be", "because",
"been", "before", "being", "below", "between", "both", "but",
"by", "can't", "cannot", "could", "couldn't",
"did", "didn't", "do", "does", "doesn't", "doing", "don't",
"down", "during", "each", "few", "for", "from",
"further", "had", "hadn't", "has", "hasn't", "have", "haven't",
"having", "he", "he'd", "he'll", "he's",
"her", "here", "here's", "hers", "herself", "him", "himself",
"his", "how", "how's", "i", "i'd", "i'll",
"i'm", "i've", "if", "in", "into", "is", "isn't", "it", "it's",
"its", "itself", "let's", "me", "more",
"most", "mustn't", "my", "myself", "no", "nor", "not", "of",
"off", "on", "once", "only", "or", "other",
"ought", "our", "ours", "", "ourselves", "out", "over", "own",
"same", "shan't", "she", "she'd", "she'll",
"she's", "should", "shouldn't", "so", "some", "such", "than",
"that", "that's", "the", "their", "theirs",
"them", "themselves", "then", "there", "there's", "these", "they",
"they'd", "they'll", "they're",
"they've", "this", "those", "through", "to", "too", "under",
"until", "up", "very", "was", "wasn't", "we",
"we'd", "we'll", "we're", "we've", "were", "weren't", "what",
"what's", "when", "when's", "where",
"where's", "which", "while", "who", "who's", "whom", "why",
"why's", "with", "won't", "would", "wouldn't",
"you", "you'd", "you'll", "you're", "you've", "your", "yours",
"yourself", "yourselves", "a", "a's",
"able", "about", "above", "according", "accordingly", "across",
"actually", "after", "afterwards", "again",
"against", "ain't", "all", "allow", "allows", "almost", "alone",
"along", "already", "also", "although",
"always", "am", "among", "amongst", "an", "and", "another", "any",
"anybody", "anyhow", "anyone",
"anything", "anyway", "anyways", "anywhere", "apart", "appear",
"appreciate", "appropriate", "are",
"aren't", "around", "as", "aside", "ask", "asking", "associated",
"at", "available", "away", "awfully",
"b", "be", "became", "because", "become", "becomes", "becoming",
"been", "before", "beforehand", "behind",
"being", "believe", "below", "beside", "besides", "best",
"better", "between", "beyond", "both", "brief",
"but", "by", "c", "c'mon", "c's", "came", "can", "can't",
"cannot", "cant", "cause", "causes", "certain",
"certainly", "changes", "clearly", "co", "com", "come", "comes",
"concerning", "consequently", "consider",
"considering", "contain", "containing", "contains",
"corresponding", "could", "couldn't", "course",
"currently", "d", "definitely", "described", "despite", "did",
"didn't", "different", "do", "does",
"doesn't", "doing", "don't", "done", "down", "downwards",
"during", "e", "each", "edu", "eg", "eight",
"either", "else", "elsewhere", "enough", "entirely", "especially",
"et", "etc", "even", "ever", "every",
"everybody", "everyone", "everything", "everywhere", "ex",
"exactly", "example", "except", "f", "far",
"few", "fifth", "first", "five", "followed", "following",
"follows", "for", "former", "formerly", "forth",
"four", "from", "further", "furthermore", "g", "get", "gets",
"getting", "given", "gives", "go", "goes",
"going", "gone", "got", "gotten", "greetings", "h", "had",
"hadn't", "happens", "hardly", "has", "hasn't",
"have", "haven't", "having", "he", "he's", "hello", "help",
"hence", "her", "here", "here's", "hereafter",
"hereby", "herein", "hereupon", "hers", "herself", "hi", "him",
"himself", "his", "hither", "hopefully",
"how", "howbeit", "however", "i", "i'd", "i'll", "i'm", "i've",
"ie", "if", "ignored", "immediate", "in",
"inasmuch", "inc", "indeed", "indicate", "indicated", "indicates",
"inner", "insofar", "instead", "into",
"inward", "is", "isn't", "it", "it'd", "it'll", "it's", "its",
"itself", "j", "just", "k", "keep", "keeps",
"kept", "know", "knows", "known", "l", "last", "lately", "later",
"latter", "latterly", "least", "less",
"lest", "let", "let's", "like", "liked", "likely", "little",
"look", "looking", "looks", "ltd", "m",
"mainly", "many", "may", "maybe", "me", "mean", "meanwhile",
"merely", "might", "more", "moreover", "most",
"mostly", "much", "must", "my", "myself", "n", "name", "namely",
"nd", "near", "nearly", "necessary",
"need", "needs", "neither", "never", "nevertheless", "new",
"next", "nine", "no", "nobody", "non", "none",
"noone", "nor", "normally", "not", "nothing", "novel", "now",
"nowhere", "o", "obviously", "of", "off",
"often", "oh", "ok", "okay", "old", "on", "once", "one", "ones",
"only", "onto", "or", "other", "others",
"otherwise", "ought", "our", "ours", "ourselves", "out",
"outside", "over", "overall", "own", "p",
"particular", "particularly", "per", "perhaps", "placed",
"please", "plus", "possible", "presumably",
"probably", "provides", "q", "que", "quite", "qv", "r", "rather",
"rd", "re", "really", "reasonably",
"regarding", "regardless", "regards", "relatively",
"respectively", "right", "s", "said", "same", "saw",
"say", "saying", "says", "second", "secondly", "see", "seeing",
"seem", "seemed", "seeming", "seems",
"seen", "self", "selves", "sensible", "sent", "serious",
"seriously", "seven", "several", "shall", "she",
"should", "shouldn't", "since", "six", "so", "some", "somebody",
"somehow", "someone", "something",
"sometime", "sometimes", "somewhat", "somewhere", "soon", "sorry",
"specified", "specify", "specifying",
"still", "sub", "such", "sup", "sure", "t", "t's", "take",
"taken", "tell", "tends", "th", "than", "thank",
"thanks", "thanx", "that", "that's", "thats", "the", "their",
"theirs", "them", "themselves", "then",
"thence", "there", "there's", "thereafter", "thereby",
"therefore", "therein", "theres", "thereupon",
"these", "they", "they'd", "they'll", "they're", "they've",
"think", "third", "this", "thorough",
"thoroughly", "those", "though", "three", "through", "throughout",
"thru", "thus", "to", "together", "too",
"took", "toward", "towards", "tried", "tries", "truly", "try",
"trying", "twice", "two", "u", "un",
"under", "unfortunately", "unless", "unlikely", "until", "unto",
"up", "upon", "us", "use", "used",
"useful", "uses", "using", "usually", "uucp", "v", "value",
"various", "very", "via", "viz", "vs", "w",
"want", "wants", "was", "wasn't", "way", "we", "we'd", "we'll",
"we're", "we've", "welcome", "well",
"went", "were", "weren't", "what", "what's", "whatever", "when",
"whence", "whenever", "where", "where's",
"whereafter", "whereas", "whereby", "wherein", "whereupon",
"wherever", "whether", "which", "while",
"whither", "who", "who's", "whoever", "whole", "whom", "whose",
"why", "will", "willing", "wish", "with",
"within", "without", "won't", "wonder", "would", "would",
"wouldn't", "x", "y", "yes", "yet", "you",
"you'd", "you'll", "you're", "you've", "your", "yours",
"yourself", "yourselves", "z", "zero", "I", "a",
"about", "an", "are", "as", "at", "be", "by", "com", "for",
"from", "how", "in", "is", "it", "of", "on",
"or", "that", "the", "this", "to", "was", "what", "when", "where",
"who", "will", "with", "the", "www"]
|
|
# Copyright (c) 2016 Dell Inc. or its subsidiaries.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import ddt
import mock
from oslo_log import log
from manila import exception
from manila.share.drivers.dell_emc.plugins.vmax import connection
from manila.share.drivers.dell_emc.plugins.vmax import connector
from manila.share.drivers.dell_emc.plugins.vmax import object_manager
from manila import test
from manila.tests import fake_share
from manila.tests.share.drivers.dell_emc.plugins.vmax import fakes
from manila.tests.share.drivers.dell_emc.plugins.vmax import utils
LOG = log.getLogger(__name__)
@ddt.ddt
class StorageConnectionTestCase(test.TestCase):
@mock.patch.object(connector.XMLAPIConnector, "_do_setup", mock.Mock())
def setUp(self):
super(StorageConnectionTestCase, self).setUp()
self.emc_share_driver = fakes.FakeEMCShareDriver()
self.connection = connection.VMAXStorageConnection(LOG)
self.pool = fakes.PoolTestData()
self.vdm = fakes.VDMTestData()
self.mover = fakes.MoverTestData()
self.fs = fakes.FileSystemTestData()
self.mount = fakes.MountPointTestData()
self.snap = fakes.SnapshotTestData()
self.cifs_share = fakes.CIFSShareTestData()
self.nfs_share = fakes.NFSShareTestData()
self.cifs_server = fakes.CIFSServerTestData()
self.dns = fakes.DNSDomainTestData()
with mock.patch.object(connector.XMLAPIConnector, 'request',
mock.Mock()):
self.connection.connect(self.emc_share_driver, None)
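    # The tests below share one pattern, using the helpers imported above:
    # queue canned XML responses on a utils.RequestSideEffect hook, wrap it in
    # utils.EMCMock and install it as the XML connector's request method
    # (SSH output is stubbed the same way via utils.SSHSideEffect on run_ssh),
    # call the driver method under test, then assert the exact request
    # sequence with assert_has_calls.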
def test_check_for_setup_error(self):
hook = utils.RequestSideEffect()
hook.append(self.mover.resp_get_ref_succeed())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
with mock.patch.object(connection.VMAXStorageConnection,
'_get_managed_storage_pools',
mock.Mock()):
self.connection.check_for_setup_error()
expected_calls = [mock.call(self.mover.req_get_ref())]
xml_req_mock.assert_has_calls(expected_calls)
def test_check_for_setup_error_with_invalid_mover_name(self):
hook = utils.RequestSideEffect()
hook.append(self.mover.resp_get_error())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
self.assertRaises(exception.InvalidParameterValue,
self.connection.check_for_setup_error)
expected_calls = [mock.call(self.mover.req_get_ref())]
xml_req_mock.assert_has_calls(expected_calls)
@ddt.data({'pool_conf': None,
'real_pools': ['fake_pool', 'nas_pool'],
'matched_pool': set()},
{'pool_conf': [],
'real_pools': ['fake_pool', 'nas_pool'],
'matched_pool': set()},
{'pool_conf': ['*'],
'real_pools': ['fake_pool', 'nas_pool'],
'matched_pool': {'fake_pool', 'nas_pool'}},
{'pool_conf': ['fake_*'],
'real_pools': ['fake_pool', 'nas_pool', 'Perf_Pool'],
'matched_pool': {'fake_pool'}},
{'pool_conf': ['*pool'],
'real_pools': ['fake_pool', 'NAS_Pool', 'Perf_POOL'],
'matched_pool': {'fake_pool'}},
{'pool_conf': ['nas_pool'],
'real_pools': ['fake_pool', 'nas_pool', 'perf_pool'],
'matched_pool': {'nas_pool'}})
@ddt.unpack
def test__get_managed_storage_pools(self, pool_conf, real_pools,
matched_pool):
with mock.patch.object(object_manager.StoragePool,
'get_all',
mock.Mock(return_value=('ok', real_pools))):
pool = self.connection._get_managed_storage_pools(pool_conf)
self.assertEqual(matched_pool, pool)
def test__get_managed_storage_pools_failed_to_get_pool_info(self):
hook = utils.RequestSideEffect()
hook.append(self.pool.resp_get_error())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
pool_conf = fakes.FakeData.pool_name
self.assertRaises(exception.EMCVmaxXMLAPIError,
self.connection._get_managed_storage_pools,
pool_conf)
expected_calls = [mock.call(self.pool.req_get())]
xml_req_mock.assert_has_calls(expected_calls)
@ddt.data(
{'pool_conf': ['fake_*'],
'real_pools': ['nas_pool', 'Perf_Pool']},
{'pool_conf': ['*pool'],
'real_pools': ['NAS_Pool', 'Perf_POOL']},
{'pool_conf': ['nas_pool'],
'real_pools': ['fake_pool', 'perf_pool']},
)
@ddt.unpack
def test__get_managed_storage_pools_without_matched_pool(self, pool_conf,
real_pools):
with mock.patch.object(object_manager.StoragePool,
'get_all',
mock.Mock(return_value=('ok', real_pools))):
self.assertRaises(exception.InvalidParameterValue,
self.connection._get_managed_storage_pools,
pool_conf)
def test_create_cifs_share(self):
share_server = fakes.SHARE_SERVER
share = fakes.CIFS_SHARE
hook = utils.RequestSideEffect()
hook.append(self.vdm.resp_get_succeed())
hook.append(self.cifs_server.resp_get_succeed(
mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True))
hook.append(self.pool.resp_get_succeed())
hook.append(self.fs.resp_task_succeed())
hook.append(self.cifs_share.resp_task_succeed())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
ssh_hook = utils.SSHSideEffect()
ssh_hook.append()
ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock
location = self.connection.create_share(None, share, share_server)
expected_calls = [
mock.call(self.vdm.req_get()),
mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
mock.call(self.pool.req_get()),
mock.call(self.fs.req_create_on_vdm()),
mock.call(self.cifs_share.req_create(self.vdm.vdm_id)),
]
xml_req_mock.assert_has_calls(expected_calls)
ssh_calls = [mock.call(self.cifs_share.cmd_disable_access(), True)]
ssh_cmd_mock.assert_has_calls(ssh_calls)
self.assertEqual([r'\\192.168.1.1\%s' % share['name']], location,
'CIFS export path is incorrect')
def test_create_nfs_share(self):
share_server = fakes.SHARE_SERVER
share = fakes.NFS_SHARE
hook = utils.RequestSideEffect()
hook.append(self.pool.resp_get_succeed())
hook.append(self.vdm.resp_get_succeed())
hook.append(self.fs.resp_task_succeed())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
ssh_hook = utils.SSHSideEffect()
ssh_hook.append(self.nfs_share.output_create())
ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock
location = self.connection.create_share(None, share, share_server)
expected_calls = [
mock.call(self.pool.req_get()),
mock.call(self.vdm.req_get()),
mock.call(self.fs.req_create_on_vdm()),
]
xml_req_mock.assert_has_calls(expected_calls)
ssh_calls = [mock.call(self.nfs_share.cmd_create(), True)]
ssh_cmd_mock.assert_has_calls(ssh_calls)
self.assertEqual('192.168.1.2:/%s' % share['name'], location,
'NFS export path is incorrect')
def test_create_cifs_share_without_share_server(self):
share = fakes.CIFS_SHARE
self.assertRaises(exception.InvalidInput,
self.connection.create_share,
None, share, None)
def test_create_cifs_share_without_share_server_name(self):
share = fakes.CIFS_SHARE
share_server = copy.deepcopy(fakes.SHARE_SERVER)
share_server['backend_details']['share_server_name'] = None
self.assertRaises(exception.EMCVmaxXMLAPIError,
self.connection.create_share,
None, share, share_server)
    def test_create_cifs_share_with_invalid_cifs_server_name(self):
share_server = fakes.SHARE_SERVER
share = fakes.CIFS_SHARE
hook = utils.RequestSideEffect()
hook.append(self.vdm.resp_get_succeed())
hook.append(self.cifs_server.resp_get_error())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
self.assertRaises(exception.EMCVmaxXMLAPIError,
self.connection.create_share,
None, share, share_server)
expected_calls = [
mock.call(self.vdm.req_get()),
mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
]
xml_req_mock.assert_has_calls(expected_calls)
def test_create_cifs_share_without_interface_in_cifs_server(self):
share_server = fakes.SHARE_SERVER
share = fakes.CIFS_SHARE
hook = utils.RequestSideEffect()
hook.append(self.vdm.resp_get_succeed())
hook.append(self.cifs_server.resp_get_without_interface(
mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True))
hook.append(self.pool.resp_get_succeed())
hook.append(self.fs.resp_task_succeed())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
self.assertRaises(exception.EMCVmaxXMLAPIError,
self.connection.create_share,
None, share, share_server)
expected_calls = [
mock.call(self.vdm.req_get()),
mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
mock.call(self.pool.req_get()),
mock.call(self.fs.req_create_on_vdm()),
]
xml_req_mock.assert_has_calls(expected_calls)
def test_create_cifs_share_without_pool_name(self):
share_server = fakes.SHARE_SERVER
share = fake_share.fake_share(host='HostA@BackendB',
share_proto='CIFS')
self.assertRaises(exception.InvalidHost,
self.connection.create_share,
None, share, share_server)
def test_create_cifs_share_from_snapshot(self):
share_server = fakes.SHARE_SERVER
share = fakes.CIFS_SHARE
snapshot = fake_share.fake_snapshot(
name=fakes.FakeData.src_snap_name,
share_name=fakes.FakeData.src_share_name,
share_id=fakes.FakeData.src_share_name,
id=fakes.FakeData.src_snap_name)
hook = utils.RequestSideEffect()
hook.append(self.fs.resp_get_succeed())
hook.append(self.vdm.resp_get_succeed())
hook.append(self.cifs_server.resp_get_succeed(
mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True))
hook.append(self.cifs_share.resp_task_succeed())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
ssh_hook = utils.SSHSideEffect()
ssh_hook.append(self.mover.output_get_interconnect_id())
ssh_hook.append()
ssh_hook.append()
ssh_hook.append(self.fs.output_copy_ckpt)
ssh_hook.append(self.fs.output_info())
ssh_hook.append()
ssh_hook.append()
ssh_hook.append()
ssh_hook.append()
ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock
location = self.connection.create_share_from_snapshot(
None, share, snapshot, share_server)
expected_calls = [
mock.call(self.fs.req_get()),
mock.call(self.vdm.req_get()),
mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
mock.call(self.cifs_share.req_create(self.vdm.vdm_id)),
]
xml_req_mock.assert_has_calls(expected_calls)
ssh_calls = [
mock.call(self.mover.cmd_get_interconnect_id(), False),
mock.call(self.fs.cmd_create_from_ckpt(), False),
mock.call(self.mount.cmd_server_mount('ro'), False),
mock.call(self.fs.cmd_copy_ckpt(), True),
mock.call(self.fs.cmd_nas_fs_info(), False),
mock.call(self.mount.cmd_server_umount(), False),
mock.call(self.fs.cmd_delete(), False),
mock.call(self.mount.cmd_server_mount('rw'), False),
mock.call(self.cifs_share.cmd_disable_access(), True),
]
ssh_cmd_mock.assert_has_calls(ssh_calls)
self.assertEqual([r'\\192.168.1.1\%s' % share['name']], location,
'CIFS export path is incorrect')
def test_create_nfs_share_from_snapshot(self):
share_server = fakes.SHARE_SERVER
share = fakes.NFS_SHARE
snapshot = fake_share.fake_snapshot(
name=fakes.FakeData.src_snap_name,
share_name=fakes.FakeData.src_share_name,
share_id=fakes.FakeData.src_share_name,
id=fakes.FakeData.src_snap_name)
hook = utils.RequestSideEffect()
hook.append(self.fs.resp_get_succeed())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
ssh_hook = utils.SSHSideEffect()
ssh_hook.append(self.mover.output_get_interconnect_id())
ssh_hook.append()
ssh_hook.append()
ssh_hook.append(self.fs.output_copy_ckpt)
ssh_hook.append(self.fs.output_info())
ssh_hook.append()
ssh_hook.append()
ssh_hook.append()
ssh_hook.append(self.nfs_share.output_create())
ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock
location = self.connection.create_share_from_snapshot(
None, share, snapshot, share_server)
expected_calls = [mock.call(self.fs.req_get())]
xml_req_mock.assert_has_calls(expected_calls)
ssh_calls = [
mock.call(self.mover.cmd_get_interconnect_id(), False),
mock.call(self.fs.cmd_create_from_ckpt(), False),
mock.call(self.mount.cmd_server_mount('ro'), False),
mock.call(self.fs.cmd_copy_ckpt(), True),
mock.call(self.fs.cmd_nas_fs_info(), False),
mock.call(self.mount.cmd_server_umount(), False),
mock.call(self.fs.cmd_delete(), False),
mock.call(self.mount.cmd_server_mount('rw'), False),
mock.call(self.nfs_share.cmd_create(), True)
]
ssh_cmd_mock.assert_has_calls(ssh_calls)
self.assertEqual('192.168.1.2:/%s' % share['name'], location,
'NFS export path is incorrect')
def test_create_share_with_incorrect_proto(self):
share_server = fakes.SHARE_SERVER
share = fake_share.fake_share(share_proto='FAKE_PROTO')
self.assertRaises(exception.InvalidShare,
self.connection.create_share,
context=None,
share=share,
share_server=share_server)
def test_create_share_from_snapshot_with_incorrect_proto(self):
share_server = fakes.SHARE_SERVER
share = fake_share.fake_share(share_proto='FAKE_PROTO')
snapshot = fake_share.fake_snapshot()
self.assertRaises(exception.InvalidShare,
self.connection.create_share_from_snapshot,
None, share, snapshot, share_server)
def test_create_share_from_snapshot_without_pool_name(self):
share_server = fakes.SHARE_SERVER
share = fake_share.fake_share(host='HostA@BackendB',
share_proto='CIFS')
snapshot = fake_share.fake_snapshot()
self.assertRaises(exception.InvalidHost,
self.connection.create_share_from_snapshot,
None, share, snapshot, share_server)
def test_delete_cifs_share(self):
share_server = fakes.SHARE_SERVER
share = fakes.CIFS_SHARE
hook = utils.RequestSideEffect()
hook.append(self.cifs_share.resp_get_succeed(self.vdm.vdm_id))
hook.append(self.vdm.resp_get_succeed())
hook.append(self.cifs_share.resp_task_succeed())
hook.append(self.mount.resp_task_succeed())
hook.append(self.fs.resp_get_succeed())
hook.append(self.fs.resp_task_succeed())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
self.connection.delete_share(None, share, share_server)
expected_calls = [
mock.call(self.cifs_share.req_get()),
mock.call(self.vdm.req_get()),
mock.call(self.cifs_share.req_delete(self.vdm.vdm_id)),
mock.call(self.mount.req_delete(self.vdm.vdm_id)),
mock.call(self.fs.req_get()),
mock.call(self.fs.req_delete()),
]
xml_req_mock.assert_has_calls(expected_calls)
def test_delete_nfs_share(self):
share_server = fakes.SHARE_SERVER
share = fakes.NFS_SHARE
hook = utils.RequestSideEffect()
hook.append(self.vdm.resp_get_succeed())
hook.append(self.mount.resp_task_succeed())
hook.append(self.fs.resp_get_succeed())
hook.append(self.fs.resp_task_succeed())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
ssh_hook = utils.SSHSideEffect()
ssh_hook.append(self.nfs_share.output_get_succeed(
rw_hosts=self.nfs_share.rw_hosts,
ro_hosts=self.nfs_share.ro_hosts))
ssh_hook.append(self.nfs_share.output_delete_succeed())
ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock
self.connection.delete_share(None, share, share_server)
expected_calls = [
mock.call(self.vdm.req_get()),
mock.call(self.mount.req_delete(self.vdm.vdm_id)),
mock.call(self.fs.req_get()),
mock.call(self.fs.req_delete()),
]
xml_req_mock.assert_has_calls(expected_calls)
ssh_calls = [
mock.call(self.nfs_share.cmd_get(), False),
mock.call(self.nfs_share.cmd_delete(), True),
]
ssh_cmd_mock.assert_has_calls(ssh_calls)
def test_delete_share_without_share_server(self):
share = fakes.CIFS_SHARE
self.connection.delete_share(None, share)
def test_delete_share_with_incorrect_proto(self):
share_server = fakes.SHARE_SERVER
share = fake_share.fake_share(share_proto='FAKE_PROTO')
self.assertRaises(exception.InvalidShare,
self.connection.delete_share,
context=None,
share=share,
share_server=share_server)
def test_delete_cifs_share_with_nonexistent_mount_and_filesystem(self):
share_server = fakes.SHARE_SERVER
share = fakes.CIFS_SHARE
hook = utils.RequestSideEffect()
hook.append(self.cifs_share.resp_get_succeed(self.vdm.vdm_id))
hook.append(self.vdm.resp_get_succeed())
hook.append(self.cifs_share.resp_task_succeed())
hook.append(self.mount.resp_task_error())
hook.append(self.fs.resp_get_succeed())
hook.append(self.fs.resp_task_error())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
self.connection.delete_share(None, share, share_server)
expected_calls = [
mock.call(self.cifs_share.req_get()),
mock.call(self.vdm.req_get()),
mock.call(self.cifs_share.req_delete(self.vdm.vdm_id)),
mock.call(self.mount.req_delete(self.vdm.vdm_id)),
mock.call(self.fs.req_get()),
mock.call(self.fs.req_delete()),
]
xml_req_mock.assert_has_calls(expected_calls)
def test_extend_share(self):
share_server = fakes.SHARE_SERVER
share = fakes.CIFS_SHARE
new_size = fakes.FakeData.new_size
hook = utils.RequestSideEffect()
hook.append(self.fs.resp_get_succeed())
hook.append(self.pool.resp_get_succeed())
hook.append(self.fs.resp_task_succeed())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
self.connection.extend_share(share, new_size, share_server)
expected_calls = [
mock.call(self.fs.req_get()),
mock.call(self.pool.req_get()),
mock.call(self.fs.req_extend()),
]
xml_req_mock.assert_has_calls(expected_calls)
def test_extend_share_without_pool_name(self):
share_server = fakes.SHARE_SERVER
share = fake_share.fake_share(host='HostA@BackendB',
share_proto='CIFS')
new_size = fakes.FakeData.new_size
self.assertRaises(exception.InvalidHost,
self.connection.extend_share,
share, new_size, share_server)
def test_create_snapshot(self):
share_server = fakes.SHARE_SERVER
snapshot = fake_share.fake_snapshot(
id=fakes.FakeData.snapshot_name,
share_id=fakes.FakeData.filesystem_name,
share_name=fakes.FakeData.share_name)
hook = utils.RequestSideEffect()
hook.append(self.fs.resp_get_succeed())
hook.append(self.snap.resp_task_succeed())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
self.connection.create_snapshot(None, snapshot, share_server)
expected_calls = [
mock.call(self.fs.req_get()),
mock.call(self.snap.req_create()),
]
xml_req_mock.assert_has_calls(expected_calls)
def test_create_snapshot_with_incorrect_share_info(self):
share_server = fakes.SHARE_SERVER
snapshot = fake_share.fake_snapshot(
id=fakes.FakeData.snapshot_name,
share_id=fakes.FakeData.filesystem_name,
share_name=fakes.FakeData.share_name)
hook = utils.RequestSideEffect()
hook.append(self.fs.resp_get_but_not_found())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
self.assertRaises(exception.EMCVmaxXMLAPIError,
self.connection.create_snapshot,
None, snapshot, share_server)
expected_calls = [mock.call(self.fs.req_get())]
xml_req_mock.assert_has_calls(expected_calls)
def test_delete_snapshot(self):
share_server = fakes.SHARE_SERVER
snapshot = fake_share.fake_snapshot(
id=fakes.FakeData.snapshot_name,
share_id=fakes.FakeData.filesystem_name,
share_name=fakes.FakeData.share_name)
hook = utils.RequestSideEffect()
hook.append(self.snap.resp_get_succeed())
hook.append(self.snap.resp_task_succeed())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
self.connection.delete_snapshot(None, snapshot, share_server)
expected_calls = [
mock.call(self.snap.req_get()),
mock.call(self.snap.req_delete()),
]
xml_req_mock.assert_has_calls(expected_calls)
@utils.patch_get_managed_ports(return_value=['cge-1-0'])
def test_setup_server(self):
hook = utils.RequestSideEffect()
hook.append(self.vdm.resp_get_but_not_found())
hook.append(self.mover.resp_get_ref_succeed())
hook.append(self.vdm.resp_task_succeed())
hook.append(self.mover.resp_task_succeed())
hook.append(self.mover.resp_task_succeed())
hook.append(self.dns.resp_task_succeed())
hook.append(self.vdm.resp_get_succeed())
hook.append(self.cifs_server.resp_task_succeed())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
ssh_hook = utils.SSHSideEffect()
ssh_hook.append()
ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock
self.connection.setup_server(fakes.NETWORK_INFO, None)
if_name_1 = fakes.FakeData.network_allocations_id1[-12:]
if_name_2 = fakes.FakeData.network_allocations_id2[-12:]
expected_calls = [
mock.call(self.vdm.req_get()),
mock.call(self.mover.req_get_ref()),
mock.call(self.vdm.req_create()),
mock.call(self.mover.req_create_interface(
if_name=if_name_1,
ip=fakes.FakeData.network_allocations_ip1)),
mock.call(self.mover.req_create_interface(
if_name=if_name_2,
ip=fakes.FakeData.network_allocations_ip2)),
mock.call(self.dns.req_create()),
mock.call(self.vdm.req_get()),
mock.call(self.cifs_server.req_create(self.vdm.vdm_id)),
]
xml_req_mock.assert_has_calls(expected_calls)
ssh_calls = [
mock.call(self.vdm.cmd_attach_nfs_interface(), False),
]
ssh_cmd_mock.assert_has_calls(ssh_calls)
@utils.patch_get_managed_ports(return_value=['cge-1-0'])
def test_setup_server_with_existing_vdm(self):
hook = utils.RequestSideEffect()
hook.append(self.vdm.resp_get_succeed())
hook.append(self.mover.resp_get_ref_succeed())
hook.append(self.mover.resp_task_succeed())
hook.append(self.mover.resp_task_succeed())
hook.append(self.dns.resp_task_succeed())
hook.append(self.cifs_server.resp_task_succeed())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
ssh_hook = utils.SSHSideEffect()
ssh_hook.append()
ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock
self.connection.setup_server(fakes.NETWORK_INFO, None)
if_name_1 = fakes.FakeData.network_allocations_id1[-12:]
if_name_2 = fakes.FakeData.network_allocations_id2[-12:]
expected_calls = [
mock.call(self.vdm.req_get()),
mock.call(self.mover.req_get_ref()),
mock.call(self.mover.req_create_interface(
if_name=if_name_1,
ip=fakes.FakeData.network_allocations_ip1)),
mock.call(self.mover.req_create_interface(
if_name=if_name_2,
ip=fakes.FakeData.network_allocations_ip2)),
mock.call(self.dns.req_create()),
mock.call(self.cifs_server.req_create(self.vdm.vdm_id)),
]
xml_req_mock.assert_has_calls(expected_calls)
ssh_calls = [
mock.call(self.vdm.cmd_attach_nfs_interface(), False),
]
ssh_cmd_mock.assert_has_calls(ssh_calls)
def test_setup_server_with_invalid_security_service(self):
network_info = copy.deepcopy(fakes.NETWORK_INFO)
network_info['security_services'][0]['type'] = 'fake_type'
self.assertRaises(exception.EMCVmaxXMLAPIError,
self.connection.setup_server,
network_info, None)
@utils.patch_get_managed_ports(
side_effect=exception.EMCVmaxXMLAPIError(
err="Get managed ports fail."))
def test_setup_server_without_valid_physical_device(self):
hook = utils.RequestSideEffect()
hook.append(self.vdm.resp_get_but_not_found())
hook.append(self.mover.resp_get_ref_succeed())
hook.append(self.vdm.resp_task_succeed())
hook.append(self.vdm.resp_get_succeed())
hook.append(self.cifs_server.resp_get_without_value())
hook.append(self.vdm.resp_task_succeed())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
ssh_hook = utils.SSHSideEffect()
ssh_hook.append(self.vdm.output_get_interfaces(nfs_interface=''))
ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock
self.assertRaises(exception.EMCVmaxXMLAPIError,
self.connection.setup_server,
fakes.NETWORK_INFO, None)
expected_calls = [
mock.call(self.vdm.req_get()),
mock.call(self.mover.req_get_ref()),
mock.call(self.vdm.req_create()),
mock.call(self.vdm.req_get()),
mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
mock.call(self.vdm.req_delete()),
]
xml_req_mock.assert_has_calls(expected_calls)
ssh_calls = [
mock.call(self.vdm.cmd_get_interfaces(), False),
]
ssh_cmd_mock.assert_has_calls(ssh_calls)
@utils.patch_get_managed_ports(return_value=['cge-1-0'])
def test_setup_server_with_exception(self):
hook = utils.RequestSideEffect()
hook.append(self.vdm.resp_get_but_not_found())
hook.append(self.mover.resp_get_ref_succeed())
hook.append(self.vdm.resp_task_succeed())
hook.append(self.mover.resp_task_succeed())
hook.append(self.mover.resp_task_error())
hook.append(self.vdm.resp_get_succeed())
hook.append(self.cifs_server.resp_get_without_value())
hook.append(self.mover.resp_task_succeed())
hook.append(self.vdm.resp_task_succeed())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
ssh_hook = utils.SSHSideEffect()
ssh_hook.append(self.vdm.output_get_interfaces(nfs_interface=''))
ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock
self.assertRaises(exception.EMCVmaxXMLAPIError,
self.connection.setup_server,
fakes.NETWORK_INFO, None)
if_name_1 = fakes.FakeData.network_allocations_id1[-12:]
if_name_2 = fakes.FakeData.network_allocations_id2[-12:]
expected_calls = [
mock.call(self.vdm.req_get()),
mock.call(self.mover.req_get_ref()),
mock.call(self.vdm.req_create()),
mock.call(self.mover.req_create_interface(
if_name=if_name_1,
ip=fakes.FakeData.network_allocations_ip1)),
mock.call(self.mover.req_create_interface(
if_name=if_name_2,
ip=fakes.FakeData.network_allocations_ip2)),
mock.call(self.vdm.req_get()),
mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
mock.call(self.mover.req_delete_interface(
fakes.FakeData.network_allocations_ip1)),
mock.call(self.vdm.req_delete()),
]
xml_req_mock.assert_has_calls(expected_calls)
ssh_calls = [
mock.call(self.vdm.cmd_get_interfaces(), False),
]
ssh_cmd_mock.assert_has_calls(ssh_calls)
def test_teardown_server(self):
hook = utils.RequestSideEffect()
hook.append(self.vdm.resp_get_succeed())
hook.append(self.cifs_server.resp_get_succeed(
mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True))
hook.append(self.cifs_server.resp_task_succeed())
hook.append(self.cifs_server.resp_get_succeed(
mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=False))
hook.append(self.mover.resp_get_ref_succeed())
hook.append(self.mover.resp_task_succeed())
hook.append(self.mover.resp_task_succeed())
hook.append(self.vdm.resp_task_succeed())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
ssh_hook = utils.SSHSideEffect()
ssh_hook.append(self.vdm.output_get_interfaces())
ssh_hook.append()
ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock
self.connection.teardown_server(fakes.SERVER_DETAIL,
fakes.SECURITY_SERVICE)
expected_calls = [
mock.call(self.vdm.req_get()),
mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
mock.call(self.cifs_server.req_modify(
mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=False)),
mock.call(self.cifs_server.req_delete(self.vdm.vdm_id)),
mock.call(self.mover.req_get_ref()),
mock.call(self.mover.req_delete_interface(
fakes.FakeData.network_allocations_ip1)),
mock.call(self.mover.req_delete_interface(
fakes.FakeData.network_allocations_ip2)),
mock.call(self.vdm.req_delete()),
]
xml_req_mock.assert_has_calls(expected_calls)
ssh_calls = [
mock.call(self.vdm.cmd_get_interfaces(), False),
mock.call(self.vdm.cmd_detach_nfs_interface(), True),
]
ssh_cmd_mock.assert_has_calls(ssh_calls)
def test_teardown_server_without_server_detail(self):
self.connection.teardown_server(None, fakes.SECURITY_SERVICE)
def test_teardown_server_without_security_services(self):
hook = utils.RequestSideEffect()
hook.append(self.vdm.resp_get_succeed())
hook.append(self.mover.resp_get_ref_succeed())
hook.append(self.mover.resp_task_succeed())
hook.append(self.mover.resp_task_succeed())
hook.append(self.vdm.resp_task_succeed())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
ssh_hook = utils.SSHSideEffect()
ssh_hook.append(self.vdm.output_get_interfaces())
ssh_hook.append()
ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock
self.connection.teardown_server(fakes.SERVER_DETAIL, [])
expected_calls = [
mock.call(self.vdm.req_get()),
mock.call(self.mover.req_get_ref()),
mock.call(self.mover.req_delete_interface(
fakes.FakeData.network_allocations_ip1)),
mock.call(self.mover.req_delete_interface(
fakes.FakeData.network_allocations_ip2)),
mock.call(self.vdm.req_delete()),
]
xml_req_mock.assert_has_calls(expected_calls)
ssh_calls = [
mock.call(self.vdm.cmd_get_interfaces(), False),
mock.call(self.vdm.cmd_detach_nfs_interface(), True),
]
ssh_cmd_mock.assert_has_calls(ssh_calls)
def test_teardown_server_without_share_server_name_in_server_detail(self):
server_detail = {
'cifs_if': fakes.FakeData.network_allocations_ip1,
'nfs_if': fakes.FakeData.network_allocations_ip2,
}
self.connection.teardown_server(server_detail, fakes.SECURITY_SERVICE)
def test_teardown_server_with_invalid_server_name(self):
hook = utils.RequestSideEffect()
hook.append(self.vdm.resp_get_error())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
self.connection.teardown_server(fakes.SERVER_DETAIL,
fakes.SECURITY_SERVICE)
expected_calls = [mock.call(self.vdm.req_get())]
xml_req_mock.assert_has_calls(expected_calls)
def test_teardown_server_without_cifs_server(self):
hook = utils.RequestSideEffect()
hook.append(self.vdm.resp_get_succeed())
hook.append(self.cifs_server.resp_get_error())
hook.append(self.mover.resp_get_ref_succeed())
hook.append(self.cifs_server.resp_task_succeed())
hook.append(self.cifs_server.resp_get_succeed(
mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=False))
hook.append(self.mover.resp_task_succeed())
hook.append(self.mover.resp_task_succeed())
hook.append(self.vdm.resp_task_succeed())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
ssh_hook = utils.SSHSideEffect()
ssh_hook.append(self.vdm.output_get_interfaces())
ssh_hook.append()
ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock
self.connection.teardown_server(fakes.SERVER_DETAIL,
fakes.SECURITY_SERVICE)
expected_calls = [
mock.call(self.vdm.req_get()),
mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
mock.call(self.mover.req_get_ref()),
mock.call(self.mover.req_delete_interface(
fakes.FakeData.network_allocations_ip1)),
mock.call(self.mover.req_delete_interface(
fakes.FakeData.network_allocations_ip2)),
mock.call(self.vdm.req_delete()),
]
xml_req_mock.assert_has_calls(expected_calls)
ssh_calls = [
mock.call(self.vdm.cmd_get_interfaces(), False),
mock.call(self.vdm.cmd_detach_nfs_interface(), True),
]
ssh_cmd_mock.assert_has_calls(ssh_calls)
def test_teardown_server_with_invalid_cifs_server_modification(self):
hook = utils.RequestSideEffect()
hook.append(self.vdm.resp_get_succeed())
hook.append(self.cifs_server.resp_get_succeed(
mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True))
hook.append(self.cifs_server.resp_task_error())
hook.append(self.cifs_server.resp_task_succeed())
hook.append(self.mover.resp_get_ref_succeed())
hook.append(self.mover.resp_task_succeed())
hook.append(self.mover.resp_task_succeed())
hook.append(self.vdm.resp_task_succeed())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
ssh_hook = utils.SSHSideEffect()
ssh_hook.append(self.vdm.output_get_interfaces())
ssh_hook.append()
ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock
self.connection.teardown_server(fakes.SERVER_DETAIL,
fakes.SECURITY_SERVICE)
expected_calls = [
mock.call(self.vdm.req_get()),
mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
mock.call(self.cifs_server.req_modify(self.vdm.vdm_id)),
mock.call(self.cifs_server.req_delete(self.vdm.vdm_id)),
mock.call(self.mover.req_get_ref()),
mock.call(self.mover.req_delete_interface(
fakes.FakeData.network_allocations_ip1)),
mock.call(self.mover.req_delete_interface(
fakes.FakeData.network_allocations_ip2)),
mock.call(self.vdm.req_delete()),
]
xml_req_mock.assert_has_calls(expected_calls)
ssh_calls = [
mock.call(self.vdm.cmd_get_interfaces(), False),
mock.call(self.vdm.cmd_detach_nfs_interface(), True),
]
ssh_cmd_mock.assert_has_calls(ssh_calls)
def test_update_access_add_cifs_rw(self):
share_server = fakes.SHARE_SERVER
share = fakes.CIFS_SHARE
access = fakes.CIFS_RW_ACCESS
hook = utils.RequestSideEffect()
hook.append(self.vdm.resp_get_succeed())
hook.append(self.cifs_server.resp_get_succeed(
mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True))
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
ssh_hook = utils.SSHSideEffect()
ssh_hook.append(self.cifs_share.output_allow_access())
ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock
self.connection.update_access(None, share, [], [access], [],
share_server=share_server)
expected_calls = [
mock.call(self.vdm.req_get()),
mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
]
xml_req_mock.assert_has_calls(expected_calls)
ssh_calls = [
mock.call(self.cifs_share.cmd_change_access(), True),
]
ssh_cmd_mock.assert_has_calls(ssh_calls)
def test_update_access_deny_nfs(self):
share_server = fakes.SHARE_SERVER
share = fakes.NFS_SHARE
access = fakes.NFS_RW_ACCESS
rw_hosts = copy.deepcopy(fakes.FakeData.rw_hosts)
rw_hosts.append(access['access_to'])
ssh_hook = utils.SSHSideEffect()
ssh_hook.append(self.nfs_share.output_get_succeed(
rw_hosts=rw_hosts,
ro_hosts=fakes.FakeData.ro_hosts))
ssh_hook.append(self.nfs_share.output_set_access_success())
ssh_hook.append(self.nfs_share.output_get_succeed(
rw_hosts=fakes.FakeData.rw_hosts,
ro_hosts=fakes.FakeData.ro_hosts))
ssh_cmd_mock = utils.EMCNFSShareMock(side_effect=ssh_hook)
self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock
self.connection.update_access(None, share, [], [], [access],
share_server=share_server)
ssh_calls = [
mock.call(self.nfs_share.cmd_get(), True),
mock.call(self.nfs_share.cmd_set_access(
rw_hosts=self.nfs_share.rw_hosts,
ro_hosts=self.nfs_share.ro_hosts), True),
mock.call(self.nfs_share.cmd_get(), True),
]
ssh_cmd_mock.assert_has_calls(ssh_calls)
def test_update_access_recover_nfs_rule(self):
share_server = fakes.SHARE_SERVER
share = fakes.NFS_SHARE
access = fakes.NFS_RW_ACCESS
hosts = ['192.168.1.5']
rw_hosts = copy.deepcopy(fakes.FakeData.rw_hosts)
rw_hosts.append(access['access_to'])
ssh_hook = utils.SSHSideEffect()
ssh_hook.append(self.nfs_share.output_get_succeed(
rw_hosts=rw_hosts,
ro_hosts=fakes.FakeData.ro_hosts))
ssh_hook.append(self.nfs_share.output_set_access_success())
ssh_hook.append(self.nfs_share.output_get_succeed(
rw_hosts=hosts,
ro_hosts=[]))
ssh_cmd_mock = utils.EMCNFSShareMock(side_effect=ssh_hook)
self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock
self.connection.update_access(None, share, [access], [], [],
share_server=share_server)
ssh_calls = [
mock.call(self.nfs_share.cmd_get(), True),
mock.call(self.nfs_share.cmd_set_access(
rw_hosts=hosts,
ro_hosts=[]), True),
mock.call(self.nfs_share.cmd_get(), True),
]
ssh_cmd_mock.assert_has_calls(ssh_calls)
def test_update_access_recover_cifs_rule(self):
share_server = fakes.SHARE_SERVER
share = fakes.CIFS_SHARE
access = fakes.CIFS_RW_ACCESS
hook = utils.RequestSideEffect()
hook.append(self.vdm.resp_get_succeed())
hook.append(self.cifs_server.resp_get_succeed(
mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True))
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
ssh_hook = utils.SSHSideEffect()
ssh_hook.append(self.cifs_share.output_allow_access())
ssh_hook.append(fakes.FakeData.cifs_access)
ssh_hook.append('Command succeeded')
ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock
self.connection.update_access(None, share, [access], [], [],
share_server=share_server)
expected_calls = [
mock.call(self.vdm.req_get()),
mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
]
xml_req_mock.assert_has_calls(expected_calls)
ssh_calls = [
mock.call(self.cifs_share.cmd_change_access(), True),
mock.call(self.cifs_share.cmd_get_access(), True),
mock.call(self.cifs_share.cmd_change_access(
action='revoke', user='guest'), True),
]
ssh_cmd_mock.assert_has_calls(ssh_calls)
def test_cifs_clear_access_server_not_found(self):
server = fakes.SHARE_SERVER
hook = utils.RequestSideEffect()
hook.append(self.vdm.resp_get_succeed())
hook.append(self.cifs_server.resp_get_succeed(
mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True,
cifs_server_name='cifs_server_name'))
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
self.assertRaises(exception.EMCVmaxXMLAPIError,
self.connection._cifs_clear_access,
'share_name', server, None)
expected_calls = [
mock.call(self.vdm.req_get()),
mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
]
xml_req_mock.assert_has_calls(expected_calls)
def test_allow_cifs_rw_access(self):
share_server = fakes.SHARE_SERVER
share = fakes.CIFS_SHARE
access = fakes.CIFS_RW_ACCESS
hook = utils.RequestSideEffect()
hook.append(self.vdm.resp_get_succeed())
hook.append(self.cifs_server.resp_get_succeed(
mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True))
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
ssh_hook = utils.SSHSideEffect()
ssh_hook.append(self.cifs_share.output_allow_access())
ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock
self.connection.allow_access(None, share, access, share_server)
expected_calls = [
mock.call(self.vdm.req_get()),
mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
]
xml_req_mock.assert_has_calls(expected_calls)
ssh_calls = [
mock.call(self.cifs_share.cmd_change_access(), True),
]
ssh_cmd_mock.assert_has_calls(ssh_calls)
def test_allow_cifs_ro_access(self):
share_server = fakes.SHARE_SERVER
share = fakes.CIFS_SHARE
access = fakes.CIFS_RO_ACCESS
hook = utils.RequestSideEffect()
hook.append(self.vdm.resp_get_succeed())
hook.append(self.cifs_server.resp_get_succeed(
mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True))
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
ssh_hook = utils.SSHSideEffect()
ssh_hook.append(self.cifs_share.output_allow_access())
ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock
self.connection.allow_access(None, share, access, share_server)
expected_calls = [
mock.call(self.vdm.req_get()),
mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
]
xml_req_mock.assert_has_calls(expected_calls)
ssh_calls = [
mock.call(self.cifs_share.cmd_change_access('ro'), True),
]
ssh_cmd_mock.assert_has_calls(ssh_calls)
def test_allow_ro_access_without_share_server_name(self):
share = fakes.CIFS_SHARE
share_server = copy.deepcopy(fakes.SHARE_SERVER)
share_server['backend_details'].pop('share_server_name')
access = fakes.CIFS_RO_ACCESS
hook = utils.RequestSideEffect()
hook.append(self.vdm.resp_get_succeed())
hook.append(self.cifs_server.resp_get_succeed(
mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True))
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
ssh_hook = utils.SSHSideEffect()
ssh_hook.append(self.cifs_share.output_allow_access())
ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock
self.connection.allow_access(None, share, access, share_server)
expected_calls = [
mock.call(self.vdm.req_get()),
mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
]
xml_req_mock.assert_has_calls(expected_calls)
ssh_calls = [
mock.call(self.cifs_share.cmd_change_access('ro'), True),
]
ssh_cmd_mock.assert_has_calls(ssh_calls)
def test_allow_access_with_invalid_access_level(self):
share_server = fakes.SHARE_SERVER
share = fakes.CIFS_SHARE
access = fake_share.fake_access(access_level='fake_level')
self.assertRaises(exception.InvalidShareAccessLevel,
self.connection.allow_access,
None, share, access, share_server)
def test_allow_access_with_invalid_share_server_name(self):
share_server = fakes.SHARE_SERVER
share = fakes.CIFS_SHARE
access = fakes.CIFS_RW_ACCESS
hook = utils.RequestSideEffect()
hook.append(self.vdm.resp_get_succeed())
hook.append(self.cifs_server.resp_get_error())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
self.assertRaises(exception.EMCVmaxXMLAPIError,
self.connection.allow_access,
None, share, access, share_server)
expected_calls = [
mock.call(self.vdm.req_get()),
mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
]
xml_req_mock.assert_has_calls(expected_calls)
def test_allow_nfs_access(self):
share_server = fakes.SHARE_SERVER
share = fakes.NFS_SHARE
access = fakes.NFS_RW_ACCESS
rw_hosts = copy.deepcopy(fakes.FakeData.rw_hosts)
rw_hosts.append(access['access_to'])
ssh_hook = utils.SSHSideEffect()
ssh_hook.append(self.nfs_share.output_get_succeed(
rw_hosts=fakes.FakeData.rw_hosts,
ro_hosts=fakes.FakeData.ro_hosts))
ssh_hook.append(self.nfs_share.output_set_access_success())
ssh_hook.append(self.nfs_share.output_get_succeed(
rw_hosts=rw_hosts,
ro_hosts=fakes.FakeData.ro_hosts))
ssh_cmd_mock = utils.EMCNFSShareMock(side_effect=ssh_hook)
self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock
self.connection.allow_access(None, share, access, share_server)
ssh_calls = [
mock.call(self.nfs_share.cmd_get(), True),
mock.call(self.nfs_share.cmd_set_access(
rw_hosts=rw_hosts, ro_hosts=self.nfs_share.ro_hosts), True),
mock.call(self.nfs_share.cmd_get(), True),
]
ssh_cmd_mock.assert_has_calls(ssh_calls)
def test_allow_cifs_access_with_incorrect_access_type(self):
share_server = fakes.SHARE_SERVER
share = fakes.CIFS_SHARE
access = fake_share.fake_access(access_type='fake_type')
self.assertRaises(exception.InvalidShareAccess,
self.connection.allow_access,
None, share, access, share_server)
def test_allow_nfs_access_with_incorrect_access_type(self):
share_server = fakes.SHARE_SERVER
share = fakes.NFS_SHARE
access = fake_share.fake_access(access_type='fake_type')
self.assertRaises(exception.InvalidShareAccess,
self.connection.allow_access,
None, share, access, share_server)
def test_allow_access_with_incorrect_proto(self):
share_server = fakes.SHARE_SERVER
share = fake_share.fake_share(share_proto='FAKE_PROTO')
access = fake_share.fake_access()
self.assertRaises(exception.InvalidShare,
self.connection.allow_access,
None, share, access, share_server)
def test_deny_cifs_rw_access(self):
share_server = fakes.SHARE_SERVER
share = fakes.CIFS_SHARE
access = fakes.CIFS_RW_ACCESS
hook = utils.RequestSideEffect()
hook.append(self.vdm.resp_get_succeed())
hook.append(self.cifs_server.resp_get_succeed(
mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True))
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
ssh_hook = utils.SSHSideEffect()
ssh_hook.append(self.cifs_share.output_allow_access())
ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock
self.connection.deny_access(None, share, access, share_server)
expected_calls = [
mock.call(self.vdm.req_get()),
mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
]
xml_req_mock.assert_has_calls(expected_calls)
ssh_calls = [
mock.call(self.cifs_share.cmd_change_access(action='revoke'),
True),
]
ssh_cmd_mock.assert_has_calls(ssh_calls)
def test_deny_cifs_ro_access(self):
share_server = fakes.SHARE_SERVER
share = fakes.CIFS_SHARE
access = fakes.CIFS_RO_ACCESS
hook = utils.RequestSideEffect()
hook.append(self.vdm.resp_get_succeed())
hook.append(self.cifs_server.resp_get_succeed(
mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True))
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
ssh_hook = utils.SSHSideEffect()
ssh_hook.append(self.cifs_share.output_allow_access())
ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock
self.connection.deny_access(None, share, access, share_server)
expected_calls = [
mock.call(self.vdm.req_get()),
mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
]
xml_req_mock.assert_has_calls(expected_calls)
ssh_calls = [
mock.call(self.cifs_share.cmd_change_access('ro', 'revoke'), True),
]
ssh_cmd_mock.assert_has_calls(ssh_calls)
    def test_deny_cifs_access_with_invalid_share_server_name(self):
share_server = fakes.SHARE_SERVER
share = fakes.CIFS_SHARE
access = fakes.CIFS_RW_ACCESS
hook = utils.RequestSideEffect()
hook.append(self.vdm.resp_get_succeed())
hook.append(self.cifs_server.resp_get_error())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
self.assertRaises(exception.EMCVmaxXMLAPIError,
self.connection.deny_access,
None, share, access, share_server)
expected_calls = [
mock.call(self.vdm.req_get()),
mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
]
xml_req_mock.assert_has_calls(expected_calls)
def test_deny_nfs_access(self):
share_server = fakes.SHARE_SERVER
share = fakes.NFS_SHARE
access = fakes.NFS_RW_ACCESS
rw_hosts = copy.deepcopy(fakes.FakeData.rw_hosts)
rw_hosts.append(access['access_to'])
ssh_hook = utils.SSHSideEffect()
ssh_hook.append(self.nfs_share.output_get_succeed(
rw_hosts=rw_hosts,
ro_hosts=fakes.FakeData.ro_hosts))
ssh_hook.append(self.nfs_share.output_set_access_success())
ssh_hook.append(self.nfs_share.output_get_succeed(
rw_hosts=fakes.FakeData.rw_hosts,
ro_hosts=fakes.FakeData.ro_hosts))
ssh_cmd_mock = utils.EMCNFSShareMock(side_effect=ssh_hook)
self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock
self.connection.deny_access(None, share, access, share_server)
ssh_calls = [
mock.call(self.nfs_share.cmd_get(), True),
mock.call(self.nfs_share.cmd_set_access(
rw_hosts=self.nfs_share.rw_hosts,
ro_hosts=self.nfs_share.ro_hosts), True),
mock.call(self.nfs_share.cmd_get(), True),
]
ssh_cmd_mock.assert_has_calls(ssh_calls)
def test_deny_access_with_incorrect_proto(self):
share_server = fakes.SHARE_SERVER
share = fake_share.fake_share(share_proto='FAKE_PROTO')
access = fakes.CIFS_RW_ACCESS
self.assertRaises(exception.InvalidShare,
self.connection.deny_access,
None, share, access, share_server)
def test_deny_cifs_access_with_incorrect_access_type(self):
share_server = fakes.SHARE_SERVER
share = fakes.CIFS_SHARE
access = fake_share.fake_access(access_type='fake_type')
hook = utils.RequestSideEffect()
hook.append(self.vdm.resp_get_succeed())
hook.append(self.cifs_server.resp_get_succeed(
mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True))
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
ssh_hook = utils.SSHSideEffect()
ssh_hook.append(self.cifs_share.output_allow_access())
ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock
self.connection.deny_access(None, share, access, share_server)
ssh_calls = []
ssh_cmd_mock.assert_has_calls(ssh_calls)
expected_calls = []
xml_req_mock.assert_has_calls(expected_calls)
def test_deny_nfs_access_with_incorrect_access_type(self):
share_server = fakes.SHARE_SERVER
share = fakes.NFS_SHARE
access = fake_share.fake_access(access_type='fake_type')
rw_hosts = copy.deepcopy(fakes.FakeData.rw_hosts)
rw_hosts.append(access['access_to'])
ssh_hook = utils.SSHSideEffect()
ssh_hook.append(self.nfs_share.output_get_succeed(
rw_hosts=rw_hosts,
ro_hosts=fakes.FakeData.ro_hosts))
ssh_hook.append(self.nfs_share.output_set_access_success())
ssh_hook.append(self.nfs_share.output_get_succeed(
rw_hosts=fakes.FakeData.rw_hosts,
ro_hosts=fakes.FakeData.ro_hosts))
ssh_cmd_mock = utils.EMCNFSShareMock(side_effect=ssh_hook)
self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock
self.connection.deny_access(None, share, access, share_server)
ssh_calls = []
ssh_cmd_mock.assert_has_calls(ssh_calls)
def test_update_share_stats(self):
hook = utils.RequestSideEffect()
hook.append(self.mover.resp_get_ref_succeed())
hook.append(self.pool.resp_get_succeed())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
self.connection.update_share_stats(fakes.STATS)
expected_calls = [
mock.call(self.mover.req_get_ref()),
mock.call(self.pool.req_get()),
]
xml_req_mock.assert_has_calls(expected_calls)
for pool in fakes.STATS['pools']:
if pool['pool_name'] == fakes.FakeData.pool_name:
self.assertEqual(fakes.FakeData.pool_total_size,
pool['total_capacity_gb'])
free_size = (fakes.FakeData.pool_total_size -
fakes.FakeData.pool_used_size)
self.assertEqual(free_size, pool['free_capacity_gb'])
def test_update_share_stats_without_matched_config_pools(self):
        self.connection.pools = {'fake_pool'}
hook = utils.RequestSideEffect()
hook.append(self.mover.resp_get_ref_succeed())
hook.append(self.pool.resp_get_succeed())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
self.assertRaises(exception.EMCVmaxXMLAPIError,
self.connection.update_share_stats,
fakes.STATS)
expected_calls = [
mock.call(self.mover.req_get_ref()),
mock.call(self.pool.req_get()),
]
xml_req_mock.assert_has_calls(expected_calls)
def test_get_pool(self):
share = fakes.CIFS_SHARE
hook = utils.RequestSideEffect()
hook.append(self.fs.resp_get_succeed())
hook.append(self.pool.resp_get_succeed())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
pool_name = self.connection.get_pool(share)
expected_calls = [
mock.call(self.fs.req_get()),
mock.call(self.pool.req_get()),
]
xml_req_mock.assert_has_calls(expected_calls)
self.assertEqual(fakes.FakeData.pool_name, pool_name)
def test_get_pool_failed_to_get_filesystem_info(self):
share = fakes.CIFS_SHARE
hook = utils.RequestSideEffect()
hook.append(self.fs.resp_get_error())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
self.assertRaises(exception.EMCVmaxXMLAPIError,
self.connection.get_pool,
share)
expected_calls = [mock.call(self.fs.req_get())]
xml_req_mock.assert_has_calls(expected_calls)
def test_get_pool_failed_to_get_pool_info(self):
share = fakes.CIFS_SHARE
hook = utils.RequestSideEffect()
hook.append(self.fs.resp_get_succeed())
hook.append(self.pool.resp_get_error())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
self.assertRaises(exception.EMCVmaxXMLAPIError,
self.connection.get_pool,
share)
expected_calls = [
mock.call(self.fs.req_get()),
mock.call(self.pool.req_get()),
]
xml_req_mock.assert_has_calls(expected_calls)
def test_get_pool_failed_to_find_matched_pool_name(self):
share = fakes.CIFS_SHARE
hook = utils.RequestSideEffect()
hook.append(self.fs.resp_get_succeed())
hook.append(self.pool.resp_get_succeed(name='unmatch_pool_name',
id='unmatch_pool_id'))
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
self.assertRaises(exception.EMCVmaxXMLAPIError,
self.connection.get_pool,
share)
expected_calls = [
mock.call(self.fs.req_get()),
mock.call(self.pool.req_get()),
]
xml_req_mock.assert_has_calls(expected_calls)
@ddt.data({'port_conf': None,
'managed_ports': ['cge-1-0', 'cge-1-3']},
{'port_conf': '*',
'managed_ports': ['cge-1-0', 'cge-1-3']},
{'port_conf': ['cge-1-*'],
'managed_ports': ['cge-1-0', 'cge-1-3']},
{'port_conf': ['cge-1-3'],
'managed_ports': ['cge-1-3']})
@ddt.unpack
def test_get_managed_ports_one_port(self, port_conf, managed_ports):
hook = utils.SSHSideEffect()
hook.append(self.mover.output_get_physical_devices())
ssh_cmd_mock = mock.Mock(side_effect=hook)
expected_calls = [
mock.call(self.mover.cmd_get_physical_devices(), False),
]
self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock
self.connection.port_conf = port_conf
ports = self.connection.get_managed_ports()
self.assertIsInstance(ports, list)
self.assertEqual(sorted(managed_ports), sorted(ports))
ssh_cmd_mock.assert_has_calls(expected_calls)
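    # NOTE: illustrative sketch, not part of the original tests. The ddt data
    # above suggests that a port_conf of None or '*' selects every physical
    # device reported by the mover, while entries such as 'cge-1-*' behave
    # like shell-style globs. A stand-alone approximation of that matching
    # (fnmatch is an assumption; the driver's real helper is not shown here):
    #
    #     import fnmatch
    #
    #     def match_ports(available, patterns):
    #         patterns = [patterns] if isinstance(patterns, str) else (
    #             patterns or ['*'])
    #         return sorted({port for port in available
    #                        if any(fnmatch.fnmatch(port, pattern)
    #                               for pattern in patterns)})
    #
    #     match_ports(['cge-1-0', 'cge-1-3'], ['cge-1-*'])
    #     # -> ['cge-1-0', 'cge-1-3']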
def test_get_managed_ports_no_valid_port(self):
hook = utils.SSHSideEffect()
hook.append(self.mover.output_get_physical_devices())
ssh_cmd_mock = mock.Mock(side_effect=hook)
self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock
self.connection.port_conf = ['cge-2-0']
self.assertRaises(exception.BadConfigurationException,
self.connection.get_managed_ports)
def test_get_managed_ports_query_devices_failed(self):
hook = utils.SSHSideEffect()
hook.append(self.mover.fake_output)
ssh_cmd_mock = mock.Mock(side_effect=hook)
self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock
self.connection.port_conf = ['cge-2-0']
self.assertRaises(exception.EMCVmaxXMLAPIError,
self.connection.get_managed_ports)
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from oslo_serialization import jsonutils as json
from six.moves import http_client
from six.moves.urllib import parse as urllib
from tempest.lib.common import api_version_utils
from tempest.lib.common import rest_client
# NOTE(vsaienko): concurrent tests work because they are launched in
# separate processes so global variables are not shared among them.
BAREMETAL_MICROVERSION = None
def set_baremetal_api_microversion(baremetal_microversion):
global BAREMETAL_MICROVERSION
BAREMETAL_MICROVERSION = baremetal_microversion
def reset_baremetal_api_microversion():
global BAREMETAL_MICROVERSION
BAREMETAL_MICROVERSION = None
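# NOTE: illustrative usage sketch, not part of the original module. A test
# needing a specific Ironic microversion (the value below is hypothetical)
# would typically set it before issuing requests and reset it afterwards so
# the module-level global does not leak into other tests:
#
#     set_baremetal_api_microversion('1.31')
#     try:
#         pass  # requests now carry X-OpenStack-Ironic-API-Version: 1.31
#     finally:
#         reset_baremetal_api_microversion()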
def handle_errors(f):
"""A decorator that allows to ignore certain types of errors."""
@functools.wraps(f)
def wrapper(*args, **kwargs):
param_name = 'ignore_errors'
ignored_errors = kwargs.get(param_name, tuple())
if param_name in kwargs:
del kwargs[param_name]
try:
return f(*args, **kwargs)
except ignored_errors:
# Silently ignore errors
pass
return wrapper
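# NOTE: illustrative usage sketch for handle_errors, not part of the original
# module. The decorator pops the 'ignore_errors' keyword and silently swallows
# exceptions of the listed types, returning None instead of raising. The
# method and exception class below are hypothetical:
#
#     @handle_errors
#     def show_node(self, uuid):
#         return self._show_request('nodes', uuid)
#
#     # show_node(uuid, ignore_errors=(NodeNotFound,)) returns None when
#     # NodeNotFound is raised, instead of propagating the exception.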
class BaremetalClient(rest_client.RestClient):
"""Base Tempest REST client for Ironic API."""
api_microversion_header_name = 'X-OpenStack-Ironic-API-Version'
uri_prefix = ''
def get_headers(self):
headers = super(BaremetalClient, self).get_headers()
if BAREMETAL_MICROVERSION:
headers[self.api_microversion_header_name] = BAREMETAL_MICROVERSION
return headers
def request(self, *args, **kwargs):
resp, resp_body = super(BaremetalClient, self).request(*args, **kwargs)
if (BAREMETAL_MICROVERSION and
BAREMETAL_MICROVERSION != api_version_utils.LATEST_MICROVERSION):
api_version_utils.assert_version_header_matches_request(
self.api_microversion_header_name,
BAREMETAL_MICROVERSION,
resp)
return resp, resp_body
def serialize(self, object_dict):
"""Serialize an Ironic object."""
return json.dumps(object_dict)
def deserialize(self, object_str):
"""Deserialize an Ironic object."""
return json.loads(object_str)
def _get_uri(self, resource_name, uuid=None, permanent=False):
"""Get URI for a specific resource or object.
:param resource_name: The name of the REST resource, e.g., 'nodes'.
:param uuid: The unique identifier of an object in UUID format.
:returns: Relative URI for the resource or object.
"""
prefix = self.uri_prefix if not permanent else ''
return '{pref}/{res}{uuid}'.format(pref=prefix,
res=resource_name,
uuid='/%s' % uuid if uuid else '')
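    # NOTE: illustrative examples of the URIs built above, assuming a
    # subclass sets uri_prefix = 'v1' (the uuid value is hypothetical):
    #     _get_uri('nodes')                    -> 'v1/nodes'
    #     _get_uri('nodes', uuid='fake-uuid')  -> 'v1/nodes/fake-uuid'
    #     _get_uri('', permanent=True)         -> '/'  (the version root)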
def _make_patch(self, allowed_attributes, **kwargs):
"""Create a JSON patch according to RFC 6902.
:param allowed_attributes: An iterable object that contains a set of
allowed attributes for an object.
:param **kwargs: Attributes and new values for them.
        :returns: A JSON patch that sets values of the specified attributes to
            the new ones.
"""
def get_change(kwargs, path='/'):
for name, value in kwargs.items():
if isinstance(value, dict):
for ch in get_change(value, path + '%s/' % name):
yield ch
else:
if value is None:
yield {'path': path + name,
'op': 'remove'}
else:
yield {'path': path + name,
'value': value,
'op': 'replace'}
patch = [ch for ch in get_change(kwargs)
if ch['path'].lstrip('/') in allowed_attributes]
return patch
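    # NOTE: illustrative example of the patch built above (attribute names
    # and values are hypothetical). With allowed_attributes={'name',
    # 'extra/foo'}, the call
    #     _make_patch({'name', 'extra/foo'}, name='new', extra={'foo': None})
    # yields:
    #     [{'path': '/name', 'value': 'new', 'op': 'replace'},
    #      {'path': '/extra/foo', 'op': 'remove'}]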
def _list_request(self, resource, permanent=False, headers=None,
extra_headers=False, **kwargs):
"""Get the list of objects of the specified type.
:param resource: The name of the REST resource, e.g., 'nodes'.
:param headers: List of headers to use in request.
:param extra_headers: Specify whether to use headers.
:param **kwargs: Parameters for the request.
:returns: A tuple with the server response and deserialized JSON list
of objects
"""
uri = self._get_uri(resource, permanent=permanent)
if kwargs:
uri += "?%s" % urllib.urlencode(kwargs)
resp, body = self.get(uri, headers=headers,
extra_headers=extra_headers)
self.expected_success(http_client.OK, resp.status)
return resp, self.deserialize(body)
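    # NOTE: illustrative example (the resource and filter are hypothetical):
    #     _list_request('nodes', associated=True)
    # issues GET '<uri_prefix>/nodes?associated=True' and returns a tuple of
    # (response, deserialized JSON body).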
def _show_request(self, resource, uuid, permanent=False, **kwargs):
"""Gets a specific object of the specified type.
:param uuid: Unique identifier of the object in UUID format.
:returns: Serialized object as a dictionary.
"""
if 'uri' in kwargs:
uri = kwargs['uri']
else:
uri = self._get_uri(resource, uuid=uuid, permanent=permanent)
resp, body = self.get(uri)
self.expected_success(http_client.OK, resp.status)
return resp, self.deserialize(body)
def _create_request(self, resource, object_dict):
"""Create an object of the specified type.
:param resource: The name of the REST resource, e.g., 'nodes'.
:param object_dict: A Python dict that represents an object of the
specified type.
:returns: A tuple with the server response and the deserialized created
object.
"""
body = self.serialize(object_dict)
uri = self._get_uri(resource)
resp, body = self.post(uri, body=body)
self.expected_success(http_client.CREATED, resp.status)
return resp, self.deserialize(body)
def _create_request_no_response_body(self, resource, object_dict):
"""Create an object of the specified type.
Do not expect any body in the response.
:param resource: The name of the REST resource, e.g., 'nodes'.
:param object_dict: A Python dict that represents an object of the
specified type.
:returns: The server response.
"""
body = self.serialize(object_dict)
uri = self._get_uri(resource)
resp, body = self.post(uri, body=body)
self.expected_success(http_client.NO_CONTENT, resp.status)
return resp
def _delete_request(self, resource, uuid):
"""Delete specified object.
:param resource: The name of the REST resource, e.g., 'nodes'.
:param uuid: The unique identifier of an object in UUID format.
:returns: A tuple with the server response and the response body.
"""
uri = self._get_uri(resource, uuid)
resp, body = self.delete(uri)
self.expected_success(http_client.NO_CONTENT, resp.status)
return resp, body
def _patch_request(self, resource, uuid, patch_object):
"""Update specified object with JSON-patch.
:param resource: The name of the REST resource, e.g., 'nodes'.
:param uuid: The unique identifier of an object in UUID format.
:returns: A tuple with the server response and the serialized patched
object.
"""
uri = self._get_uri(resource, uuid)
patch_body = json.dumps(patch_object)
resp, body = self.patch(uri, body=patch_body)
self.expected_success(http_client.OK, resp.status)
return resp, self.deserialize(body)
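    # NOTE: illustrative sketch tying _make_patch and _patch_request together
    # (the resource, uuid and attribute below are hypothetical):
    #     patch = self._make_patch({'name'}, name='new-name')
    #     resp, node = self._patch_request('nodes', 'fake-uuid', patch)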
@handle_errors
def get_api_description(self):
"""Retrieves all versions of the Ironic API."""
return self._list_request('', permanent=True)
@handle_errors
def get_version_description(self, version='v1'):
"""Retrieves the description of the API.
:param version: The version of the API. Default: 'v1'.
:returns: Serialized description of API resources.
"""
return self._list_request(version, permanent=True)
def _put_request(self, resource, put_object):
"""Update specified object with JSON-patch."""
uri = self._get_uri(resource)
put_body = json.dumps(put_object)
resp, body = self.put(uri, body=put_body)
self.expected_success([http_client.ACCEPTED, http_client.NO_CONTENT],
resp.status)
return resp, body