# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for `cinder.wsgi`."""
import mock
import os.path
import tempfile
import urllib2
from oslo.config import cfg
import testtools
import webob
import webob.dec
from cinder import exception
from cinder.openstack.common import gettextutils
from cinder import test
import cinder.wsgi
CONF = cfg.CONF
TEST_VAR_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
'var'))
class TestLoaderNothingExists(test.TestCase):
"""Loader tests where os.path.exists always returns False."""
def setUp(self):
super(TestLoaderNothingExists, self).setUp()
self.stubs.Set(os.path, 'exists', lambda _: False)
def test_config_not_found(self):
self.assertRaises(
cinder.exception.ConfigNotFound,
cinder.wsgi.Loader,
)
class TestLoaderNormalFilesystem(test.TestCase):
"""Loader tests with normal filesystem (unmodified os.path module)."""
_paste_config = """
[app:test_app]
use = egg:Paste#static
document_root = /tmp
"""
def setUp(self):
super(TestLoaderNormalFilesystem, self).setUp()
self.config = tempfile.NamedTemporaryFile(mode="w+t")
self.config.write(self._paste_config.lstrip())
self.config.seek(0)
self.config.flush()
self.loader = cinder.wsgi.Loader(self.config.name)
self.addCleanup(self.config.close)
def test_config_found(self):
self.assertEqual(self.config.name, self.loader.config_path)
def test_app_not_found(self):
self.assertRaises(
cinder.exception.PasteAppNotFound,
self.loader.load_app,
"non-existent app",
)
def test_app_found(self):
url_parser = self.loader.load_app("test_app")
self.assertEqual("/tmp", url_parser.directory)
class TestWSGIServer(test.TestCase):
"""WSGI server tests."""
def _ipv6_configured():
try:
with file('/proc/net/if_inet6') as f:
return len(f.read()) > 0
except IOError:
return False
def test_no_app(self):
server = cinder.wsgi.Server("test_app", None,
host="127.0.0.1", port=0)
self.assertEqual("test_app", server.name)
def test_start_random_port(self):
server = cinder.wsgi.Server("test_random_port", None, host="127.0.0.1")
server.start()
self.assertNotEqual(0, server.port)
server.stop()
server.wait()
@testtools.skipIf(not _ipv6_configured(),
"Test requires an IPV6 configured interface")
def test_start_random_port_with_ipv6(self):
server = cinder.wsgi.Server("test_random_port",
None,
host="::1")
server.start()
self.assertEqual("::1", server.host)
self.assertNotEqual(0, server.port)
server.stop()
server.wait()
def test_app(self):
greetings = 'Hello, World!!!'
def hello_world(env, start_response):
if env['PATH_INFO'] != '/':
start_response('404 Not Found',
[('Content-Type', 'text/plain')])
return ['Not Found\r\n']
start_response('200 OK', [('Content-Type', 'text/plain')])
return [greetings]
server = cinder.wsgi.Server("test_app", hello_world,
host="127.0.0.1", port=0)
server.start()
response = urllib2.urlopen('http://127.0.0.1:%d/' % server.port)
self.assertEqual(greetings, response.read())
server.stop()
def test_app_using_ssl(self):
CONF.set_default("ssl_cert_file",
os.path.join(TEST_VAR_DIR, 'certificate.crt'))
CONF.set_default("ssl_key_file",
os.path.join(TEST_VAR_DIR, 'privatekey.key'))
greetings = 'Hello, World!!!'
@webob.dec.wsgify
def hello_world(req):
return greetings
server = cinder.wsgi.Server("test_app", hello_world,
host="127.0.0.1", port=0)
server.start()
response = urllib2.urlopen('https://127.0.0.1:%d/' % server.port)
self.assertEqual(greetings, response.read())
server.stop()
@testtools.skipIf(not _ipv6_configured(),
"Test requires an IPV6 configured interface")
def test_app_using_ipv6_and_ssl(self):
CONF.set_default("ssl_cert_file",
os.path.join(TEST_VAR_DIR, 'certificate.crt'))
CONF.set_default("ssl_key_file",
os.path.join(TEST_VAR_DIR, 'privatekey.key'))
greetings = 'Hello, World!!!'
@webob.dec.wsgify
def hello_world(req):
return greetings
server = cinder.wsgi.Server("test_app",
hello_world,
host="::1",
port=0)
server.start()
response = urllib2.urlopen('https://[::1]:%d/' % server.port)
self.assertEqual(greetings, response.read())
server.stop()
def test_reset_pool_size_to_default(self):
server = cinder.wsgi.Server("test_resize", None, host="127.0.0.1")
server.start()
# Stopping the server, which in turn sets pool size to 0
server.stop()
self.assertEqual(server._pool.size, 0)
# Resetting pool size to default
server.reset()
server.start()
self.assertEqual(server._pool.size, 1000)
class ExceptionTest(test.TestCase):
def _wsgi_app(self, inner_app):
# NOTE(luisg): In order to test localization, we need to
# make sure the lazy _() is installed in the 'fault' module
# also we don't want to install the _() system-wide and
# potentially break other test cases, so we do it here for this
# test suite only.
gettextutils.install('')
gettextutils.enable_lazy()
from cinder.api.middleware import fault
return fault.FaultWrapper(inner_app)
def _do_test_exception_safety_reflected_in_faults(self, expose):
class ExceptionWithSafety(exception.CinderException):
safe = expose
@webob.dec.wsgify
def fail(req):
raise ExceptionWithSafety('some explanation')
api = self._wsgi_app(fail)
resp = webob.Request.blank('/').get_response(api)
self.assertIn('{"computeFault', resp.body)
expected = ('ExceptionWithSafety: some explanation' if expose else
'The server has either erred or is incapable '
'of performing the requested operation.')
self.assertIn(expected, resp.body)
self.assertEqual(resp.status_int, 500, resp.body)
def test_safe_exceptions_are_described_in_faults(self):
self._do_test_exception_safety_reflected_in_faults(True)
def test_unsafe_exceptions_are_not_described_in_faults(self):
self._do_test_exception_safety_reflected_in_faults(False)
def _do_test_exception_mapping(self, exception_type, msg):
@webob.dec.wsgify
def fail(req):
raise exception_type(msg)
api = self._wsgi_app(fail)
resp = webob.Request.blank('/').get_response(api)
self.assertIn(msg, resp.body)
self.assertEqual(resp.status_int, exception_type.code, resp.body)
if hasattr(exception_type, 'headers'):
for (key, value) in exception_type.headers.iteritems():
self.assertIn(key, resp.headers)
self.assertEqual(resp.headers[key], value)
def test_quota_error_mapping(self):
self._do_test_exception_mapping(exception.QuotaError, 'too many used')
def test_non_cinder_notfound_exception_mapping(self):
class ExceptionWithCode(Exception):
code = 404
self._do_test_exception_mapping(ExceptionWithCode,
'NotFound')
def test_non_cinder_exception_mapping(self):
class ExceptionWithCode(Exception):
code = 417
self._do_test_exception_mapping(ExceptionWithCode,
'Expectation failed')
def test_exception_with_none_code_throws_500(self):
class ExceptionWithNoneCode(Exception):
code = None
@webob.dec.wsgify
def fail(req):
raise ExceptionWithNoneCode()
api = self._wsgi_app(fail)
resp = webob.Request.blank('/').get_response(api)
self.assertEqual(500, resp.status_int)
@mock.patch('cinder.openstack.common.gettextutils.translate')
def test_cinder_exception_with_localized_explanation(self, mock_t9n):
msg = 'My Not Found'
msg_translation = 'Mi No Encontrado'
message = gettextutils.Message(msg, '')
@webob.dec.wsgify
def fail(req):
class MyVolumeNotFound(exception.NotFound):
def __init__(self):
self.msg = message
self.safe = True
raise MyVolumeNotFound()
# Test response without localization
def mock_get_non_localized_message(msgid, locale):
return msg
mock_t9n.side_effect = mock_get_non_localized_message
api = self._wsgi_app(fail)
resp = webob.Request.blank('/').get_response(api)
self.assertEqual(404, resp.status_int)
self.assertIn(msg, resp.body)
# Test response with localization
def mock_translate(msgid, locale):
if isinstance(msgid, gettextutils.Message):
return msg_translation
return msgid
mock_t9n.side_effect = mock_translate
api = self._wsgi_app(fail)
resp = webob.Request.blank('/').get_response(api)
self.assertEqual(404, resp.status_int)
self.assertIn(msg_translation, resp.body)
import base64
import logging
import pickle
from datetime import datetime
from django.core.mail import EmailMessage
from django.db import models
PRIORITIES = (
("1", "high"),
("2", "medium"),
("3", "low"),
("4", "deferred"),
)
class MessageManager(models.Manager):
def non_deferred(self):
"""
the messages in the queue not deferred
"""
return self.filter(priority__lt="4")
def deferred(self):
"""
the deferred messages in the queue
"""
return self.filter(priority="4")
def retry_deferred(self, new_priority=2):
count = 0
for message in self.deferred():
if message.retry(new_priority):
count += 1
return count
def email_to_db(email):
# pickle.dumps returns essentially binary data which we need to encode
# to store in a unicode field.
return base64.encodestring(pickle.dumps(email))
def db_to_email(data):
if data == u"":
return None
else:
try:
return pickle.loads(base64.decodestring(data))
except Exception:
try:
# previous method was to just do pickle.dumps(val)
return pickle.loads(data.encode("ascii"))
except Exception:
return None
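# Illustrative sketch, not part of the original module (the addresses are made
# up): shows the round trip performed by email_to_db()/db_to_email(), i.e. an
# EmailMessage pickled and base64-encoded so it can live in a text field.
def _email_roundtrip_example():
    msg = EmailMessage(subject="hi", body="hello",
                       from_email="from@example.com", to=["to@example.com"])
    data = email_to_db(msg)       # unicode-safe base64 of the pickled message
    restored = db_to_email(data)  # equivalent EmailMessage, or None on failure
    return restored.subject == msg.subject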
class Message(models.Model):
# The actual data - a pickled EmailMessage
message_data = models.TextField()
when_added = models.DateTimeField(default=datetime.now)
priority = models.CharField(max_length=1, choices=PRIORITIES, default="2")
# @@@ campaign?
# @@@ content_type?
objects = MessageManager()
## Suggested index, very useful to the send queue:
## CREATE INDEX mailer_message_send_order ON mailer_message (priority, when_added) WHERE priority < '4';
def defer(self):
self.priority = "4"
self.save(force_update=True)
def retry(self, new_priority=2):
if self.priority == "4":
self.priority = new_priority
self.save(force_update=True)
return True
else:
return False
def _get_email(self):
return db_to_email(self.message_data)
def _set_email(self, val):
self.message_data = email_to_db(val)
email = property(_get_email, _set_email, doc=
"""EmailMessage object. If this is mutated, you will need to
set the attribute again to cause the underlying serialised data to be updated.""")
@property
def to_addresses(self):
email = self.email
if email is not None:
return email.to
else:
return []
@property
def subject(self):
email = self.email
if email is not None:
return email.subject
else:
return ""
def filter_recipient_list(lst):
if lst is None:
return None
retval = []
for e in lst:
if DontSendEntry.objects.has_address(e):
logging.info("skipping email to %s as on don't send list " % e.encode("utf-8"))
else:
retval.append(e)
return retval
def make_message(subject="", body="", from_email=None, to=None, bcc=None,
attachments=None, headers=None, priority=None):
"""
Creates a simple message for the email parameters supplied.
The 'to' and 'bcc' lists are filtered using DontSendEntry.
If needed, the 'email' attribute can be set to any instance of EmailMessage
if e-mails with attachments etc. need to be supported.
Call 'save()' on the result when it is ready to be sent, and not before.
"""
to = filter_recipient_list(to)
bcc = filter_recipient_list(bcc)
core_msg = EmailMessage(subject=subject, body=body, from_email=from_email,
to=to, bcc=bcc, attachments=attachments, headers=headers)
db_msg = Message(priority=priority)
db_msg.email = core_msg
return db_msg
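# Illustrative usage sketch, not part of the original module (the subject and
# addresses are made up): make_message() filters recipients against
# DontSendEntry and returns an unsaved Message whose .email property wraps the
# underlying EmailMessage.
def _make_message_example():
    db_msg = make_message(subject="Welcome", body="Hello there",
                          from_email="noreply@example.com",
                          to=["user@example.com"], priority="2")
    db_msg.save()  # save only once the message is ready to be queued for sending
    return db_msg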
class DontSendEntryManager(models.Manager):
def has_address(self, address):
"""
is the given address on the don't send list?
"""
if self.filter(to_address__iexact=address).exists():
return True
else:
return False
class DontSendEntry(models.Model):
to_address = models.EmailField()
when_added = models.DateTimeField()
# @@@ who added?
# @@@ comment field?
objects = DontSendEntryManager()
class Meta:
verbose_name = "don't send entry"
verbose_name_plural = "don't send entries"
RESULT_CODES = (
("1", "success"),
("2", "don't send"),
("3", "failure"),
# @@@ other types of failure?
)
class MessageLogManager(models.Manager):
def log(self, message, result_code, log_message=""):
"""
create a log entry for an attempt to send the given message and
record the given result and (optionally) a log message
"""
return self.create(
message_data = message.message_data,
when_added = message.when_added,
priority = message.priority,
# @@@ other fields from Message
result = result_code,
log_message = log_message,
)
class MessageLog(models.Model):
# fields from Message
message_data = models.TextField()
when_added = models.DateTimeField()
priority = models.CharField(max_length=1, choices=PRIORITIES)
# @@@ campaign?
# additional logging fields
when_attempted = models.DateTimeField(default=datetime.now, db_index=True)
result = models.CharField(max_length=1, choices=RESULT_CODES)
log_message = models.TextField()
objects = MessageLogManager()
@property
def email(self):
return db_to_email(self.message_data)
@property
def to_addresses(self):
email = self.email
if email is not None:
return email.to
else:
return []
@property
def subject(self):
email = self.email
if email is not None:
return email.subject
else:
return ""
###
# Copyright (c) 2009, Juju, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the author of this software nor the names of
# the contributors to the software may be used to endorse or
# promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###
import os
import re
import textwrap
from OrderedDict import OrderedDict
from optparse import OptionParser, OptionValueError
class InvalidSyntax(Exception):
def __init__(self, lineno, msg):
self.lineno = lineno
self.msg = msg
def __str__(self):
return '%s (on line %s)' % (self.msg, self.lineno)
class MissingName(InvalidSyntax):
def __init__(self, lineno):
InvalidSyntax.__init__(self, lineno, 'Could not find variable name')
class UnregisteredName(InvalidSyntax):
def __init__(self, lineno, name):
InvalidSyntax.__init__(self, lineno, 'Unregistered name: %r' % name)
class GroupExpectsNoValue(Exception):
def __init__(self, name):
self.name = name
def __str__(self):
return 'Group expects no value: %r' % self.name
def wrap(comment):
return textwrap.wrap(' '.join(comment.split()))
def writeComment(fp, comment):
for line in wrap(comment):
fp.write('# %s\n' % line)
def OptparseCallback(option, optString, valueString, parser, Value):
try:
Value.setFromString(valueString)
except ValueError, e:
raise OptionValueError('%s option expected %s, received %r (%s)' %
(optString, Value.type(), valueString, e))
class IgnoreValue(object):
"""Used non-strict Groups to ignore the value in readfp."""
def expectsValue(self):
return True
def setFromString(self, s):
return
class Group(object):
"""All configuration variables are groups, that is, all configuration variables can have other groups and variables registered under them. Experience (from the very similar configuration in Supybot) has shown that making non-group variables is simply not worth the trouble and inconsistency."""
def __init__(self, name, comment=None, Child=None, strict=True):
"""
@param name: The name for this group. An argument could be made for making the group itself name-agnostic and only giving it a name upon registration with another group, but that would cripple unregistered groups.
@param comment: A helpful comment indicating the usage/meaning of a particular group. This comment will be written to configuration files and used as the help text of the optparse OptionParser the group can generate.
@param Child: A callable (usually a class) which, if not None, will be used in the get() method to create a requested child rather than raising KeyError.
"""
# All of these are prefixed with underscores so they won't conflict with
# registered children.
if name.startswith('_'):
raise ValueError('Names beginning with an underscore are forbidden: %r'%name)
self._name = name
self._parent = None
self._Child = Child
self._strict = strict
self._comment = comment
self._children = OrderedDict()
def get(self, name):
"""Returns the child variable with the given name. If no such variable exists and the Child argument was given to __init__, a new variable will be created and returned.
@param name: The name of the child to retrieve.
"""
try:
return self._children[name]
except KeyError:
if self._Child is not None:
child = self._Child(name)
self.register(child)
return child
else:
raise
def __getattr__(self, name):
if name.startswith('_'):
return object.__getattribute__(self, name)
try:
return self.get(name)
except KeyError:
raise AttributeError(name)
def __call__(self):
# Having this here offers a better exception/error message than __getattr__'s
# AttributeError.
raise GroupExpectsNoValue(self._fullname())
def register(self, child):
"""Registers the given child with this group. Any previously-registered children
with the same name are replaced.
@param child: The child to register.
"""
self._children[child._name] = child
child._parent = self
return child
def _fullname(self, parentName=None, childName=None):
if childName is None:
childName = self._name
if parentName is None and self._parent is not None:
parentName = self._parent._fullname()
if not parentName:
return childName
else:
return '%s.%s' % (parentName, childName)
def writefp(self, fp, annotate=True, parentName=None):
"""Writes this configuration group and its children in their current state to the given file(-like) object.
@param fp: The file(-like) object to write.
@param annotate: Flag determining whether to write comments to the given file object. Default values are still written, but commented out.
@param parentName: The name of the parent to prefix to this group's own name and the name of its children.
"""
if self._comment and annotate:
writeComment(fp, self._comment)
fp.write('\n')
myname = self._fullname(parentName)
for child in self.children():
child.writefp(fp, annotate=annotate, parentName=myname)
_sepRe = re.compile(r'\s*[:=]\s*')
def readfp(self, fp):
"""Reads the given file object, setting the state of this configuration group and its children appropriately. Comment lines and blank lines are ignored; comment lines are those which begin (apart from leading whitespace) with a '#' character. Comments cannot be initiated part way through a line: e.g., a line 'foo: bar # baz' gives the 'foo' configuration variable the literal value 'bar # baz'. Non-comment lines consist of a configuration variable name followed by optional whitespace, a separator of ':' or '=', more optional whitespace, and finally the value of that variable in string form."""
lineno = 0
for line in fp:
lineno += 1
line = line.strip()
if not line or line.startswith('#'):
continue
try:
(name, value) = self._sepRe.split(line, 1)
except ValueError:
raise MissingName(lineno)
value = value.strip()
parts = name.split('.')
if parts.pop(0) != self._name:
if not self._strict:
continue # Just ignore other names.
raise UnregisteredName(lineno, name)
group = self
for part in parts:
try:
group = group.get(part)
except KeyError:
if not self._strict:
# Ignore unknown names when not strict.
group = IgnoreValue()
break
raise UnregisteredName(lineno, name)
if not group.expectsValue():
raise InvalidSyntax(lineno, '%s expects no value' % name)
group.setFromString(value)
def read(self, filename):
"""Calls readfp with a file object opened with the given name."""
fp = open(filename)
try:
self.readfp(fp)
finally:
fp.close()
def readenv(self, environ=None):
"""Reads the given environment dictionary, setting the state of this configuration group and its children appropriately. Unrecognized env variable names are ignored. Environment variables are expected to be capitalized, parts separated by underscores. For instance, if you would access the configuration variable via 'foo.bar.baz' in Python, the environment variable expected would be FOO_BAR_BAZ.
@param environ: The environment dictionary. Defaults to os.environ.
@type environ: dict
"""
if environ is None:
environ = os.environ
for (name, variable) in self:
if not variable.expectsValue():
continue
envName = name.replace('.', '_').upper()
try:
variable.setFromString(environ[envName])
except KeyError:
continue
except ValueError, e:
raise ValueError('Invalid environment variable %s: %s' % (envName, e))
def __iter__(self):
"""Generates a series of (fullname, configuration variable) pairs for this Group
and its children."""
yield (self._name, self)
for child in self.children():
for (childname, grandchild) in child:
yield (self._fullname(self._name, childname), grandchild)
def toOptionParser(self, parser=None, **kwargs):
"""Modifies or produces an optparse.OptionParser which will set the appropriate variables in this configuration tree when certain options are given. Options are converted to lowercase and separated by dashes, in accordance with the common practice for long options in *nix. For instance, if you would access the configuration variable via 'foo.bar.baz' in Python, the command line option associated with that variable would be --foo-bar-baz."""
if parser is None:
parser = OptionParser(**kwargs)
for (name, variable) in self:
if not variable.expectsValue():
continue
optionName = name.replace('.', '-').lower()
parser.add_option('', '--' + optionName, action="callback",
type="string", callback=OptparseCallback,
metavar=variable.type().upper(), help=variable._comment,
callback_args=(variable,))
return parser
def children(self):
return self._children.values()
def expectsValue(self):
return False
parent = object()
class Value(Group):
def __init__(self, name, default=None, **kwargs):
Group.__init__(self, name, **kwargs)
self._value = None
self._default = default
@property
def default(self):
if callable(self._default):
return self._default()
elif self._default is parent:
return self._parent()
else:
return self._default
def __call__(self):
if self._value is None:
return self.default
else:
return self._value
@classmethod
def type(cls):
if cls is Value:
return 'string'
else:
return cls.__name__.lower()
def set(self, v):
self._value = v
def setFromString(self, s):
self.set(self.fromString(s))
def fromString(self, s):
return s
def toString(self, v):
return str(v)
def __str__(self):
return self.toString(self())
def writefp(self, fp, annotate=True, parentName=None):
myname = self._fullname(parentName)
if self._comment is not None and annotate:
writeComment(fp, self._comment)
if self._value is None:
fp.write('# ') # Document the default value, but comment it out.
if self() is None:
stringValue = '(no default)'
else:
stringValue = str(self)
fp.write('%s: %s\n' % (myname, stringValue))
if annotate:
fp.write('\n') # Extra newline makes comments more easily distinguishable.
for child in self.children():
child.writefp(fp, annotate=annotate, parentName=myname)
def expectsValue(self):
return True
def isSet(self):
return self._value is not None or self.default is not None
def isDefault(self):
return self._value is None
def reset(self):
self._value = None
class Bool(Value):
def fromString(self, s):
if s.lower() in ['true', 'on', '1', 'yes']:
return True
elif s.lower() in ['false', 'off', '0', 'no']:
return False
else:
raise ValueError('%r cannot be converted to bool' % s)
class Int(Value):
def fromString(self, s):
if s.startswith('0x'):
return int(s[2:], 16)
elif s.startswith('0'):
return int(s, 8)
else:
return int(s)
class Float(Value):
fromString = float
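# Illustrative usage sketch, not part of the original module (the group and
# variable names are made up): build a small configuration tree, then read
# values from config-style lines and from an environment-like dict, as
# described in Group.readfp() and Group.readenv().
def _group_usage_example():
    app = Group('app', comment='Example application settings')
    app.register(Value('host', default='localhost', comment='Server host'))
    app.register(Int('port', default=8080, comment='Server port'))
    # readfp() only needs an iterable of lines, so a plain list works here.
    app.readfp(['# comments and blank lines are ignored',
                'app.host: example.org'])
    app.readenv({'APP_PORT': '9090'})
    return app.host(), app.port()   # ('example.org', 9090)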
# -*- coding: utf-8 -*-
"""Provides strategy object."""
from __future__ import absolute_import
from functools import partial
import re
from .. import t1types
from ..entity import Entity
from ..utils import suppress
PIXEL_PATTERN = re.compile(r'\[(\d+)\]')
OPERATOR_PATTERN = re.compile(r'(AND|OR)')
class Strategy(Entity):
"""Strategy entity."""
collection = 'strategies'
resource = 'strategy'
_relations = {
'campaign', 'currency', 'time_zone',
}
_seg_incexc_ops = t1types.enum({'AND', 'OR'}, 'OR')
_pacing_ints = t1types.enum({'hour', 'day', 'week', 'month', 'campaign',
'not-applicable'}, 'not-applicable')
_pacing_types = t1types.enum({'even', 'asap', 'no-limit'}, 'no-limit')
_goal_type = t1types.enum({'spend', 'reach', 'cpc', 'cpe', 'cpa', 'roi', 'ctr', 'vcpm', 'vcr', 'viewability_rate'},
'cpc')
_media_type = t1types.enum({'DISPLAY', 'VIDEO'}, 'DISPLAY')
_pac_int = t1types.enum({'hour', 'day'}, 'day')
_pac_type = t1types.enum({'even', 'asap'}, 'even')
_site_selec = t1types.enum({'MATHSELECT_250', 'EXCLUDE_UGC', 'ALL',
'REDUCED'}, 'REDUCED')
_supply_types = t1types.enum({'RTB', 'RMX_API', 'T1_RMX', 'MKT', 'BATCH'},
'RTB')
_type = t1types.enum({'REM', 'GBO', 'AUD'}, 'GBO')
_pull = {
'audience_segment_exclude_op': None,
'audience_segment_include_op': None,
'bid_aggressiveness': float,
'bid_price_is_media_only': t1types.int_to_bool,
'budget': float,
'campaign_id': int,
'created_on': t1types.strpt,
'currency_code': None,
'description': None,
'effective_goal_value': float,
'end_date': t1types.strpt,
'feature_compatibility': None,
'frequency_amount': int,
'frequency_interval': None,
'frequency_optimization': t1types.int_to_bool,
'frequency_type': None,
'goal_type': None,
'goal_value': float,
'id': int,
'impression_cap': int,
'impression_pacing_amount': int,
'impression_pacing_interval': None,
'impression_pacing_type': None,
'max_bid': float,
'max_bid_wm': float,
'min_bid': float,
'media_type': None,
'name': None,
'pacing_amount': float,
'pacing_interval': None,
'pacing_type': None,
'pixel_target_expr': None,
'roi_target': float,
'run_on_all_exchanges': t1types.int_to_bool,
'run_on_all_pmp': t1types.int_to_bool,
'run_on_display': t1types.int_to_bool,
'run_on_mobile': t1types.int_to_bool,
'run_on_streaming': t1types.int_to_bool,
'site_restriction_transparent_urls': t1types.int_to_bool,
'site_selectiveness': None,
'start_date': t1types.strpt,
'status': t1types.int_to_bool,
'supply_type': None,
'targeting_segment_exclude_op': None,
'targeting_segment_include_op': None,
'type': None,
'updated_on': t1types.strpt,
'use_campaign_end': t1types.int_to_bool,
'use_campaign_start': t1types.int_to_bool,
'use_mm_freq': t1types.int_to_bool,
'use_optimization': t1types.int_to_bool,
'version': int,
'zone_name': None,
}
_push = _pull.copy()
_push.update({
'audience_segment_exclude_op': _seg_incexc_ops,
'audience_segment_include_op': _seg_incexc_ops,
'bid_price_is_media_only': int,
'end_date': partial(t1types.strft, null_on_none=True),
'frequency_interval': _pacing_ints,
'frequency_optimization': int,
'frequency_type': _pacing_types,
'goal_type': _goal_type,
'impression_pacing_interval': _pacing_ints,
'impression_pacing_type': _pacing_types,
'media_type': _media_type,
'pacing_interval': _pac_int,
'pacing_type': _pac_type,
'run_on_all_exchanges': int,
'run_on_all_pmp': int,
'run_on_display': int,
'run_on_mobile': int,
'run_on_streaming': int,
'site_restriction_transparent_urls': int,
'site_selectiveness': _site_selec,
'start_date': partial(t1types.strft, null_on_none=True),
'status': int,
'supply_type': _supply_types,
'targeting_segment_exclude_op': _seg_incexc_ops,
'targeting_segment_include_op': _seg_incexc_ops,
'type': _type,
'use_campaign_end': int,
'use_campaign_start': int,
'use_mm_freq': int,
'use_optimization': int,
})
_readonly = Entity._readonly | {'effective_goal_value', 'zone_name'}
def __init__(self, session, properties=None, **kwargs):
super(Strategy, self).__init__(session, properties, **kwargs)
try:
self.pixel_target_expr
except AttributeError:
self.pixel_target_expr = ''
self._deserialize_target_expr()
def _deserialize_target_expr(self):
"""Deserialize pixel_target_expr string into dict"""
if 'AND NOT' in self.pixel_target_expr:
include_string, exclude_string = self.pixel_target_expr.split('AND NOT')
elif 'NOT' in self.pixel_target_expr:
include_string, exclude_string = self.pixel_target_expr.split('NOT')
elif self.pixel_target_expr:
include_string = self.pixel_target_expr
exclude_string = ''
else:
include_string = ''
exclude_string = ''
include_operator = OPERATOR_PATTERN.search(include_string)
exclude_operator = OPERATOR_PATTERN.search(exclude_string)
if include_operator:
include_operator = include_operator.group(0)
if exclude_operator:
exclude_operator = exclude_operator.group(0)
self.pixel_target_expr = {
'include': {
'pixels': [int(pix) for pix in PIXEL_PATTERN.findall(include_string)],
'operator': include_operator,
},
'exclude': {
'pixels': [int(pix) for pix in PIXEL_PATTERN.findall(exclude_string)],
'operator': exclude_operator,
},
}
def save_supplies(self, data):
url = self._construct_url(addl=['supplies', ])
self._save_related(data, url)
def save_deals(self, data):
url = self._construct_url(addl=['deals', ])
self._save_related(data, url)
def _save_related(self, data, url):
entity, _ = super(Strategy, self)._post(self._get_service_path(), url, data)
self._update_self(entity)
self._deserialize_target_expr()
if 'relations' in self._properties:
del self._properties['relations']
def save_domains(self, data):
url = self._construct_url(addl=['domain_restrictions', ])
# this endpoint doesn't return an entity like the supplies endpoint
# so we ignore the error
with suppress(AttributeError):
entity, _ = super(Strategy, self)._post(self._get_service_path(), url, data)
# you can't get these values so we don't need to reset anything
def save_audience_segments(self, data):
url = self._construct_url(addl=['audience_segments', ])
entity, _ = super(Strategy, self)._post(self._get_service_path(), url, data)
def save_targeting_segments(self, data):
url = self._construct_url(addl=['targeting_segments', ])
entity, _ = super(Strategy, self)._post(self._get_service_path(), url, data)
def remove_retired_audience_segments(self, ids):
"""Unassign the specified retired audience segments from the strategy."""
url = self._construct_url(addl=['retired_audience_segments', ])
data = {'retired_segments.{0}.id'.format(ind + 1): x for ind, x in enumerate(ids)}
self._post(self._get_service_path(), url, data)
def _serialize_target_expr(self):
"""Serialize pixel_target_expr dict into string"""
include_bool = '] {} ['.format(self.pixel_target_expr['include']['operator'] or 'OR')
include_pixels = self.pixel_target_expr['include']['pixels']
exclude_bool = '] {} ['.format(self.pixel_target_expr['exclude']['operator'] or 'OR')
exclude_pixels = self.pixel_target_expr['exclude']['pixels']
include_string = '( [{}] )'.format(include_bool.join(
str(pix) for pix in include_pixels)) if include_pixels else ''
exclude_string = 'NOT ( [{}] )'.format(exclude_bool.join(
str(pix) for pix in exclude_pixels)) if exclude_pixels else ''
if include_string and exclude_string:
return '{} AND {}'.format(include_string, exclude_string)
else:
return include_string + exclude_string
def save(self, data=None, url=None):
"""Save object to T1 accounting for fields an pixel target expr"""
if data is None:
data = self._properties.copy()
data['pixel_target_expr'] = self._serialize_target_expr()
if getattr(self, 'use_campaign_start', False) and 'start_date' in data:
self._properties.pop('start_date', None)
data['start_date'] = None
if getattr(self, 'use_campaign_end', False) and 'end_date' in data:
self._properties.pop('end_date', None)
data['end_date'] = None
super(Strategy, self).save(data=data, url=url)
# Re-set the fields so that if the same object gets saved again, we
# compare against the re-initialized values
self._deserialize_target_expr()
@property
def pixel_target_expr_string(self):
"""Return string version of pixel_target_expr"""
return self._serialize_target_expr()
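# Illustrative sketch, not part of the original module (the pixel IDs are made
# up): the pixel_target_expr forms translated by _deserialize_target_expr()
# and _serialize_target_expr().
#   string form: '( [123] OR [456] ) AND NOT ( [789] )'
#   dict form:   {'include': {'pixels': [123, 456], 'operator': 'OR'},
#                 'exclude': {'pixels': [789], 'operator': None}}
def _pixel_target_expr_example():
    expr = '( [123] OR [456] ) AND NOT ( [789] )'
    include_string, exclude_string = expr.split('AND NOT')
    return {
        'include': [int(pix) for pix in PIXEL_PATTERN.findall(include_string)],
        'exclude': [int(pix) for pix in PIXEL_PATTERN.findall(exclude_string)],
    }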
#!/usr/bin/env vpython
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittests for xcodebuild_runner.py."""
import logging
import mock
import os
import unittest
import iossim_util
import result_sink_util
import test_apps
from test_result_util import ResultCollection, TestResult, TestStatus
import test_runner
import test_runner_test
import xcode_log_parser
import xcodebuild_runner
_ROOT_FOLDER_PATH = 'root/folder'
_XCODE_BUILD_VERSION = '10B61'
_DESTINATION = 'A4E66321-177A-450A-9BA1-488D85B7278E'
_OUT_DIR = 'out/dir'
_XTEST_RUN = '/tmp/temp_file.xctestrun'
_EGTESTS_APP_PATH = '%s/any_egtests.app' % _ROOT_FOLDER_PATH
_FLAKY_EGTEST_APP_PATH = 'path/to/ios_chrome_flaky_eg2test_module.app'
class XCodebuildRunnerTest(test_runner_test.TestCase):
"""Test case to test xcodebuild_runner."""
def setUp(self):
super(XCodebuildRunnerTest, self).setUp()
self.mock(os.path, 'exists', lambda _: True)
self.mock(xcode_log_parser,
'get_parser', lambda: xcode_log_parser.Xcode11LogParser())
self.mock(os, 'listdir', lambda _: ['any_egtests.xctest'])
self.mock(iossim_util, 'is_device_with_udid_simulator', lambda _: False)
self.mock(result_sink_util.ResultSinkClient,
'post', lambda *args, **kwargs: None)
self.mock(
test_apps.GTestsApp,
'get_all_tests', lambda _: ['Class1/passedTest1', 'Class1/passedTest2'])
self.mock(test_apps.EgtestsApp,
'fill_xctest_run', lambda _1, _2: 'xctestrun')
self.mock(iossim_util, 'get_simulator', lambda _1, _2: 'sim-UUID')
self.mock(test_apps, 'get_bundle_id', lambda _: "fake-bundle-id")
self.mock(test_apps, 'is_running_rosetta', lambda: False)
self.mock(test_apps.plistlib, 'writePlist', lambda _1, _2: '')
self.mock(test_runner.SimulatorTestRunner, 'tear_down', lambda _: None)
self.mock(test_runner.DeviceTestRunner, 'tear_down', lambda _: None)
self.mock(xcodebuild_runner.subprocess,
'Popen', lambda cmd, env, stdout, stderr: 'fake-out')
self.mock(test_runner, 'print_process_output', lambda _: [])
def tearDown(self):
super(XCodebuildRunnerTest, self).tearDown()
@mock.patch('xcode_log_parser.Xcode11LogParser.collect_test_results')
def testLaunchCommand_restartCrashed1stAttempt(self, mock_collect_results):
egtests = test_apps.EgtestsApp(_EGTESTS_APP_PATH)
crashed_collection = ResultCollection()
crashed_collection.crashed = True
mock_collect_results.side_effect = [
crashed_collection,
ResultCollection(test_results=[
TestResult('Class1/passedTest1', TestStatus.PASS),
TestResult('Class1/passedTest2', TestStatus.PASS)
])
]
launch_command = xcodebuild_runner.LaunchCommand(
egtests, _DESTINATION, shards=1, retries=3)
overall_result = launch_command.launch()
self.assertFalse(overall_result.crashed)
self.assertEqual(len(overall_result.all_test_names()), 2)
self.assertEqual(overall_result.expected_tests(),
set(['Class1/passedTest1', 'Class1/passedTest2']))
@mock.patch('xcode_log_parser.Xcode11LogParser.collect_test_results')
def testLaunchCommand_notRestartPassedTest(self, mock_collect_results):
egtests = test_apps.EgtestsApp(_EGTESTS_APP_PATH)
collection = ResultCollection(test_results=[
TestResult('Class1/passedTest1', TestStatus.PASS),
TestResult('Class1/passedTest2', TestStatus.PASS)
])
mock_collect_results.side_effect = [collection]
launch_command = xcodebuild_runner.LaunchCommand(
egtests, _DESTINATION, shards=1, retries=3)
launch_command.launch()
xcodebuild_runner.LaunchCommand(egtests, _DESTINATION, shards=1, retries=3)
self.assertEqual(1, len(mock_collect_results.mock_calls))
@mock.patch('xcode_log_parser.Xcode11LogParser.collect_test_results')
def test_launch_command_restart_failed_attempt(self, mock_collect_results):
egtests = test_apps.EgtestsApp(_EGTESTS_APP_PATH)
mock_collect_results.side_effect = [
ResultCollection(test_results=[
TestResult('Class1/passedTest1', TestStatus.FAIL),
TestResult('Class1/passedTest2', TestStatus.FAIL)
]),
ResultCollection(test_results=[
TestResult('Class1/passedTest1', TestStatus.PASS),
TestResult('Class1/passedTest2', TestStatus.PASS)
])
]
launch_command = xcodebuild_runner.LaunchCommand(
egtests, _DESTINATION, shards=1, retries=3)
overall_result = launch_command.launch()
self.assertEqual(len(overall_result.all_test_names()), 2)
self.assertEqual(overall_result.expected_tests(),
set(['Class1/passedTest1', 'Class1/passedTest2']))
@mock.patch('xcode_log_parser.Xcode11LogParser.collect_test_results')
def test_launch_command_not_restart_crashed_attempt(self,
mock_collect_results):
"""Crashed first attempt of runtime select test suite won't be retried."""
egtests = test_apps.EgtestsApp(_FLAKY_EGTEST_APP_PATH)
crashed_collection = ResultCollection()
crashed_collection.crashed = True
mock_collect_results.return_value = crashed_collection
launch_command = xcodebuild_runner.LaunchCommand(
egtests, _DESTINATION, shards=1, retries=3)
overall_result = launch_command.launch()
self.assertEqual(len(overall_result.all_test_names()), 0)
self.assertEqual(overall_result.expected_tests(), set([]))
self.assertTrue(overall_result.crashed)
class DeviceXcodeTestRunnerTest(test_runner_test.TestCase):
"""Test case to test xcodebuild_runner.DeviceXcodeTestRunner."""
def setUp(self):
super(DeviceXcodeTestRunnerTest, self).setUp()
self.mock(os.path, 'exists', lambda _: True)
self.mock(test_runner, 'get_current_xcode_info', lambda: {
'version': 'test version', 'build': 'test build', 'path': 'test/path'})
self.mock(os.path, 'abspath', lambda path: '/abs/path/to/%s' % path)
self.mock(result_sink_util.ResultSinkClient,
'post', lambda *args, **kwargs: None)
self.mock(test_runner.subprocess, 'check_output', lambda _: b'fake-output')
self.mock(test_runner.subprocess, 'check_call', lambda _: b'fake-out')
self.mock(test_runner.subprocess,
'Popen', lambda cmd, env, stdout, stderr: 'fake-out')
self.mock(test_runner.TestRunner, 'set_sigterm_handler',
lambda self, handler: 0)
self.mock(os, 'listdir', lambda _: [])
self.mock(xcodebuild_runner.subprocess,
'Popen', lambda cmd, env, stdout, stderr: 'fake-out')
self.mock(test_runner, 'print_process_output', lambda _: [])
self.mock(test_runner.TestRunner, 'start_proc', lambda self, cmd: 0)
self.mock(test_runner.DeviceTestRunner, 'get_installed_packages',
lambda self: [])
self.mock(test_runner.DeviceTestRunner, 'wipe_derived_data', lambda _: None)
self.mock(test_runner.TestRunner, 'retrieve_derived_data', lambda _: None)
self.mock(test_runner.TestRunner, 'process_xcresult_dir', lambda _: None)
self.mock(xcode_log_parser,
'get_parser', lambda: xcode_log_parser.Xcode11LogParser())
self.mock(test_apps.EgtestsApp,
'fill_xctest_run', lambda _1, _2: 'xctestrun')
self.mock(
test_apps.GTestsApp,
'get_all_tests', lambda _: ['Class1/passedTest1', 'Class1/passedTest2'])
self.mock(iossim_util, 'is_device_with_udid_simulator', lambda _: False)
@mock.patch('xcode_log_parser.Xcode11LogParser.collect_test_results')
def test_launch(self, mock_result):
"""Tests launch method in DeviceXcodeTestRunner"""
tr = xcodebuild_runner.DeviceXcodeTestRunner("fake-app-path",
"fake-host-app-path",
"fake-out-dir")
mock_result.return_value = ResultCollection(test_results=[
TestResult('Class1/passedTest1', TestStatus.PASS),
TestResult('Class1/passedTest2', TestStatus.PASS)
])
self.assertTrue(tr.launch())
self.assertEqual(len(tr.test_results['tests']), 2)
@mock.patch('xcode_log_parser.Xcode11LogParser.collect_test_results')
def test_unexpected_skipped_crash_reported(self, mock_result):
"""Tests launch method in DeviceXcodeTestRunner"""
tr = xcodebuild_runner.DeviceXcodeTestRunner("fake-app-path",
"fake-host-app-path",
"fake-out-dir")
crashed_collection = ResultCollection(
test_results=[TestResult('Class1/passedTest1', TestStatus.PASS)])
crashed_collection.crashed = True
mock_result.return_value = crashed_collection
self.assertFalse(tr.launch())
self.assertEqual(len(tr.test_results['tests']), 2)
tests = tr.test_results['tests']
self.assertEqual(tests['Class1/passedTest1']['actual'], 'PASS')
self.assertEqual(tests['Class1/passedTest2']['actual'], 'SKIP')
self.assertEqual(tests['Class1/passedTest2']['expected'], 'PASS')
@mock.patch('xcode_log_parser.Xcode11LogParser.collect_test_results')
def test_unexpected_skipped_not_reported(self, mock_result):
"""Unexpected skip not reported for these selecting tests at runtime."""
crashed_collection = ResultCollection(
test_results=[TestResult('Class1/passedTest1', TestStatus.PASS)])
crashed_collection.crashed = True
mock_result.return_value = crashed_collection
tr = xcodebuild_runner.DeviceXcodeTestRunner(_FLAKY_EGTEST_APP_PATH,
"fake-host-app-path",
"fake-out-dir")
self.assertFalse(tr.launch())
self.assertEqual(len(tr.test_results['tests']), 1)
tests = tr.test_results['tests']
self.assertEqual(tests['Class1/passedTest1']['actual'], 'PASS')
# Class1/passedTest2 doesn't appear in test results.
@mock.patch('xcodebuild_runner.isinstance', return_value=True)
@mock.patch('xcode_log_parser.Xcode11LogParser.collect_test_results')
@mock.patch('test_apps.EgtestsApp', autospec=True)
def test_disabled_reported(self, mock_test_app, mock_result, _):
"""Tests launch method in DeviceXcodeTestRunner"""
test_app = mock_test_app.return_value
test_app.test_app_path = _EGTESTS_APP_PATH
test_app.disabled_tests = ['Class2/disabled_test3']
test_app.get_all_tests.return_value = [
'Class1/passedTest1', 'Class1/passedTest2'
]
mock_result.return_value = ResultCollection(test_results=[
TestResult('Class1/passedTest1', TestStatus.PASS),
TestResult('Class1/passedTest2', TestStatus.PASS)
])
tr = xcodebuild_runner.DeviceXcodeTestRunner(
"fake-app-path", "fake-host-app-path", "fake-out-dir")
self.assertTrue(tr.launch())
self.assertEqual(len(tr.test_results['tests']), 3)
tests = tr.test_results['tests']
self.assertEqual(tests['Class1/passedTest1']['actual'], 'PASS')
self.assertEqual(tests['Class1/passedTest2']['actual'], 'PASS')
self.assertEqual(tests['Class2/disabled_test3']['actual'], 'SKIP')
self.assertEqual(tests['Class2/disabled_test3']['expected'], 'SKIP')
def test_tear_down(self):
tr = xcodebuild_runner.DeviceXcodeTestRunner(
"fake-app-path", "fake-host-app-path", "fake-out-dir")
tr.tear_down()
if __name__ == '__main__':
logging.basicConfig(
format='[%(asctime)s:%(levelname)s] %(message)s',
level=logging.DEBUG,
datefmt='%I:%M:%S')
unittest.main()
"""
:mod:`zsl.resource.guard`
-------------------------
The guard module defines tools to inject security checks into a resource. With
the help of the ``guard`` class decorator and the declarative ``ResourcePolicy``
class, complex resource security behaviour can be achieved.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import *
from enum import Enum
from functools import wraps
import http.client
from typing import Any, Callable, Dict, List, Optional
from future.utils import raise_from
from zsl.interface.resource import ResourceResult
from zsl.service.service import _TX_HOLDER_ATTRIBUTE, SessionFactory, transactional
from zsl.utils.http import get_http_status_code_value
_HTTP_STATUS_FORBIDDEN = get_http_status_code_value(http.client.FORBIDDEN)
class Access(Enum):
ALLOW = 1
DENY = 2
CONTINUE = 3
class ResourcePolicy(object):
"""Declarative policy class.
Every CRUD method has its corresponding *can_method__before* and
*can_method__after* where *method* can be one of (*create*, *read*,
*update*, *delete*). *__before* method will get the CRUD method
parameters and *__after* will get the CRUD method result as parameter. On
returning ``Access.ALLOW`` access is granted. It should return
``Access.CONTINUE`` when the policy is not met but also not broken, i.e. it
is not its responsibility to decide. On returning ``Access.DENY`` or raising
a ``PolicyException`` policy is broken and access is immediately denied.
The default implementation of these methods looks up the corresponding
*can_method* attribute, so ``can_read = Access.ALLOW`` will allow access
for reading without the declaration of ``can_read__before`` or
``can_read__after``. *default* attribute is used if *can_method*
attribute is not declared. For more complex logic it can be declared as a
property, see examples:
.. code-block:: python
class SimplePolicy(ResourcePolicy):
'''Allow read and create'''
default = Access.ALLOW
can_delete = Access.CONTINUE
can_update = Access.CONTINUE
class AdminPolicy(ResourcePolicy):
'''Only admin has access'''
@inject(user_service=UserService)
def __init__(self, user_service):
self._user_service = user_service
@property
def default(self):
if self._user_service.current_user.is_admin:
return Access.ALLOW
"""
default = Access.CONTINUE
# can_create
# can_read
# can_update
# can_delete
def can_create__before(self, *args, **kwargs):
"""Check create method before executing."""
return self._check_default('can_create')
def can_create__after(self, *args, **kwargs):
"""Check create method after executing."""
return self._check_default('can_create')
def can_read__before(self, *args, **kwargs):
"""Check read method before executing."""
return self._check_default('can_read')
def can_read__after(self, *args, **kwargs):
"""Check read method after executing."""
return self._check_default('can_read')
def can_update__before(self, *args, **kwargs):
"""Check update method before executing."""
return self._check_default('can_update')
def can_update__after(self, *args, **kwargs):
"""Check update method after executing."""
return self._check_default('can_update')
def can_delete__before(self, *args, **kwargs):
"""Check delete method before executing."""
return self._check_default('can_delete')
def can_delete__after(self, *args, **kwargs):
"""Check delete method after executing."""
return self._check_default('can_delete')
def _check_default(self, prop):
# type: (str) -> Access
return getattr(self, prop, self.default)
class PolicyViolationError(Exception):
"""Error raised when policy is violated.
It can carry an HTTP status code; 403 by default.
"""
def __init__(self, message, code=_HTTP_STATUS_FORBIDDEN):
self.code = code
super(PolicyViolationError, self).__init__(message)
class GuardedMixin(object):
"""Add guarded CRUD methods to resource.
The ``guard`` decorator replaces the guarded CRUD methods with wrappers that
run security checks around them. It adds this mixin into the
resource automatically, but it can be declared on the resource manually
for IDEs to accept calls to the guarded methods.
"""
def guarded_create(self, params, args, data):
# type: (str, Dict[str, str], Dict[str, Any]) -> Dict[str, Any]
pass
def guarded_read(self, params, args, data):
# type: (str, Dict[str, str], Dict[str, Any]) -> Dict[str, Any]
pass
def guarded_update(self, params, args, data):
# type: (str, Dict[str, str], Dict[str, Any]) -> Dict[str, Any]
pass
def guarded_delete(self, params, args, data):
# type: (str, Dict[str, str], Dict[str, Any]) -> Dict[str, Any]
pass
def default_error_handler(e, *_):
# type: (PolicyViolationError, Any) -> ResourceResult
"""Default policy violation error handler.
It will create an empty resource result with an error HTTP code.
"""
return ResourceResult(
status=e.code,
body={}
)
class guard(object):
"""Guard decorator.
This decorator wraps the CRUD methods with security checks before and
after CRUD method execution, so that the response can be stopped or
manipulated. The original CRUD methods are renamed to *guarded_method*,
where *method* can be [*create*, *read*, *update*, *delete*], so by using a
`GuardedResource` as a base, you can still redeclare the *guarded_methods*
and won't loose the security checks.
It takes a list of policies, which will be always checked before and
after executing the CRUD method.
A policy is met when it returns ``Access.ALLOW``; on ``Access.CONTINUE`` it
will continue to check others and on ``Access.DENY`` or raising a
``PolicyViolationError`` access will be restricted. If there is no policy
which grants the access a ``PolicyViolationError`` is raised and access
will be restricted.
Guard can have custom exception handlers or method wrappers to wrap the
CRUD methods with.
.. code-block:: python
class Policy(ResourcePolicy):
default = Access.DENY
can_read = Access.ALLOW # allow only read
@guard([Policy()])
class GuardedResource(GuardedMixin):
def read(self, param, args, data):
return resources[param]
class SpecificResource(GuardedResource):
# override GuardedResource.read, but with its security checks
def guarded_read(self, param, args, data):
return specific_resources[param]
"""
method_wrappers = []
exception_handlers = [default_error_handler]
resource_methods = ['create', 'read', 'update', 'delete']
def __init__(self, policies=None, method_wrappers=None,
exception_handlers=None):
# type: (Optional[List[ResourcePolicy]], Optional[List[Callable]], Optional[List[Callable]]) -> None
self.policies = list(policies) if policies else []
if method_wrappers:
self._method_wrappers = self.method_wrappers + method_wrappers
else:
self._method_wrappers = list(self.method_wrappers)
if exception_handlers:
self._exception_handlers = \
self.exception_handlers + exception_handlers
else:
self._exception_handlers = list(self.exception_handlers)
@staticmethod
def _check_before_policies(res, name, *args, **kwargs):
for policy in res._guard_policies:
access = _call_before(policy, name)(*args, **kwargs)
if access == Access.ALLOW:
return
elif access == Access.DENY:
raise PolicyViolationError('Access denied for {} {}'.format(
name, 'before'), code=_HTTP_STATUS_FORBIDDEN)
elif access == Access.CONTINUE:
continue
else:
raise TypeError('Access has no value {}'.format(access))
raise PolicyViolationError(
"Access haven't been granted for {} {}".format(
name, 'before'), code=_HTTP_STATUS_FORBIDDEN)
@staticmethod
def _check_after_policies(res, name, result):
for policy in res._guard_policies:
access = _call_after(policy, name)(result)
if access == Access.ALLOW:
return
elif access == Access.DENY:
raise PolicyViolationError('Policy violation for {} {}'.format(
name, 'after'), code=_HTTP_STATUS_FORBIDDEN)
elif access == Access.CONTINUE:
continue
else:
raise TypeError('Access has no value {}'.format(access))
raise PolicyViolationError(
"Access haven't been granted for {} {}".format(
name, 'after'), code=_HTTP_STATUS_FORBIDDEN)
def _wrap(self, method):
# type: (Callable) -> Callable
name = method.__name__
@wraps(method)
def wrapped(*args, **kwargs):
res = args[0]
args = args[1:]
try:
self._check_before_policies(res, name, *args, **kwargs)
rv = _guarded_method(res, name)(*args, **kwargs)
self._check_after_policies(res, name, rv)
except PolicyViolationError as e:
rv = self._handle_exception(e, res)
return rv
for mw in reversed(self._method_wrappers):
wrapped = mw(wrapped)
return wrapped
def _handle_exception(self, error, resource):
rv = None
for handler in self._exception_handlers:
rv = handler(error, rv, resource)
return rv
def __call__(self, cls):
if hasattr(cls, '_guard_policies'):
self.policies += getattr(cls, '_guard_policies')
setattr(cls, '_guard_policies', list(self.policies))
return cls
setattr(cls, '_guard_policies', list(self.policies))
for method_name in self.resource_methods:
guarded_name = _guarded_name(method_name)
if hasattr(cls, method_name):
method = getattr(cls, method_name)
setattr(cls, method_name, self._wrap(method))
setattr(cls, guarded_name, method)
if issubclass(cls, GuardedMixin):
return cls
else:
return type(cls.__name__, (cls, GuardedMixin), {})
def transactional_error_handler(e, rv, _):
# type: (Any, Any, SessionFactory) -> Any
"""Re-raise a violation error to be handled in the
``_nested_transactional``.
"""
raise_from(_TransactionalPolicyViolationError(rv), e)
def _nested_transactional(fn):
# type: (Callable) -> Callable
"""In a transactional method create a nested transaction."""
@wraps(fn)
def wrapped(self, *args, **kwargs):
# type: (SessionFactory) -> Any
try:
rv = fn(self, *args, **kwargs)
except _TransactionalPolicyViolationError as e:
getattr(self, _TX_HOLDER_ATTRIBUTE).rollback()
rv = e.result
return rv
return wrapped
class transactional_guard(guard):
"""Security guard for ``ModelResource``.
This adds a transactional method wrapper and an error handler which calls the
rollback on ``PolicyViolationError``.
"""
method_wrappers = [transactional, _nested_transactional]
exception_handlers = guard.exception_handlers + [
transactional_error_handler]
class _TransactionalPolicyViolationError(PolicyViolationError):
"""Exception raised during """
def __init__(self, result):
# type: (ResourceResult) -> None
self.result = result
super(_TransactionalPolicyViolationError, self).__init__(
result.body,
result.status
)
def _guarded_method(res, method_name):
# type: (object, str) -> Callable
"""Return the guarded method from CRUD name"""
return getattr(res, _guarded_name(method_name))
def _guarded_name(method_name):
# type: (str) -> str
"""Return name for guarded CRUD method.
>>> _guarded_name('read')
'guarded_read'
"""
return 'guarded_' + method_name
def _before_name(method_name):
# type: (str) -> str
"""Return the name of the before check method.
>>> _before_name('read')
'can_read__before'
"""
return 'can_' + method_name + '__before'
def _after_name(method_name):
# type: (str) -> str
"""Return the name of after check method.
>>> _after_name('read')
'can_read__after'
"""
return 'can_' + method_name + '__after'
def _call_before(policy, method_name):
# type: (ResourcePolicy, str) -> Callable
"""Return the before check method.
>>> p = ResourcePolicy()
>>> _call_before(p, 'read')
p.can_read__before
"""
return getattr(policy, _before_name(method_name))
def _call_after(policy, method_name):
# type: (ResourcePolicy, str) -> Callable
"""Return the after check method.
>>> p = ResourcePolicy()
>>> _call_after(p, 'read')
p.can_read__after
"""
return getattr(policy, _after_name(method_name))
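# Illustrative sketch, not part of the original module (the policy and resource
# classes are made up): a read-only policy applied with ``guard``.  The original
# read() becomes guarded_read(), and the public read() runs the policy checks
# before and after it.
class _ReadOnlyPolicy(ResourcePolicy):
    default = Access.DENY
    can_read = Access.ALLOW


@guard([_ReadOnlyPolicy()])
class _EchoResource(object):
    def read(self, params, args, data):
        return {'params': params}

# _EchoResource().read('42', {}, {}) -> {'params': '42'} once both checks allow it;
# a create()/update()/delete() defined here would be denied by the policy.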
#!/usr/bin/python
#
# Autofocusing routines.
#
# You will need: scipy matplotlib sextractor
# This should work on Debian/ubuntu:
# sudo apt-get install python-matplotlib python-scipy python-pyfits sextractor
#
# If you would like to see sextractor results, get DS9 and pyds9:
#
# http://hea-www.harvard.edu/saord/ds9/
#
# Please be aware that current sextractor Ubuntu packages do not work
# properly. The best workaround is to install the package and then overwrite
# the sextractor binary with one compiled from source (so you will have access
# to the sextractor configuration files which the program assumes).
#
# (C) 2002-2008 Stanislav Vitek
# (C) 2002-2010 Martin Jelinek
# (C) 2009-2010 Markus Wildi
# (C) 2010-2014 Petr Kubanek, Institute of Physics <[email protected]>
# (C) 2010 Francisco Forster Buron, Universidad de Chile
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
from rts2 import scriptcomm
from rts2 import sextractor
from scottSock import scottSock
sepPresent = False
try:
import sep
sepPresent = True
except Exception as ex:
pass
import os
from pylab import *
from scipy import *
from scipy import optimize
import numpy
import pickle
LINEAR = 0
"""Linear fit"""
P2 = 1
"""Fit using 2 power polynomial"""
P4 = 2
"""Fit using 4 power polynomial"""
H3 = 3
"""Fit using general Hyperbola (three free parameters)"""
H2 = 4
"""Fit using Hyperbola with fixed slope at infinity (two free parameters)"""
class Focusing (scriptcomm.Rts2Comm):
"""Take and process focussing data."""
def getTriestmp(self, pathway="/home/rts2obs/rts2images"):
focusfiles = [x for x in os.listdir(pathway) if "foc" in x]
tries = {}
for f in focusfiles:
num = f.split('_')[2].split(".")[0]
tries[float(num)] = f
return tries
def __init__(self,exptime = 10,step=20,attempts=10,filterGalaxies=False):
scriptcomm.Rts2Comm.__init__(self)
self.log('I', 'This is a test')
self.exptime = exptime
self.step = step
self.focuser = "F0"
self.attempts = attempts
# if |offset| is above this value, try linear fit
self.linear_fit = self.step * self.attempts / 2.0
# target FWHM for linear fit
self.linear_fit_fwhm = 3.5
self.filterGalaxies = filterGalaxies
def doFit(self,fit):
b = None
errfunc = None
fitfunc_r = None
p0 = None
# try to fit..
# this function is for flux..
#fitfunc = lambda p, x: p[0] * p[4] / (p[4] + p[3] * (abs(x - p[1])) ** (p[2]))
# prepare fit based on its type..
if fit == LINEAR:
fitfunc = lambda p, x: p[0] + p[1] * x
errfunc = lambda p, x, y: fitfunc(p, x) - y # LINEAR - distance to the target function
p0 = [1, 1]
fitfunc_r = lambda x, p0, p1: p0 + p1 * x
elif fit == P2:
fitfunc = lambda p, x: p[0] + p[1] * x + p[2] * (x ** 2)
errfunc = lambda p, x, y: fitfunc(p, x) - y # P2 - distance to the target function
p0 = [1, 1, 1]
fitfunc_r = lambda x, p0, p1, p2 : p0 + p1 * x + p2 * (x ** 2)
elif fit == P4:
fitfunc = lambda p, x: p[0] + p[1] * x + p[2] * (x ** 2) + p[3] * (x ** 3) + p[4] * (x ** 4)
errfunc = lambda p, x, y: fitfunc(p, x) - y # P4 - distance to the target function
p0 = [1, 1, 1, 1, 1]
fitfunc_r = lambda x, p0, p1, p2, p3, p4: p0 + p1 * x + p2 * (x ** 2) + p3 * (x ** 3) + p4 * (x ** 4)
elif fit == H3:
fitfunc = lambda p, x: sqrt(p[0] ** 2 + p[1] ** 2 * (x - p[2])**2)
errfunc = lambda p, x, y: fitfunc(p, x) - y # H3 - distance to the target function
p0 = [400., 3.46407715307, self.fwhm_MinimumX] # initial guess based on real data
fitfunc_r = lambda x, p0, p1, p2 : sqrt(p0 ** 2 + p1 ** 2 * (x - p2) ** 2)
elif fit == H2:
fitfunc = lambda p, x: sqrt(p[0] ** 2 + 3.46407715307 ** 2 * (x - p[1])**2) # 3.46 based on H3 fits
errfunc = lambda p, x, y: fitfunc(p, x) - y # H2 - distance to the target function
p0 = [400., self.fwhm_MinimumX] # initial guess based on real data
fitfunc_r = lambda x, p0, p1 : sqrt(p0 ** 2 + 3.46407715307 ** 2 * (x - p1) ** 2)
else:
raise Exception('Unknown fit type {0}'.format(fit))
self.fwhm_poly, success = optimize.leastsq(errfunc, p0[:], args=(self.focpos, self.fwhm))
b = None
if fit == LINEAR:
b = (self.linear_fit_fwhm - self.fwhm_poly[0]) / self.fwhm_poly[1]
elif fit == H3:
b = self.fwhm_poly[2]
self.log('I', 'found minimum FWHM: {0}'.format(abs(self.fwhm_poly[0])))
self.log('I', 'found slope at infinity: {0}'.format(abs(self.fwhm_poly[1])))
elif fit == H2:
b = self.fwhm_poly[1]
self.log('I', 'found minimum FWHM: {0}'.format(abs(self.fwhm_poly[0])))
else:
b = optimize.fmin(fitfunc_r,self.fwhm_MinimumX,args=(self.fwhm_poly), disp=0)[0]
self.log('I', 'found FWHM minimum at offset {0}'.format(b))
return b
def tryFit(self,defaultFit):
"""Try fit, change to linear fit if outside allowed range."""
b = self.doFit(defaultFit)
if (abs(b - numpy.average(self.focpos)) >= self.linear_fit):
self.log('W','cannot find best FWHM inside limits, trying H2 fit - best fit is {0}, average focuser position is {1}'.format(b, numpy.average(self.focpos)))
b = self.doFit(H2)
if (abs(b - numpy.average(self.focpos)) >= self.linear_fit):
self.log('W','cannot find best FWHM inside limits, trying linear fit - best fit is {0}, average focuser position is {1}'.format(b, numpy.average(self.focpos)))
b = self.doFit(LINEAR)
return b,LINEAR
return b,H2
return b,defaultFit
def doFitOnArrays(self,fwhm,focpos,defaultFit):
self.fwhm = array(fwhm)
self.focpos = array(focpos)
self.fwhm_MinimumX = 0
min_fwhm=fwhm[0]
for x in range(0,len(fwhm)):
if fwhm[x] < min_fwhm:
self.fwhm_MinimumX = x
min_fwhm = fwhm[x]
return self.tryFit(defaultFit)
def __sexFindFWHM(self,tries,threshold,deblendmin,min_stars=95):
# X is offset value, Y is FWHM
focpos=[]
fwhm=[]
fwhm_min = None
fwhm_MinimumX = None
keys = list(tries.keys())
keys.sort()
sextr = sextractor.Sextractor(threshold=threshold,deblendmin=deblendmin)
for k in keys:
try:
sextr.runSExtractor(tries[k])
im_fwhm,fwhms,nstars = sextr.calculate_FWHM(min_stars,self.filterGalaxies)
except Exception as ex:
self.log('W','offset {0}: {1}'.format(k,ex))
continue
self.log('I','offset {0} fwhm {1} with {2} stars'.format(k,im_fwhm,nstars))
focpos.append(k)
fwhm.append(im_fwhm)
if (fwhm_min is None or im_fwhm < fwhm_min):
fwhm_MinimumX = k
fwhm_min = im_fwhm
return focpos,fwhm,fwhm_min,fwhm_MinimumX
def __sepFindFWHM(self,tries):
from astropy.io import fits
import math
import traceback
focpos=[]
fwhm=[]
fwhm_min=None
fwhm_MinimumX=None
keys = list(tries.keys())
keys.sort()
ln2=math.log(2)
for k in keys:
try:
fwhms=[]
ff=fits.open(tries[k])
# loop on images..
for i in range(1,len(ff)-1):
data=ff[i].data
bkg=sep.Background(numpy.array(data,numpy.float))
sources=sep.extract(data-bkg, 5.0 * bkg.globalrms)
self.log('I','bkg globalrms {}'.format(bkg.globalrms))
for s in sources:
fwhms.append(2 * math.sqrt(ln2 * (s[15]**2 + s[16]**2)))
im_fwhm=numpy.median(fwhms)
# find median from fwhms measurements..
self.log('I','median fwhm {}'.format(numpy.median(fwhms)))
self.log('I','offset {0} fwhm {1} with {2} stars'.format(k,im_fwhm,len(fwhms)))
focpos.append(k)
fwhm.append(im_fwhm)
if (fwhm_min is None or im_fwhm < fwhm_min):
fwhm_MinimumX = k
fwhm_min = im_fwhm
except Exception as ex:
self.log('W','offset {0}: {1} {2}'.format(k,ex,traceback.format_exc()))
self.log('I','pickling')
fd = open( "rts2.pkl", 'w' )
pickle.dump(sources, fd)
fd.close()
return focpos,fwhm,fwhm_min,fwhm_MinimumX
def findBestFWHM(self,tries,defaultFit=H3,min_stars=15,ds9display=False,threshold=2.7,deblendmin=0.03):
# X is offset value, Y is FWHM
self.focpos=[]
self.fwhm=[]
self.fwhm_min = None
self.fwhm_MinimumX = None
if sepPresent:
self.focpos,self.fwhm,self.fwhm_min,self.fwhm_MinimumX = self.__sepFindFWHM(tries)
else:
self.focpos,self.fwhm,self.fwhm_min,self.fwhm_MinimumX = self.__sexFindFWHM(tries,threshold,deblendmin)
self.focpos = array(self.focpos)
self.fwhm = array(self.fwhm)
return self.tryFit(defaultFit)
def beforeReadout(self):
self.current_focus = self.getValueFloat('FOC_POS',self.focuser)
if (self.num == self.attempts):
self.setValue('FOC_TOFF',0,self.focuser)
else:
self.off += self.step
self.setValue('FOC_TOFF',self.off,self.focuser)
def takeImages(self):
self.setValue('exposure',self.exptime)
self.setValue('SHUTTER','LIGHT')
self.off = -1 * self.step * (self.attempts / 2)
self.setValue('FOC_TOFF',self.off,self.focuser)
tries = {}
# must be overwritten in beforeReadout
self.current_focus = None
for self.num in range(1,self.attempts+1):
self.log('I','starting {0}s exposure on offset {1}'.format(self.exptime,self.off))
img = self.exposure(self.beforeReadout,'%b/foc_%N_{0}.fits'.format(self.num))
tries[self.current_focus] = img
self.log('I','all focusing exposures finished, processing data')
return self.findBestFWHM(tries)
def run(self):
self.focuser = self.getValue('focuser')
# send to some other coordinates if you wish so, or disable this for target for fixed coordinates
#self.altaz (89,90)
b,fit = self.takeImages()
if fit == LINEAR:
self.setValue('FOC_DEF',b,self.focuser)
b,fit = self.takeImages()
self.setValue('FOC_DEF',b,self.focuser)
def plotFit(self,b,ftype):
"""Plot fit graph."""
fitfunc = None
if ftype == LINEAR:
fitfunc = lambda p, x: p[0] + p[1] * x
elif ftype == P2:
fitfunc = lambda p, x: p[0] + p[1] * x + p[2] * (x ** 2)
elif ftype == P4:
fitfunc = lambda p, x: p[0] + p[1] * x + p[2] * (x ** 2) + p[3] * (x ** 3) + p[4] * (x ** 4)
elif ftype == H3:
fitfunc = lambda p, x: sqrt(p[0] ** 2 + p[1] ** 2 * (x - p[2]) ** 2)
elif ftype == H2:
fitfunc = lambda p, x: sqrt(p[0] ** 2 + 3.46407715307 ** 2 * (x - p[1]) ** 2) # 3.46 based on HYPERBOLA fits
else:
raise Exception('Unknown fit type {0}'.format(ftype))
x = linspace(self.focpos.min() - 1, self.focpos.max() + 1)
plot (self.focpos, self.fwhm, "r+", x, fitfunc(self.fwhm_poly, x), "r-")
show()
def to_dataserver( fname, outfile='test.fits', clobber=True ):
# astropy.io.fits is otherwise only imported inside __sepFindFWHM
from astropy.io import fits
fitsfd = fits.open( fname )
width = 0
height = 0
for ext in fitsfd:
if hasattr( ext, 'data' ):
if ext.data is not None:
width+=ext.data.shape[0]
height+=ext.data.shape[1]
fitsfd.close()
fsize = os.stat(fname).st_size
fd = open(fname, 'rb')
if clobber:
clobber_char = '!'
else:
clobber_char = ''
meta = " {} {}{} 1 {} {} 0".format( fsize, clobber_char, '/home/bigobs/data/rts2'+outfile, width, height )
meta = meta + (256-len(meta))*' '
data = meta+fd.read()
lendata = len(data)
soc = scottSock( '10.30.1.1', 6543 )
counter = 0
socsize = 1024
buffsize = 0
while buffsize < len(data):
sent = soc.send( data[buffsize:buffsize+1024] )
buffsize+=sent
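# Hedged sketch (illustrative only, not part of the RTS2 or scottSock API): the
# transfer above prepends a 256-byte, space-padded ASCII header of the form
# " <size> <!?><path> 1 <width> <height> 0" to the raw FITS bytes before
# streaming them in 1024-byte chunks. This helper only shows how such a header
# could be built and padded.
def _build_dataserver_header(fsize, path, width, height, clobber=True):
    meta = " {} {}{} 1 {} {} 0".format(fsize, '!' if clobber else '', path, width, height)
    return meta + (256 - len(meta)) * ' '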
|
|
import math
import copy
# counting functions ******************************************************
def n_c_k(n,k):
'''
Purpose
return C(n,k)
Precondition
n,k strictly positive integers
n >= k
'''
return math.factorial(n) / (math.factorial(k) * math.factorial(n-k))
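# Quick illustration: C(5,2) = 5!/(2!*3!) = 10 and C(6,3) = 20. Note that the
# '/' above yields an int under Python 2 (which this file targets, see the
# xrange usage below) and a float under Python 3.
def _n_c_k_example():
    return n_c_k(5, 2), n_c_k(6, 3)  # -> (10, 20)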
# Group_list ******************************************************
class Group_list:
'''
A group list is of the form:
[
[g_0, h_0_0, h_0_1, ...,h_0_N-1 ]
[g_1, h_1_0, h_1_1, ...,h_1_N-1 ]
...
]
where
group name g_i is a string unique among the groups
each hotspot vector h_i_j is a list of length N > 0
TODO: factor out to here the pur/pre from the generate functions
'''
def __init__(self,group_list,vector_length):
'''
purpose
preconditions
group_list is a legal group list with vectors
of length vector_length
'''
self.vector_length = vector_length
self.group_list = list(group_list)
def cross_product(self,suffix,indexes):
'''
purpose
if suffix is None
generate new vectors in the existing groups by
applying cross_product_sublist to the
current vectors
else
generate one new group for each vector by
(1) applying cross_product_sublist to the
current vectors
(2) appending suffix % i to the current
group name, with i set to 0, 1, 2, ...
preconditions
if suffix is not None
suffix: a string containing exactly one %i
indexes
each element in range(vector_length)
no duplicates, sorted ascending
'''
G = [ ]
for g in self.group_list:
if suffix == None:
g0 = [g[0]]
G.append(g0)
for h_vector in g[1:]:
for h in cross_product_sublist \
(h_vector,indexes):
g0.append(h)
else:
base_name = g[0]
for h_vector in g[1:]:
for i,g0 in enumerate \
(cross_product_sublist
(h_vector,indexes)):
g0 = [base_name+suffix%i,g0]
G.append(g0)
self.group_list = G
def generalize(self,suffix,indexes,count):
'''
purpose
if suffix is None
generate new vectors in the existing groups by
applying generalize_sublist, with count
hotspots, to the current vectors
else
generate one new group for each vector by
(1) applying generalize_sublist, with count
hotspots, to the current vectors
(2) appending suffix % i to the current
group name, with i set to 0, 1, 2, ...
preconditions
if suffix is not None
suffix: a string containing exactly one %i
indexes
each element in range(vector_length)
no duplicates, sorted ascending
count: in [1,len(indexes)]
'''
G = [ ]
for g in self.group_list:
if suffix == None:
g0 = [g[0]]
G.append(g0)
for h_vector in g[1:]:
for h in generalize_sublist \
(h_vector,count,indexes):
g0.append(h)
else:
base_name = g[0]
for h_vector in g[1:]:
for i,g0 in enumerate \
(generalize_sublist
(h_vector,count,indexes)):
g0 = [base_name+suffix%i,g0]
G.append(g0)
self.group_list = G
def substitute(self,suffix,indexes,value_list):
'''
purpose
if suffix is None
generate new vectors in the existing groups by
applying substitute_sublist to the
current vectors
else
generate one new group for each vector by
(1) applying substitute_sublist to the
current vectors
(2) appending suffix % i to the current
group name, with i set to 0, 1, 2, ...
preconditions
if suffix is not None
suffix: a string containing exactly one %i
indexes
each element in range(vector_length)
no duplicates, sorted ascending
'''
G = [ ]
for g in self.group_list:
if suffix == None:
g0 = [g[0]]
G.append(g0)
for h_vector in g[1:]:
for h in substitute_sublist \
(h_vector,indexes,value_list):
g0.append(h)
else:
base_name = g[0]
for h_vector in g[1:]:
for i,g0 in enumerate \
(substitute_sublist
(h_vector,indexes,value_list)):
g0 = [base_name+suffix%i,g0]
G.append(g0)
self.group_list = G
# sublist functions ******************************************************
def cross_product_sublist(L,indexes):
'''
purpose
return a list containing all L0 where
if i in indexes: L0[i] in L[i]
else: L0[i] == L[i]
preconditions
for i in range(len(L)):
if i in indexes: L[i] is a list of string or number
else: L[i] is a string
indexes
in range(len(L)), no duplicates, sorted ascending
'''
# extract sublist for cross product
sublist = [ L[i] for i in indexes ]
# generate sublist cross product
C = cross_product(sublist)
# embed each cross product element in a copy of L
LL = [ ]
for c in C:
# create a new group, selecting from L and c
c_iter = iter(c)
LL.append([ c_iter.next() if i in indexes else L[i]
for i in range(len(L))])
return LL
def generalize_sublist(L,n,indexes):
'''
purpose
return a list containing all the generalizations of L
with n holes in the sublist of L specified by indexes
return the generalizations "embedded" in the original L values
preconditions
L: list of numbers or strings
n in [0..len(indexes)-1]
indexes:
in [0..len(L)-1]
no duplicates
sorted ascending
'''
# extract sublist for generalization
sublist = [ L[i] for i in indexes ]
# generalize sublist
G = generalize(sublist,n)
# embed each generalization in a copy of L
LL = [ ]
for g in G:
# create a new group, selecting from L and g
g_iter = iter(g)
LL.append([ g_iter.next() if i in indexes else L[i]
for i in range(len(L)) ])
return LL
def substitute_sublist(L,indexes,values):
'''
purpose
Return a list of new lists obtained by substituting into L
the elements of values at the positions given by indexes
preconditions
L: list of numbers or strings
indexes:
in [0..len(L)-1]
no duplicates
sorted ascending
values
list of lists of numbers or strings
each sublist is of length len(indexes)
'''
LL = [ ]
for v in values:
v_iter = iter(v)
LL.append([ v_iter.next() if i in indexes else L[i]
for i in range(len(L)) ])
return LL
def two_cover_sublist(L,indexes):
'''
purpose
return a list of L0 forming a pairwise (2-)cover of the
positions selected by indexes, where
if i in indexes: L0[i] in L[i]
else: L0[i] == L[i]
(unlike cross_product_sublist, not every combination is returned)
preconditions
for i in range(len(L)):
if i in indexes: L[i] is a list of string or number
else: L[i] is a string
indexes
in range(len(L)), no duplicates, sorted ascending
'''
# extract sublist for cross product
sublist = [ L[i] for i in indexes ]
# generate sublist two cover
C = two_cover(sublist)
# embed each cross product element in a copy of L
return substitute_sublist(L,indexes,C)
# primitives ******************************************************
def cross_product(domains):
'''
purpose
return a list containing the cross product of domains
precondition
domains is a list of lists containing string, float, or integer
examples
cross_product([[1, 2], ['a', 'b']]) returns
[[1, 'a'], [1, 'b'], [2, 'a'], [2, 'b']]
'''
L = []
for row in genCPRow(domains):
L.append(row)
return L
def generalize(L,n):
'''
Purpose
return a list containing every generalization of L with exactly
n occurrences of None
where
G is a generalization of L if G can be obtained
from L by replacing one or more elements of L with None
Precondition
L is a list of string, float, or integer
n is a non-negative integer
'''
G_list = []
if n == 0:
G_list.append(L)
elif n == len(L):
G_list.append([None]*len(L))
else:
for i in range(len(L)-n):
prefix = L[:i]
# add to G_list:
# all generalizations with None in position i
for suffix in generalize(L[i+1:],n-1):
G = prefix + [None] + suffix
if G not in G_list:
G_list.append(G)
# add to G_list:
# all generalizations with L[i] in position i
for suffix in generalize(L[i+1:],n): # with no None
G = prefix + [L[i]] + suffix
if G not in G_list:
G_list.append(G)
return G_list
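# Illustrative example: every generalization of a 3-element list with exactly
# one None hole.
def _generalize_example():
    return generalize(['a', 'b', 'c'], 1)
    # -> [[None, 'b', 'c'], ['a', None, 'c'], ['a', 'b', None]]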
def choose_k(L, k):
'''
Purpose
Return a list of the sublists of L of length k.
Precondition
L: list of string or number
k: integer, in [0..len(L)]
'''
return list(choose_k_yield(L,k))
def choose_k_yield(L,k):
for i in range(len(L)):
if k == 1:
yield [L[i],]
else:
for next in choose_k(L[i+1:len(L)], k-1):
yield [L[i],] + next
def two_cover(domains):
'''
Purpose
return a two-cover of domains
Precondition
domains is a list of lists containing string, float, or integer
'''
# calculate domain sizes
domain_sizes = []
for i in range(len(domains)):
domain_sizes.append( len(domains[i]) )
# generate a two-cover and return it
L = []
for row in gen2Cover(domain_sizes):
tuple = [None]*len(row)
for i in range(len(row)):
tuple[i] = domains[i][row[i]]
L.append(tuple)
return L
def genCPRow(domains):
'''
Purpose
yields each row in cross product of the n domains
Precondition
domains is a list containing n domains over which to form
cross product
must specify at least one domain
'''
# if there is more than one domain specified
if len(domains) > 1:
# get a row of the cross product of the first n-1 domains
for sublist in genCPRow(domains[:-1]):
# for every element in this domain, append it to row
for item in iter(domains[ len(domains)-1 ]):
yield sublist + [item]
# if only one domain given, yield its elements
else:
for item in iter(domains[ len(domains)-1 ]):
yield [item]
def gen2Cover(domainSizes):
'''
Purpose
generates a 2-cover of index vectors of a set of domains
yields rows in the 2-cover of the index vectors of the domains
Precondition
domainSizes is list of sizes of each domain
domain sizes must be nonzero.
'''
indexVectors = []
for i in range(len(domainSizes)):
indexVectors.append( range(domainSizes[i]) )
# yield each row in the 2-cover of index vectors.
hc = HillClimb(indexVectors)
for row in iter(hc):
yield row
STRENGTH = 2
class HillClimb:
'''
a heuristic search for finding the optimal solution for a pairwise
(strength 2) cover
'''
def __init__(self, dv):
self.domainVector = dv
self.pairSet = []
# yields test tuples from the given domains
def gen(self):
if len(self.domainVector) == 1:
for i in self.domainVector[0]:
yield [i]
else:
# calculate all pairs
self.pairSet = self.makePairs()
# yield first tuple and remove from pairs
tpl = [domain[0] for domain in self.domainVector]
yield tpl
self.removePairs(tpl)
# perform hill climbing while there are pairs to add
while len(self.pairSet) > 0:
# create initial tuple
p = self.pairSet[0]
tpl = []
indices = [d[1] for d in p]
values = [d[0] for d in p]
for i in xrange(len(self.domainVector)):
if i in indices:
tpl.append(
values[indices.index(i)])
else:
tpl.append(
self.domainVector[i][0])
# see if there is a better tuple by analyzing
# neighbours of tpl
numPairs = 0
maxTuple = tpl
maxPairs = self.numPairs(tpl)
while numPairs < maxPairs:
numPairs = maxPairs
tpl = maxTuple
# try to find new tuple and better
# newPairs2
for i in xrange(len(tpl)):
domain = self.domainVector[i]
for j in xrange(len(domain)):
newTuple = \
copy.copy(tpl)
newTuple[i] = domain[j]
# OPTIMIZATION NOTE:
# Could cache newPairs
# value as it may be
# recalculated many
# times
newPairs = \
self.numPairs(newTuple)
if newPairs > maxPairs:
maxPairs = \
newPairs
maxTuple = \
newTuple
# yield new tuple
yield tpl
self.removePairs(tpl)
# add pairs generated by a new domain to the pairSet
def makePairs(self):
pairs = []
# create the first element in the pair
for i in xrange(len(self.domainVector)-STRENGTH+1):
d1 = self.domainVector[i]
for i1 in xrange(len(d1)):
e1 = (d1[i1], i)
# add elements from subsequent domains
for j in xrange(i+1, len(self.domainVector)):
d2 = self.domainVector[j]
for j1 in xrange(len(d2)):
e2 = (d2[j1], j)
p = (e1, e2)
pairs.append(p)
return pairs
# remove pairs that are in tuple from the pairSet
def removePairs(self, tpl):
for i in xrange(len(tpl)-1):
e1 = (tpl[i], i)
for j in xrange(i+1, len(tpl)):
e2 = (tpl[j], j)
p = (e1, e2)
try:
self.pairSet.remove(p)
except ValueError:
pass
# count pairs that are in tuple from the pairSet
def numPairs(self, tpl):
count = 0
for i in xrange(len(tpl)-1):
e1 = (tpl[i], i)
for j in xrange(i+1, len(tpl)):
e2 = (tpl[j], j)
p = (e1, e2)
if p in self.pairSet:
count += 1
return count
# make this object iterable
def __iter__(self):
return self.gen()
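# Hedged usage sketch (Python 2, matching the xrange usage above): build a
# pairwise (strength-2) cover of three small domains. The exact rows depend on
# the hill-climbing heuristic, but every pair of values drawn from any two
# domains appears in at least one row, typically in far fewer rows than the
# 8-row cross product.
def _two_cover_example():
    return two_cover([[0, 1], ['a', 'b'], [False, True]])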
|
|
# Copyright (c) 2013-2021 khal contributors
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""collection of icalendar helper functions"""
import datetime as dt
import dateutil.rrule
import icalendar
import logging
import pytz
from collections import defaultdict
from .exceptions import UnsupportedRecurrence
from .parse_datetime import guesstimedeltafstr, rrulefstr
from .utils import generate_random_uid, localize_strip_tz, to_unix_time
logger = logging.getLogger('khal')
def split_ics(ics, random_uid=False, default_timezone=None):
"""split an ics string into several according to VEVENT's UIDs
and sort the right VTIMEZONEs accordingly
ignores all other ics components
:type ics: str
:param random_uid: assign random uids to all events
:type random_uid: bool
:rtype: list
"""
cal = cal_from_ics(ics)
tzs = {}
events_grouped = defaultdict(list)
for item in cal.walk():
# Since some events could have a Windows format timezone (e.g. 'New Zealand
# Standard Time' for 'Pacific/Auckland' in Olson format), we convert any
# Windows format timezones to Olson.
if item.name == 'VTIMEZONE':
if item['TZID'] in icalendar.windows_to_olson.WINDOWS_TO_OLSON:
key = icalendar.windows_to_olson.WINDOWS_TO_OLSON[item['TZID']]
else:
key = item['TZID']
tzs[key] = item
if item.name == 'VEVENT':
events_grouped[item['UID']].append(item)
else:
continue
return [ics_from_list(events, tzs, random_uid, default_timezone) for uid, events in
sorted(events_grouped.items())]
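# Hedged usage sketch (the inline ICS text is made up): a calendar containing
# two events with different UIDs is split into two single-UID ICS strings,
# sorted by UID.
def _split_ics_example():
    sample = '\r\n'.join([
        'BEGIN:VCALENDAR',
        'VERSION:2.0',
        'PRODID:-//EXAMPLE//EN',
        'BEGIN:VEVENT',
        'UID:first@example.com',
        'DTSTART;VALUE=DATE:20210401',
        'SUMMARY:first event',
        'END:VEVENT',
        'BEGIN:VEVENT',
        'UID:second@example.com',
        'DTSTART;VALUE=DATE:20210402',
        'SUMMARY:second event',
        'END:VEVENT',
        'END:VCALENDAR',
    ])
    return split_ics(sample)  # -> list of two ICS strings, one per UID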
def new_event(locale, dtstart=None, dtend=None, summary=None, timezone=None,
allday=False, description=None, location=None, categories=None,
repeat=None, until=None, alarms=None):
"""create a new event
:param dtstart: starttime of that event
:type dtstart: datetime
:param dtend: end time of that event, if this is a *date*, this value is
interpreted as being the last date the event is scheduled on, i.e.
the VEVENT DTEND will be *one day later*
:type dtend: datetime
:param summary: description of the event, used in the SUMMARY property
:type summary: unicode
:param timezone: timezone of the event (start and end)
:type timezone: pytz.timezone
:param allday: if set to True, we will not transform dtstart and dtend to
datetime
:type allday: bool
:returns: event
:rtype: icalendar.Event
"""
if dtstart is None:
raise ValueError("no start given")
if dtend is None:
raise ValueError("no end given")
if summary is None:
raise ValueError("no summary given")
if not allday and timezone is not None:
dtstart = timezone.localize(dtstart)
dtend = timezone.localize(dtend)
event = icalendar.Event()
event.add('dtstart', dtstart)
event.add('dtend', dtend)
event.add('dtstamp', dt.datetime.now())
event.add('summary', summary)
event.add('uid', generate_random_uid())
# event.add('sequence', 0)
if description:
event.add('description', description)
if location:
event.add('location', location)
if categories:
event.add('categories', categories)
if repeat and repeat != "none":
rrule = rrulefstr(repeat, until, locale)
event.add('rrule', rrule)
if alarms:
for alarm in alarms.split(","):
alarm = alarm.strip()
alarm_trig = -1 * guesstimedeltafstr(alarm)
new_alarm = icalendar.Alarm()
new_alarm.add('ACTION', 'DISPLAY')
new_alarm.add('TRIGGER', alarm_trig)
new_alarm.add('DESCRIPTION', description)
event.add_component(new_alarm)
return event
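# Hedged usage sketch (summary and timezone are illustrative values): create a
# simple one-hour, non-recurring event. `locale` is only consulted when
# `repeat` is given, so None is acceptable here.
def _new_event_example():
    return new_event(
        locale=None,
        dtstart=dt.datetime(2021, 4, 1, 10, 0),
        dtend=dt.datetime(2021, 4, 1, 11, 0),
        summary='team meeting',
        timezone=pytz.timezone('Europe/Berlin'),
    )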
def ics_from_list(events, tzs, random_uid=False, default_timezone=None):
"""convert an iterable of icalendar.Events to an icalendar.Calendar
:params events: list of events all with the same uid
:type events: list(icalendar.cal.Event)
:param random_uid: assign random uids to all events
:type random_uid: bool
:param tzs: collection of timezones
:type tzs: dict(str: icalendar.cal.Vtimezone)
"""
calendar = icalendar.Calendar()
calendar.add('version', '2.0')
calendar.add(
'prodid', '-//PIMUTILS.ORG//NONSGML khal / icalendar //EN'
)
if random_uid:
new_uid = generate_random_uid()
needed_tz, missing_tz = set(), set()
for sub_event in events:
sub_event = sanitize(sub_event, default_timezone=default_timezone)
if random_uid:
sub_event['UID'] = new_uid
# icalendar round-trip converts `TZID=a b` to `TZID="a b"` investigate, file bug XXX
for prop in ['DTSTART', 'DTEND', 'DUE', 'EXDATE', 'RDATE', 'RECURRENCE-ID']:
if isinstance(sub_event.get(prop), list):
items = sub_event.get(prop)
else:
items = [sub_event.get(prop)]
for item in items:
if not (hasattr(item, 'dt') or hasattr(item, 'dts')):
continue
# if prop is a list, all items have the same parameters
datetime_ = item.dts[0].dt if hasattr(item, 'dts') else item.dt
if not hasattr(datetime_, 'tzinfo'):
continue
# check for datetimes' timezones which are not understood by
# icalendar
if datetime_.tzinfo is None and 'TZID' in item.params and \
item.params['TZID'] not in missing_tz:
logger.warning(
'Cannot find timezone `{}` in .ics file, using default timezone. '
'This can lead to erroneous time shifts'.format(item.params['TZID'])
)
missing_tz.add(item.params['TZID'])
elif datetime_.tzinfo and datetime_.tzinfo != pytz.UTC and \
datetime_.tzinfo not in needed_tz:
needed_tz.add(datetime_.tzinfo)
for tzid in needed_tz:
if str(tzid) in tzs:
calendar.add_component(tzs[str(tzid)])
else:
logger.warning(
'Cannot find timezone `{}` in .ics file, this could be a bug, '
'please report this issue at http://github.com/pimutils/khal/.'.format(tzid))
for sub_event in events:
calendar.add_component(sub_event)
return calendar.to_ical().decode('utf-8')
def expand(vevent, href=''):
"""
Constructs a list of start and end dates for all recurring instances of the
event defined in vevent.
It considers RRULE as well as RDATE and EXDATE properties. In case of
unsupported recurrence rules an UnsupportedRecurrence exception is thrown.
If the vevent contains a RECURRENCE-ID property, no expansion is done,
the function still returns a tuple of start and end (date)times.
:param vevent: vevent to be expanded
:type vevent: icalendar.cal.Event
:param href: the href of the vevent, used for more informative logging and
nothing else
:type href: str
:returns: list of start and end (date)times of the expanded event
:rtype: list(tuple(datetime, datetime))
"""
# we do this now and then never care about the "real" end time again
if 'DURATION' in vevent:
duration = vevent['DURATION'].dt
else:
duration = vevent['DTEND'].dt - vevent['DTSTART'].dt
# if this vevent has a RECURRENCE_ID property, no expansion will be
# performed
expand = not bool(vevent.get('RECURRENCE-ID'))
events_tz = getattr(vevent['DTSTART'].dt, 'tzinfo', None)
allday = not isinstance(vevent['DTSTART'].dt, dt.datetime)
def sanitize_datetime(date):
if allday and isinstance(date, dt.datetime):
date = date.date()
if events_tz is not None:
date = events_tz.localize(date)
return date
rrule_param = vevent.get('RRULE')
if expand and rrule_param is not None:
vevent = sanitize_rrule(vevent)
# dst causes problem while expanding the rrule, therefore we transform
# everything to naive datetime objects and transform back after
# expanding
# See https://github.com/dateutil/dateutil/issues/102
dtstart = vevent['DTSTART'].dt
if events_tz:
dtstart = dtstart.replace(tzinfo=None)
rrule = dateutil.rrule.rrulestr(
rrule_param.to_ical().decode(),
dtstart=dtstart,
ignoretz=True,
)
if rrule._until is None:
# rrule really doesn't like to calculate all recurrences until
# eternity, so we only do it until 2037, because a) I'm not sure
# if python can deal with larger datetime values yet and b) pytz
# doesn't know any larger transition times
rrule._until = dt.datetime(2037, 12, 31)
else:
if events_tz and 'Z' in rrule_param.to_ical().decode():
rrule._until = pytz.UTC.localize(
rrule._until).astimezone(events_tz).replace(tzinfo=None)
# rrule._until and dtstart could be dt.date or dt.datetime. They
# need to be the same for comparison
testuntil = rrule._until
if (type(dtstart) == dt.date and type(testuntil) == dt.datetime):
testuntil = testuntil.date()
teststart = dtstart
if (type(testuntil) == dt.date and type(teststart) == dt.datetime):
teststart = teststart.date()
if testuntil < teststart:
logger.warning(
'{0}: Unsupported recurrence. UNTIL is before DTSTART.\n'
'This event will not be available in khal.'.format(href))
return False
if rrule.count() == 0:
logger.warning(
'{0}: Recurrence defined but will never occur.\n'
'This event will not be available in khal.'.format(href))
return False
rrule = map(sanitize_datetime, rrule)
logger.debug('calculating recurrence dates for {}, this might take some time.'.format(href))
# RRULE and RDATE may specify the same date twice, it is recommended by
# the RFC to consider this as only one instance
dtstartl = set(rrule)
if not dtstartl:
raise UnsupportedRecurrence()
else:
dtstartl = {vevent['DTSTART'].dt}
def get_dates(vevent, key):
# TODO replace with get_all_properties
dates = vevent.get(key)
if dates is None:
return
if not isinstance(dates, list):
dates = [dates]
dates = (leaf.dt for tree in dates for leaf in tree.dts)
dates = localize_strip_tz(dates, events_tz)
return map(sanitize_datetime, dates)
# include explicitly specified recurrence dates
if expand:
dtstartl.update(get_dates(vevent, 'RDATE') or ())
# remove excluded dates
if expand:
for date in get_dates(vevent, 'EXDATE') or ():
try:
dtstartl.remove(date)
except KeyError:
logger.warning(
'In event {}, excluded instance starting at {} not found, '
'event might be invalid.'.format(href, date))
dtstartend = [(start, start + duration) for start in dtstartl]
# not necessary, but I prefer deterministic output
dtstartend.sort()
return dtstartend
def assert_only_one_uid(cal: icalendar.Calendar):
"""assert the all VEVENTs in cal have the same UID"""
uids = set()
for item in cal.walk():
if item.name == 'VEVENT':
uids.add(item['UID'])
if len(uids) > 1:
return False
else:
return True
def sanitize(vevent, default_timezone, href='', calendar=''):
"""
clean up vevents we do not understand
:param vevent: the vevent that needs to be cleaned
:type vevent: icalendar.cal.Event
:param default_timezone: timezone to apply to start and/or end dates which
were supposed to be localized but which timezone was not understood
by icalendar
:type default_timezone: pytz.timezone
:param href: used for logging to inform user which .ics files are
problematic
:type href: str
:param calendar: used for logging to inform user which .ics files are
problematic
:type calendar: str
:returns: clean vevent
:rtype: icalendar.cal.Event
"""
# convert localized datetimes with timezone information we don't
# understand to the default timezone
# TODO do this for everything where a TZID can appear (RDATE, EXDATE)
for prop in ['DTSTART', 'DTEND', 'DUE', 'RECURRENCE-ID']:
if prop in vevent and invalid_timezone(vevent[prop]):
timezone = vevent[prop].params.get('TZID')
value = default_timezone.localize(vevent.pop(prop).dt)
vevent.add(prop, value)
logger.warning(
"{} localized in invalid or incomprehensible timezone `{}` in {}/{}. "
"This could lead to this event being wrongly displayed."
"".format(prop, timezone, calendar, href)
)
vdtstart = vevent.pop('DTSTART', None)
vdtend = vevent.pop('DTEND', None)
dtstart = getattr(vdtstart, 'dt', None)
dtend = getattr(vdtend, 'dt', None)
# event with missing DTSTART
if dtstart is None:
raise ValueError('Event has no start time (DTSTART).')
dtstart, dtend = sanitize_timerange(
dtstart, dtend, duration=vevent.get('DURATION', None))
vevent.add('DTSTART', dtstart)
if dtend is not None:
vevent.add('DTEND', dtend)
return vevent
def sanitize_timerange(dtstart, dtend, duration=None):
'''return sensible dtstart and end for events that have an invalid or
missing DTEND, assuming the event just lasts one hour.'''
if isinstance(dtstart, dt.datetime) and isinstance(dtend, dt.datetime):
if dtstart.tzinfo and not dtend.tzinfo:
logger.warning(
"Event end time has no timezone. "
"Assuming it's the same timezone as the start time"
)
dtend = dtstart.tzinfo.localize(dtend)
if not dtstart.tzinfo and dtend.tzinfo:
logger.warning(
"Event start time has no timezone. "
"Assuming it's the same timezone as the end time"
)
dtstart = dtend.tzinfo.localize(dtstart)
if dtend is None and duration is None:
if isinstance(dtstart, dt.datetime):
dtstart = dtstart.date()
dtend = dtstart + dt.timedelta(days=1)
elif dtend is not None:
if dtend < dtstart:
raise ValueError('The event\'s end time (DTEND) is older than '
'the event\'s start time (DTSTART).')
elif dtend == dtstart:
logger.warning(
"Event start time and end time are the same. "
"Assuming the event's duration is one hour."
)
dtend += dt.timedelta(hours=1)
return dtstart, dtend
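# Illustrative examples of the fall-backs above: with neither DTEND nor
# DURATION the event becomes a single all-day event; identical start and end
# times are stretched to one hour.
def _sanitize_timerange_example():
    start = dt.datetime(2021, 4, 1, 10, 0)
    allday = sanitize_timerange(start, None)    # (date(2021, 4, 1), date(2021, 4, 2))
    onehour = sanitize_timerange(start, start)  # (10:00, 11:00) on the same day
    return allday, onehour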
def sanitize_rrule(vevent):
"""fix problems with RRULE:UNTIL"""
if 'rrule' in vevent and 'UNTIL' in vevent['rrule']:
until = vevent['rrule']['UNTIL'][0]
dtstart = vevent['dtstart'].dt
# DTSTART is date, UNTIL is datetime
if not isinstance(dtstart, dt.datetime) and isinstance(until, dt.datetime):
vevent['rrule']['until'] = until.date()
return vevent
def invalid_timezone(prop):
"""check if an icalendar property has a timezone attached we don't understand"""
if hasattr(prop.dt, 'tzinfo') and prop.dt.tzinfo is None and 'TZID' in prop.params:
return True
else:
return False
def _get_all_properties(vevent, prop):
"""Get all properties from a vevent, even if there are several entries
example input:
EXDATE:1234,4567
EXDATE:7890
returns: [1234, 4567, 7890]
:type vevent: icalendar.cal.Event
:type prop: str
"""
if prop not in vevent:
return list()
if isinstance(vevent[prop], list):
rdates = [leaf.dt for tree in vevent[prop] for leaf in tree.dts]
else:
rdates = [vddd.dt for vddd in vevent[prop].dts]
return rdates
def delete_instance(vevent, instance):
"""remove a recurrence instance from a VEVENT's RRDATE list or add it
to the EXDATE list
:type vevent: icalendar.cal.Event
:type instance: datetime.datetime
"""
# TODO check where this instance is coming from and only call the
# appropriate function
if 'RRULE' in vevent:
exdates = _get_all_properties(vevent, 'EXDATE')
exdates += [instance]
vevent.pop('EXDATE')
vevent.add('EXDATE', exdates)
if 'RDATE' in vevent:
rdates = [one for one in _get_all_properties(vevent, 'RDATE') if one != instance]
vevent.pop('RDATE')
if rdates != []:
vevent.add('RDATE', rdates)
def sort_key(vevent):
"""helper function to determine order of VEVENTS
so that recurrence-id events come after the corresponding rrule event, etc
:param vevent: icalendar.Event
:rtype: tuple(str, int)
"""
assert isinstance(vevent, icalendar.Event)
uid = str(vevent['UID'])
rec_id = vevent.get('RECURRENCE-ID')
if rec_id is None:
return uid, 0
rrange = rec_id.params.get('RANGE')
if rrange == 'THISANDFUTURE':
return uid, to_unix_time(rec_id.dt)
else:
return uid, 1
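# Illustrative example: an event without a RECURRENCE-ID sorts first within its
# UID group.
def _sort_key_example():
    event = icalendar.Event()
    event.add('uid', 'deadbeef')
    return sort_key(event)  # -> ('deadbeef', 0)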
def cal_from_ics(ics):
try:
cal = icalendar.Calendar.from_ical(ics)
except ValueError as error:
if (len(error.args) > 0 and isinstance(error.args[0], str) and
error.args[0].startswith('Offset must be less than 24 hours')):
logger.warning(
'Invalid timezone offset encountered, '
'timezone information may be wrong: ' + str(error.args[0])
)
icalendar.vUTCOffset.ignore_exceptions = True
cal = icalendar.Calendar.from_ical(ics)
icalendar.vUTCOffset.ignore_exceptions = False
return cal
|
|
"""The main form for the application"""
from PythonCard import model
# Allow importing of our custom controls
import PythonCard.resource
PythonCard.resource.APP_COMPONENTS_PACKAGE = "vb2py.targets.pythoncard.vbcontrols"
class Background(model.Background):
def __getattr__(self, name):
"""If a name was not found then look for it in components"""
return getattr(self.components, name)
def __init__(self, *args, **kw):
"""Initialize the form"""
model.Background.__init__(self, *args, **kw)
# Call the VB Form_Load
# TODO: This is brittle - depends on how the private indicator is set
if hasattr(self, "_MAINFORM__Form_Load"):
self._MAINFORM__Form_Load()
elif hasattr(self, "Form_Load"):
self.Form_Load()
from vb2py.vbfunctions import *
from vb2py.vbdebug import *
import vb2py.custom.comctllib
class MAINFORM(Background):
""" Make sure we import the common controls for Python"""
def on_chkEnableAutoEdit_mouseClick(self, *args):
if self.chkEnableAutoEdit.Value:
self.tvTree.LabelEdit = vb2py.custom.comctllib.tvwAutomatic
else:
self.tvTree.LabelEdit = vb2py.custom.comctllib.tvwManual
def on_cmdAdd_mouseClick(self, *args):
if self.tvTree.SelectedItem is None:
self.tvTree.Nodes.Add(VBGetMissingArgument(self.tvTree.Nodes.Add, 0), VBGetMissingArgument(self.tvTree.Nodes.Add, 1), self.txtName.Text, self.txtName.Text)
else:
self.tvTree.Nodes.Add(self.tvTree.SelectedItem.Key, vb2py.custom.comctllib.tvwChild, self.txtName.Text, self.txtName.Text)
def on_cmdAddTree_mouseClick(self, *args):
self.tvTree.Nodes.Clear()
self.__setTree(self.txtTree.Text)
def on_cmdClear_mouseClick(self, *args):
self.tvTree.Nodes.Clear()
def on_cmdEnable_mouseClick(self, *args):
self.tvTree.Enabled = not self.tvTree.Enabled
def on_cmdExpand_mouseClick(self, *args):
for Node in self.tvTree.Nodes:
Node.Expanded = True
def on_cmdCollapse_mouseClick(self, *args):
for Node in self.tvTree.Nodes:
Node.Expanded = False
def on_cmdLoadPicture_mouseClick(self, *args):
self.ilDynamic.ListImages.Add(VBGetMissingArgument(self.ilDynamic.ListImages.Add, 0), 'closed', LoadPicture(App.Path + '\\closedicon.ico'))
self.ilDynamic.ListImages.Add(VBGetMissingArgument(self.ilDynamic.ListImages.Add, 0), 'open', LoadPicture(App.Path + '\\openicon.ico'))
def on_cmdMove_mouseClick(self, *args):
self.tvTree.Left = self.tvTree.Left + 10
self.tvTree.Top = self.tvTree.Top + 10
def on_cmdRemove_mouseClick(self, *args):
if self.tvTree.SelectedItem is None:
MsgBox('No selection')
else:
self.tvTree.Nodes.Remove(self.tvTree.SelectedItem.Key)
def on_cmdSetAsDynamic_mouseClick(self, *args):
self.tvTree.ImageList = self.ilDynamic
def on_cmdSetAsPreload_mouseClick(self, *args):
self.tvTree.ImageList = self.imPreload
def on_cmdSetPictures_mouseClick(self, *args):
Nde = vb2py.custom.comctllib.Node()
for Nde in self.tvTree.Nodes:
Nde.Image = 'closed'
Nde.ExpandedImage = 'open'
def on_cmdSize_mouseClick(self, *args):
self.tvTree.Width = self.tvTree.Width + 10
self.tvTree.Height = self.tvTree.Height + 10
def on_cmdTestNode_mouseClick(self, *args):
This = vb2py.custom.comctllib.Node()
#
This = self.tvTree.Nodes(self.txtNodeName.Text)
This.Selected = True
self.txtResults.Text = 'text:' + This.Text + vbCrLf + 'tag:' + This.Tag + vbCrLf
self.txtResults.Text = self.txtResults.Text + 'visible:' + This.Visible + vbCrLf + 'children:' + This.Children + vbCrLf
if This.Children > 0:
self.txtResults.Text = self.txtResults.Text + 'childtext:' + This.Child.Text + vbCrLf
self.txtResults.Text = self.txtResults.Text + 'firstsib:' + This.FirstSibling.Text + vbCrLf + 'lastsib:' + This.LastSibling.Text + vbCrLf
self.txtResults.Text = self.txtResults.Text + 'path:' + This.FullPath + vbCrLf + 'next:' + This.Next.Text + vbCrLf
self.txtResults.Text = self.txtResults.Text + 'parent:' + This.Parent.Text + vbCrLf + 'previous:' + This.Previous.Text + vbCrLf
self.txtResults.Text = self.txtResults.Text + 'root:' + This.Root.Text + vbCrLf
This.EnsureVisible()
This.Selected = True
#
def on_cmdVisible_mouseClick(self, *args):
self.tvTree.Visible = not self.tvTree.Visible
def __Form_Load(self):
self.txtTree.Text = 'A=ROOT' + vbCrLf + 'A1=A' + vbCrLf + 'A2=A' + vbCrLf + 'A3=A' + vbCrLf + 'A3A=A3' + vbCrLf + 'A4=A' + vbCrLf + 'B=ROOT' + vbCrLf + 'B1=B'
def __setTree(self, Text):
Name = String()
Last = vb2py.custom.comctllib.Node()
Remainder = String()
# Set the tree up
# Get name
#
while Text <> '':
#
posn = InStr(Text, vbCrLf)
if posn <> 0:
parts = self.__strSplit(Text, vbCrLf)
Name = parts(0)
Text = parts(1)
else:
Name = Text
Text = ''
#
parts = self.__strSplit(Name, '=')
nodename = parts(0)
parentname = parts(1)
#
if parentname == 'ROOT':
self.tvTree.Nodes.Add(VBGetMissingArgument(self.tvTree.Nodes.Add, 0), VBGetMissingArgument(self.tvTree.Nodes.Add, 1), nodename, nodename)
else:
self.tvTree.Nodes.Add(parentname, vb2py.custom.comctllib.tvwChild, nodename, nodename)
#
#
def __strSplit(self, Text, Delim):
_ret = None
parts = vbObjectInitialize((1,), Variant)
posn = InStr(Text, Delim)
parts[0] = Left(Text, posn - 1)
parts[1] = Right(Text, Len(Text) - posn - Len(Delim) + 1)
_ret = parts
return _ret
def __tvTree_AfterLabelEdit(self, Cancel, NewString):
Debug.Print('After label edit on ' + self.tvTree.SelectedItem.Text + ' new name is ' + NewString)
if NewString == 'CCC':
Debug.Print('Cancelled')
Cancel = 1
else:
Debug.Print('OK')
Cancel = 0
def __tvTree_BeforeLabelEdit(self, Cancel):
Debug.Print('Before label edit on ' + self.tvTree.SelectedItem.Text)
if self.chkAllowEdits.Value:
Cancel = 0
else:
Cancel = 1
def on_tvTree_mouseClick(self, *args):
Debug.Print('Tree view click')
def __tvTree_Collapse(self, Node):
Debug.Print('Tree view collapse on ' + Node.Text)
def __tvTree_DblClick(self):
Debug.Print('Tree view double click')
def on_tvTree_DragDrop_NOTSUPPORTED(self, *args):
Debug.Print('Tree view drag drop')
def on_tvTree_DragOver_NOTSUPPORTED(self, *args):
Debug.Print('Tree view drag over')
def __tvTree_Expand(self, Node):
Debug.Print('Tree expand on ' + Node.Text)
def on_tvTree_gainFocus(self, *args):
Debug.Print('Tree view got focus')
def on_tvTree_keyDown_NOTSUPPORTED(self, *args):
Debug.Print('Tree view keydown (code, shift) ' + CStr(KeyCode) + ', ' + CStr(Shift))
def on_tvTree_keyPress_NOTSUPPORTED(self, *args):
Debug.Print('Tree view keypress (code) ' + CStr(KeyAscii))
def on_tvTree_keyUp_NOTSUPPORTED(self, *args):
Debug.Print('Tree view keyup (code, shift) ' + CStr(KeyCode) + ', ' + CStr(Shift))
def on_tvTree_loseFocus(self, *args):
Debug.Print('Tree view lost focus')
def on_tvTree_mouseDown(self, *args):
Button, Shift, x, y = vbGetEventArgs(["ButtonDown()", "ShiftDown()", "x", "y"], args)
Debug.Print('Tree view mouse down (button, shift, x, y) ' + CStr(Button) + ', ' + CStr(Shift) + ', ' + CStr(x) + ', ' + CStr(y))
def on_tvTree_mouseMove(self, *args):
Button, Shift, x, y = vbGetEventArgs(["ButtonDown()", "ShiftDown()", "x", "y"], args)
Debug.Print('Tree view mouse move (button, shift, x, y) ' + CStr(Button) + ', ' + CStr(Shift) + ', ' + CStr(x) + ', ' + CStr(y))
def on_tvTree_mouseUp(self, *args):
Button, Shift, x, y = vbGetEventArgs(["ButtonDown()", "ShiftDown()", "x", "y"], args)
Debug.Print('Tree view mouse up (button, shift, x, y) ' + CStr(Button) + ', ' + CStr(Shift) + ', ' + CStr(x) + ', ' + CStr(y))
def __tvTree_NodeClick(self, Node):
Debug.Print('Tree node click ' + Node.Text)
# VB2PY (UntranslatedCode) Attribute VB_Name = "frmMain"
# VB2PY (UntranslatedCode) Attribute VB_GlobalNameSpace = False
# VB2PY (UntranslatedCode) Attribute VB_Creatable = False
# VB2PY (UntranslatedCode) Attribute VB_PredeclaredId = True
# VB2PY (UntranslatedCode) Attribute VB_Exposed = False
if __name__ == '__main__':
app = model.Application(MAINFORM)
app.MainLoop()
|
|
# -*- coding: utf-8 -*-
'''
Installation of Python Packages Using pip
=========================================
These states manage system installed python packages. Note that pip must be
installed for these states to be available, so pip states should include a
requisite to a pkg.installed state for the package which provides pip
(``python-pip`` in most cases). Example:
.. code-block:: yaml
python-pip:
pkg.installed
virtualenvwrapper:
pip.installed:
- require:
- pkg: python-pip
'''
# Import python libs
from __future__ import absolute_import
import logging
# Import salt libs
import salt.utils
from salt.version import SaltStackVersion as _SaltStackVersion
from salt.exceptions import CommandExecutionError, CommandNotFoundError
# Import 3rd-party libs
import salt.ext.six as six
# pylint: disable=import-error
try:
import pip
HAS_PIP = True
except ImportError:
HAS_PIP = False
if HAS_PIP is True:
try:
import pip.req
except ImportError:
HAS_PIP = False
# Remove references to the loaded pip module above so reloading works
import sys
del pip
if 'pip' in sys.modules:
del sys.modules['pip']
ver = pip.__version__.split('.')
pip_ver = tuple([int(x) for x in ver if x.isdigit()])
if pip_ver >= (8, 0, 0):
from pip.exceptions import InstallationError
else:
InstallationError = ValueError
# pylint: enable=import-error
logger = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'pip'
def __virtual__():
'''
Only load if the pip module is available in __salt__
'''
if 'pip.list' in __salt__:
return __virtualname__
return False
def _find_key(prefix, pip_list):
'''
Does a case-insensitive match in the pip_list for the desired package.
'''
try:
match = next(
iter(x for x in pip_list if x.lower() == prefix.lower())
)
except StopIteration:
return None
else:
return match
def _fulfills_version_spec(version, version_spec):
'''
Check version number against version specification info and return a
boolean value based on whether or not the version number meets the
specified version.
'''
for oper, spec in version_spec:
if oper is None:
continue
if not salt.utils.compare_versions(ver1=version, oper=oper, ver2=spec):
return False
return True
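# Hedged illustration (relies on salt.utils.compare_versions, imported above):
# pip parses a specification such as ">=1.6,<=1.7" into
# [('>=', '1.6'), ('<=', '1.7')]; 1.6.5 satisfies it while 1.8 does not.
def _fulfills_version_spec_example():
    spec = [('>=', '1.6'), ('<=', '1.7')]
    return (_fulfills_version_spec('1.6.5', spec),  # True
            _fulfills_version_spec('1.8', spec))    # False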
def _check_pkg_version_format(pkg):
'''
Takes a package name and version specification (if any) and checks it using
the pip library.
'''
ret = {'result': False, 'comment': None,
'prefix': None, 'version_spec': None}
if not HAS_PIP:
ret['comment'] = (
'An importable pip module is required but could not be found on '
'your system. This usually means that the system\'s pip package '
'is not installed properly.'
)
return ret
from_vcs = False
try:
# Get the requirement object from the pip library
try:
# With pip < 1.2, the __version__ attribute does not exist and
# vcs+URL urls are not properly parsed.
# The next line is meant to trigger an AttributeError and
# handle lower pip versions
logger.debug(
'Installed pip version: {0}'.format(pip.__version__)
)
install_req = pip.req.InstallRequirement.from_line(pkg)
except AttributeError:
logger.debug('Installed pip version is lower than 1.2')
supported_vcs = ('git', 'svn', 'hg', 'bzr')
if pkg.startswith(supported_vcs):
for vcs in supported_vcs:
if pkg.startswith(vcs):
from_vcs = True
install_req = pip.req.InstallRequirement.from_line(
pkg.split('{0}+'.format(vcs))[-1]
)
break
else:
install_req = pip.req.InstallRequirement.from_line(pkg)
except (ValueError, InstallationError) as exc:
ret['result'] = False
if not from_vcs and '=' in pkg and '==' not in pkg:
ret['comment'] = (
'Invalid version specification in package {0}. \'=\' is '
'not supported, use \'==\' instead.'.format(pkg)
)
return ret
ret['comment'] = (
'pip raised an exception while parsing {0!r}: {1}'.format(
pkg, exc
)
)
return ret
if install_req.req is None:
# This is most likely an url and there's no way to know what will
# be installed before actually installing it.
ret['result'] = True
ret['prefix'] = ''
ret['version_spec'] = []
else:
ret['result'] = True
ret['prefix'] = install_req.req.project_name
ret['version_spec'] = install_req.req.specs
return ret
def _check_if_installed(prefix, state_pkg_name, version_spec,
ignore_installed, force_reinstall,
upgrade, user, cwd, bin_env):
# result: None means the command failed to run
# result: True means the package is installed
# result: False means the package is not installed
ret = {'result': False, 'comment': None}
# Check if the requested package is already installed.
try:
pip_list = __salt__['pip.list'](prefix, bin_env=bin_env,
user=user, cwd=cwd)
prefix_realname = _find_key(prefix, pip_list)
except (CommandNotFoundError, CommandExecutionError) as err:
ret['result'] = None
ret['comment'] = 'Error installing {0!r}: {1}'.format(state_pkg_name,
err)
return ret
# If the package was already installed, check
# the ignore_installed and force_reinstall flags
if ignore_installed is False and prefix_realname is not None:
if force_reinstall is False and not upgrade:
# Check desired version (if any) against currently-installed
if (
any(version_spec) and
_fulfills_version_spec(pip_list[prefix_realname],
version_spec)
) or (not any(version_spec)):
ret['result'] = True
ret['comment'] = ('Python package {0} was already '
'installed'.format(state_pkg_name))
return ret
return ret
def installed(name,
pkgs=None,
pip_bin=None,
requirements=None,
env=None,
bin_env=None,
use_wheel=False,
no_use_wheel=False,
log=None,
proxy=None,
timeout=None,
repo=None,
editable=None,
find_links=None,
index_url=None,
extra_index_url=None,
no_index=False,
mirrors=None,
build=None,
target=None,
download=None,
download_cache=None,
source=None,
upgrade=False,
force_reinstall=False,
ignore_installed=False,
exists_action=None,
no_deps=False,
no_install=False,
no_download=False,
install_options=None,
global_options=None,
user=None,
no_chown=False,
cwd=None,
activate=False,
pre_releases=False,
cert=None,
allow_all_external=False,
allow_external=None,
allow_unverified=None,
process_dependency_links=False,
env_vars=None,
use_vt=False,
trusted_host=None):
'''
Make sure the package is installed
name
The name of the python package to install. You can also specify version
numbers here using the standard operators ``==, >=, <=``. If
``requirements`` is given, this parameter will be ignored.
Example:
.. code-block:: yaml
django:
pip.installed:
- name: django >= 1.6, <= 1.7
- require:
- pkg: python-pip
This will install the latest Django version greater than 1.6 but less
than 1.7.
requirements
Path to a pip requirements file. If the path begins with salt://
the file will be transferred from the master file server.
user
The user under which to run pip
use_wheel : False
Prefer wheel archives (requires pip>=1.4)
no_use_wheel : False
Force to not use wheel archives (requires pip>=1.4)
log
Log file where a complete (maximum verbosity) record will be kept
proxy
Specify a proxy in the form
user:passwd@proxy.server:port. Note that the
user:password@ is optional and required only if you
are behind an authenticated proxy. If you provide
user@proxy.server:port then you will be prompted for a
password.
timeout
Set the socket timeout (default 15 seconds)
editable
install something editable (i.e.
git+https://github.com/worldcompany/djangoembed.git#egg=djangoembed)
find_links
URL to look for packages at
index_url
Base URL of Python Package Index
extra_index_url
Extra URLs of package indexes to use in addition to ``index_url``
no_index
Ignore package index
mirrors
Specific mirror URL(s) to query (automatically adds --use-mirrors)
build
Unpack packages into ``build`` dir
target
Install packages into ``target`` dir
download
Download packages into ``download`` instead of installing them
download_cache
Cache downloaded packages in ``download_cache`` dir
source
Check out ``editable`` packages into ``source`` dir
upgrade
Upgrade all packages to the newest available version
force_reinstall
When upgrading, reinstall all packages even if they are already
up-to-date.
ignore_installed
Ignore the installed packages (reinstalling instead)
exists_action
Default action when a path already exists: (s)witch, (i)gnore, (w)ipe,
(b)ackup
no_deps
Ignore package dependencies
no_install
Download and unpack all packages, but don't actually install them
no_chown
When user is given, do not attempt to copy and chown
a requirements file
cwd
Current working directory to run pip from
activate
Activates the virtual environment, if given via bin_env,
before running install.
.. deprecated:: 2014.7.2
If `bin_env` is given, pip will already be sourced from that
virtualenv, making `activate` effectively a noop.
pre_releases
Include pre-releases in the available versions
cert
Provide a path to an alternate CA bundle
allow_all_external
Allow the installation of all externally hosted files
allow_external
Allow the installation of externally hosted files (comma separated list)
allow_unverified
Allow the installation of insecure and unverifiable files (comma separated list)
process_dependency_links
Enable the processing of dependency links
bin_env : None
Absolute path to a virtual environment directory or absolute path to
a pip executable. The example below assumes a virtual environment
has been created at ``/foo/.virtualenvs/bar``.
env_vars
Add or modify environment variables. Useful for tweaking build steps,
such as specifying INCLUDE or LIBRARY paths in Makefiles, build scripts or
compiler calls. This must be in the form of a dictionary or a mapping.
Example:
.. code-block:: yaml
django:
pip.installed:
- name: django_app
- env_vars:
CUSTOM_PATH: /opt/django_app
VERBOSE: True
use_vt
Use VT terminal emulation (see output while installing)
trusted_host
Mark this host as trusted, even though it does not have valid or any
HTTPS.
Example:
.. code-block:: yaml
django:
pip.installed:
- name: django >= 1.6, <= 1.7
- bin_env: /foo/.virtualenvs/bar
- require:
- pkg: python-pip
Or
Example:
.. code-block:: yaml
django:
pip.installed:
- name: django >= 1.6, <= 1.7
- bin_env: /foo/.virtualenvs/bar/bin/pip
- require:
- pkg: python-pip
.. admonition:: Attention
The following arguments are deprecated, do not use.
pip_bin : None
Deprecated, use ``bin_env``
env : None
Deprecated, use ``bin_env``
.. versionchanged:: 0.17.0
``use_wheel`` option added.
install_options
Extra arguments to be supplied to the setup.py install command.
If you are using an option with a directory path, be sure to use
absolute path.
Example:
.. code-block:: yaml
django:
pip.installed:
- name: django
- install_options:
- --prefix=/blah
- require:
- pkg: python-pip
global_options
Extra global options to be supplied to the setup.py call before the
install command.
.. versionadded:: 2014.1.3
.. admonition:: Attention
As of Salt 0.17.0 the pip state **needs** an importable pip module.
This usually means having the system's pip package installed or running
Salt from an active `virtualenv`_.
The reason for this requirement is because ``pip`` already does a
pretty good job parsing its own requirements. It makes no sense for
Salt to do ``pip`` requirements parsing and validation before passing
them to the ``pip`` library. It's functionality duplication and it's
more error prone.
.. admonition:: Attention
Please set ``reload_modules: True`` to have the salt minion
import this module after installation.
Example:
.. code-block:: yaml
pyopenssl:
pip.installed:
- name: pyOpenSSL
- reload_modules: True
- exists_action: i
.. _`virtualenv`: http://www.virtualenv.org/en/latest/
'''
if pip_bin and not bin_env:
bin_env = pip_bin
elif env and not bin_env:
bin_env = env
# If pkgs is present, ignore name
if pkgs:
if not isinstance(pkgs, list):
return {'name': name,
'result': False,
'changes': {},
'comment': 'pkgs argument must be formatted as a list'}
else:
pkgs = [name]
# Assumption: If `pkg` is not an `string`, it's a `collections.OrderedDict`
# prepro = lambda pkg: pkg if type(pkg) == str else \
# ' '.join((pkg.items()[0][0], pkg.items()[0][1].replace(',', ';')))
# pkgs = ','.join([prepro(pkg) for pkg in pkgs])
prepro = lambda pkg: pkg if isinstance(pkg, str) else \
' '.join(next(six.iteritems(pkg)))
pkgs = [prepro(pkg) for pkg in pkgs]
ret = {'name': ';'.join(pkgs), 'result': None,
'comment': '', 'changes': {}}
# Check that the pip binary supports the 'use_wheel' option
if use_wheel:
min_version = '1.4'
cur_version = __salt__['pip.version'](bin_env)
if not salt.utils.compare_versions(ver1=cur_version, oper='>=',
ver2=min_version):
ret['result'] = False
ret['comment'] = ('The \'use_wheel\' option is only supported in '
'pip {0} and newer. The version of pip detected '
'was {1}.').format(min_version, cur_version)
return ret
# Check that the pip binary supports the 'no_use_wheel' option
if no_use_wheel:
min_version = '1.4'
cur_version = __salt__['pip.version'](bin_env)
if not salt.utils.compare_versions(ver1=cur_version, oper='>=',
ver2=min_version):
ret['result'] = False
ret['comment'] = ('The \'no_use_wheel\' option is only supported in '
'pip {0} and newer. The version of pip detected '
'was {1}.').format(min_version, cur_version)
return ret
# Deprecation warning for the repo option
if repo is not None:
msg = ('The \'repo\' argument to pip.installed is deprecated and will '
'be removed in Salt {version}. Please use \'name\' instead. '
'The current value for name, {0!r} will be replaced by the '
'value of repo, {1!r}'.format(
name,
repo,
version=_SaltStackVersion.from_name('Lithium').formatted_version
))
salt.utils.warn_until('Lithium', msg)
ret.setdefault('warnings', []).append(msg)
name = repo
    # Get each package's parsed name and version from the pip library.
    # This is only done when there is no requirements or editable parameter.
pkgs_details = []
if pkgs and not (requirements or editable):
comments = []
for pkg in iter(pkgs):
out = _check_pkg_version_format(pkg)
if out['result'] is False:
ret['result'] = False
comments.append(out['comment'])
elif out['result'] is True:
pkgs_details.append((out['prefix'], pkg, out['version_spec']))
if ret['result'] is False:
ret['comment'] = '\n'.join(comments)
return ret
# If a requirements file is specified, only install the contents of the
# requirements file. Similarly, using the --editable flag with pip should
# also ignore the "name" and "pkgs" parameters.
target_pkgs = []
already_installed_comments = []
if requirements or editable:
comments = []
# Append comments if this is a dry run.
if __opts__['test']:
ret['result'] = None
if requirements:
# TODO: Check requirements file against currently-installed
# packages to provide more accurate state output.
comments.append('Requirements file {0!r} will be '
'processed.'.format(requirements))
if editable:
comments.append(
'Package will be installed in editable mode (i.e. '
'setuptools "develop mode") from {0}.'.format(editable)
)
ret['comment'] = ' '.join(comments)
return ret
# No requirements case.
# Check pre-existence of the requested packages.
else:
for prefix, state_pkg_name, version_spec in pkgs_details:
if prefix:
out = _check_if_installed(prefix, state_pkg_name, version_spec,
ignore_installed, force_reinstall,
upgrade, user, cwd, bin_env)
                # If the _check_if_installed result is None, something went
                # wrong while running the command, so abort and report it.
if out['result'] is None:
ret['result'] = False
ret['comment'] = out['comment']
return ret
else:
out = {'result': False, 'comment': None}
result = out['result']
# The package is not present. Add it to the pkgs to install.
if result is False:
                # Replace commas (used to separate version specifiers) with
                # semicolons in the name, so that the comma-joined string
                # later passed to pip.install is not split into multiple
                # packages.
target_pkgs.append((prefix, state_pkg_name.replace(',', ';')))
# Append comments if this is a dry run.
if __opts__['test']:
msg = 'Python package {0} is set to be installed'
ret['result'] = None
ret['comment'] = msg.format(state_pkg_name)
return ret
# The package is already present and will not be reinstalled.
elif result is True:
# Append comment stating its presence
already_installed_comments.append(out['comment'])
# The command pip.list failed. Abort.
elif result is None:
ret['result'] = None
ret['comment'] = out['comment']
return ret
# Construct the string that will get passed to the install call
pkgs_str = ','.join([state_name for _, state_name in target_pkgs])
# Call to install the package. Actual installation takes place here
pip_install_call = __salt__['pip.install'](
pkgs='{0}'.format(pkgs_str) if pkgs_str else '',
requirements=requirements,
bin_env=bin_env,
use_wheel=use_wheel,
no_use_wheel=no_use_wheel,
log=log,
proxy=proxy,
timeout=timeout,
editable=editable,
find_links=find_links,
index_url=index_url,
extra_index_url=extra_index_url,
no_index=no_index,
mirrors=mirrors,
build=build,
target=target,
download=download,
download_cache=download_cache,
source=source,
upgrade=upgrade,
force_reinstall=force_reinstall,
ignore_installed=ignore_installed,
exists_action=exists_action,
no_deps=no_deps,
no_install=no_install,
no_download=no_download,
install_options=install_options,
global_options=global_options,
user=user,
no_chown=no_chown,
cwd=cwd,
activate=activate,
pre_releases=pre_releases,
cert=cert,
allow_all_external=allow_all_external,
allow_external=allow_external,
allow_unverified=allow_unverified,
process_dependency_links=process_dependency_links,
saltenv=__env__,
env_vars=env_vars,
use_vt=use_vt,
trusted_host=trusted_host
)
# Check the retcode for success, but don't fail if using pip1 and the package is
# already present. Pip1 returns a retcode of 1 (instead of 0 for pip2) if you run
# "pip install" without any arguments. See issue #21845.
if pip_install_call and \
(pip_install_call.get('retcode', 1) == 0 or pip_install_call.get('stdout', '').startswith(
'You must give at least one requirement to install')):
ret['result'] = True
if requirements or editable:
comments = []
if requirements:
for line in pip_install_call.get('stdout', '').split('\n'):
if not line.startswith('Requirement already satisfied') \
and line != 'Cleaning up...':
ret['changes']['requirements'] = True
if ret['changes'].get('requirements'):
comments.append('Successfully processed requirements file '
'{0}.'.format(requirements))
else:
comments.append('Requirements were already installed.')
if editable:
comments.append('Package successfully installed from VCS '
'checkout {0}.'.format(editable))
ret['changes']['editable'] = True
ret['comment'] = ' '.join(comments)
else:
# Check that the packages set to be installed were installed.
# Create comments reporting success and failures
pkg_404_comms = []
for prefix, state_name in target_pkgs:
# Case for packages that are not an URL
if prefix:
pipsearch = __salt__['pip.list'](prefix, bin_env,
user=user, cwd=cwd)
                    # If we didn't find the package on the system after
                    # installing it, report it
                    if not pipsearch:
                        pkg_404_comms.append(
                            'There was no error installing package \'{0}\' '
                            'although it does not show when calling '
                            '\'pip.freeze\'.'.format(prefix)
                        )
else:
pkg_name = _find_key(prefix, pipsearch)
ver = pipsearch[pkg_name]
ret['changes']['{0}=={1}'.format(pkg_name,
ver)] = 'Installed'
# Case for packages that are an URL
else:
ret['changes']['{0}==???'.format(state_name)] = 'Installed'
# Set comments
aicomms = '\n'.join(already_installed_comments)
succ_comm = 'All packages were successfully installed'\
if not pkg_404_comms else '\n'.join(pkg_404_comms)
ret['comment'] = aicomms + ('\n' if aicomms else '') + succ_comm
return ret
elif pip_install_call:
ret['result'] = False
if 'stdout' in pip_install_call:
error = 'Error: {0} {1}'.format(pip_install_call['stdout'],
pip_install_call['stderr'])
else:
error = 'Error: {0}'.format(pip_install_call['comment'])
if requirements or editable:
comments = []
if requirements:
comments.append('Unable to process requirements file '
'"{0}".'.format(requirements))
if editable:
                comments.append('Unable to install from VCS checkout '
                                '{0}.'.format(editable))
comments.append(error)
ret['comment'] = ' '.join(comments)
else:
pkgs_str = ', '.join([state_name for _, state_name in target_pkgs])
aicomms = '\n'.join(already_installed_comments)
error_comm = ('Failed to install packages: {0}. '
'{1}'.format(pkgs_str, error))
ret['comment'] = aicomms + ('\n' if aicomms else '') + error_comm
else:
ret['result'] = False
ret['comment'] = 'Could not install package'
return ret
def removed(name,
requirements=None,
bin_env=None,
log=None,
proxy=None,
timeout=None,
user=None,
cwd=None,
use_vt=False):
'''
Make sure that a package is not installed.
name
The name of the package to uninstall
user
The user under which to run pip
    bin_env : None
        The pip executable or virtualenv to use
    use_vt
        Use VT terminal emulation (see output while uninstalling)
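
    Example (an illustrative sketch; the package name is hypothetical):

    .. code-block:: yaml

        obsolete-package:
          pip.removed:
            - name: obsolete_package
            - bin_env: /foo/.virtualenvs/bar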
'''
ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}
try:
pip_list = __salt__['pip.list'](bin_env=bin_env, user=user, cwd=cwd)
except (CommandExecutionError, CommandNotFoundError) as err:
ret['result'] = False
ret['comment'] = 'Error uninstalling \'{0}\': {1}'.format(name, err)
return ret
if name not in pip_list:
ret['result'] = True
ret['comment'] = 'Package is not installed.'
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Package {0} is set to be removed'.format(name)
return ret
if __salt__['pip.uninstall'](pkgs=name,
requirements=requirements,
bin_env=bin_env,
log=log,
proxy=proxy,
timeout=timeout,
user=user,
cwd=cwd,
use_vt=use_vt):
ret['result'] = True
ret['changes'][name] = 'Removed'
ret['comment'] = 'Package was successfully removed.'
else:
ret['result'] = False
ret['comment'] = 'Could not remove package.'
return ret
def uptodate(name,
bin_env=None,
user=None,
cwd=None,
use_vt=False):
'''
.. versionadded:: 2015.5.0
    Verify that all Python packages in the given environment are up to date.
name
The name has no functional value and is only used as a tracking
reference
user
The user under which to run pip
    bin_env
        The pip executable or virtualenv to use
    use_vt
        Use VT terminal emulation (see output while upgrading)
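
    Example (an illustrative sketch; the state ID and virtualenv path are
    arbitrary):

    .. code-block:: yaml

        venv-up-to-date:
          pip.uptodate:
            - bin_env: /foo/.virtualenvs/bar
            - use_vt: True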
'''
ret = {'name': name,
'changes': {},
'result': False,
'comment': 'Failed to update.'}
try:
packages = __salt__['pip.list_upgrades'](bin_env=bin_env, user=user, cwd=cwd)
except Exception as e:
ret['comment'] = str(e)
return ret
if not packages:
ret['comment'] = 'System is already up-to-date.'
ret['result'] = True
return ret
elif __opts__['test']:
ret['comment'] = 'System update will be performed'
ret['result'] = None
return ret
updated = __salt__['pip.upgrade'](bin_env=bin_env, user=user, cwd=cwd, use_vt=use_vt)
if updated.get('result') is False:
ret.update(updated)
elif updated:
ret['changes'] = updated
ret['comment'] = 'Upgrade successful.'
ret['result'] = True
else:
ret['comment'] = 'Upgrade failed.'
return ret
|
|
#!/usr/bin/env python
from django.db import models
import json
import os.path
import time
from datetime import datetime, timedelta
import random
from .resources import Project, Account, Allocation
from qmpy.analysis.vasp import Calculation
from qmpy.db.custom import *
import qmpy
class TaskError(Exception):
"""A project was needed but not provided"""
class ResourceUnavailableError(Exception):
"""Resource is occupied"""
class Task(models.Model):
"""
    Model for a :Task: to be done.

    A :Task: consists of a module, which is the name
    of a computing script, and a set of keyword arguments, specified as a
    dictionary in the `kwargs` attribute of the task. In order for a Task to
    be completed, it must also be assigned one or more :Project:s.
Relationships:
| :mod:`~qmpy.Entry` via entry
| :mod:`~qmpy.Job` via job_set
| :mod:`~qmpy.Project` via project_set
Attributes:
| id
| created: datetime object for when the task was created.
| finished: datetime object for when the task was completed.
| module: The name of a function in :mod:`~qmpy.computing.scripts`
| kwargs: dict of keyword:value pairs to pass to the calculation
| module.
| priority: Priority of the task. Lower values are more urgent.
| state: State code, given by the table below.
Task codes:
+------+-------------------+
| Code | Description |
+======+===================+
| -2 | being held |
+------+-------------------+
| -1 | encountered error |
+------+-------------------+
| 0 | ready to run |
+------+-------------------+
| 1 | jobs running |
+------+-------------------+
| 2 | completed |
+------+-------------------+
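
    Example (illustrative; assumes an existing :mod:`~qmpy.Entry` instance
    named ``entry``, and the priority value is arbitrary)::

        task = Task.create(entry, module="static", priority=10)
        task.save()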
"""
module = models.CharField(max_length=60)
kwargs = DictField()
state = models.IntegerField(default=0)
priority = models.IntegerField(default=50)
created = models.DateTimeField(blank=True, auto_now_add=True)
finished = models.DateTimeField(blank=True, null=True)
entry = models.ForeignKey("Entry", on_delete=models.CASCADE)
project_set = models.ManyToManyField(Project)
_projects = None
class Meta:
app_label = "qmpy"
db_table = "tasks"
def save(self, *args, **kwargs):
super(Task, self).save(*args, **kwargs)
self.project_set.set([Project.get(p) for p in self.projects])
@property
def projects(self):
"""List of related projects."""
if self._projects is None:
self._projects = list(self.project_set.all())
return self._projects
@projects.setter
def projects(self, projects):
self._projects = projects
def get_project(self):
projects = self.project_set.filter(state=1)
projects = [p for p in projects if p.active]
if not projects:
return
return random.choice(projects)
@property
def eligible_to_run(self):
if self.state != 0:
return False
if self.entry.holds:
return False
return True
@staticmethod
    def create(entry, module="static", kwargs=None, priority=None, projects=None):
        # Avoid a mutable default argument for `kwargs`
        if kwargs is None:
            kwargs = {}
        if projects is None:
            projects = entry.projects
        elif isinstance(projects, str):
            # A single project name was given; wrap the Project in a list so
            # that the `projects` setter and `+=` below keep working.
            projects = [Project.objects.get(name=projects)]
if priority is None:
priority = len(entry.input)
task, created = Task.objects.get_or_create(
entry=entry, kwargs=kwargs, module=module
)
if created:
task.projects = projects
else:
task.projects += projects
task.priority = priority
return task
def complete(self):
"""Sets the Task state to 2 and populates the finished field."""
self.state = 2
self.finished = datetime.now()
def hold(self):
self.state = -2
def fail(self):
self.state = -1
def __str__(self):
return "%s (%s: %s)" % (self.module, self.entry, self.entry.path)
@property
def jobs(self):
"""List of jobs related to the task."""
return self.job_set.all()
@property
def last_job_state(self):
if self.job_set.all():
return self.job_set.all().order_by("-id")[0].state
@property
def errors(self):
"""List of errors encountered by related calculations."""
return self.entry.errors
def get_jobs(self, project=None, allocation=None, account=None, host=None):
"""
        Checks the calculation module specified by the `Task` and returns
        a list of :class:`Job` objects accordingly.
Calls the task's entry's "do" method with the `Task.module` as the
first argument, and passing `Task.kwargs` as keyword arguments.
Returns:
            List of Job objects. When nothing is left to do for the
            task, an empty list is returned.
Raises:
ResourceUnavailableError:
                Raised if there are no available cores for the specified
                project, allocation, account and/or host.
"""
        if host is not None:
if not project:
projects = self.project_set.filter(allocations__host=host, state=1)
project = random.choice(list(projects))
if not allocation:
allocations = project.allocations.filter(host=host, state=1)
allocation = random.choice(list(allocations))
        elif project is not None:
allocation = project.get_allocation()
if not allocation:
raise ResourceUnavailableError
else:
project = self.get_project()
if account is None:
if project is None:
account = allocation.get_account()
            elif allocation is not None:
account = allocation.get_account(users=list(project.users.all()))
calc = self.entry.do(self.module, **self.kwargs)
# Special case: Adjustments for certain clusters
        if allocation is not None:
if host.name == "quest":
# Special MPI call for quest Slurm
calc.instructions["mpi"] = "mpirun -np $NPROCS"
if allocation.name == "b1004":
# Can only run parallel VASP on b1004 allocation
calc.instructions["serial"] = False
calc.instructions["binary"] = "vasp_53"
calc.instructions[
"queuetype"
] = "buyin" # queue type for b1004 is 'buyin'
elif allocation.name == "d20829":
# Sheel doesn't have access to b1004 binaries
calc.instructions["binary"] = "~/vasp_53"
calc.instructions["queuetype"] = "normal"
elif allocation.name == "p30919":
calc.instructions["queuetype"] = "short"
calc.instructions["serial"] = False
calc.instructions["nodes"] = 1
calc.instructions["ntasks"] = 4
calc.instructions["walltime"] = 3600*4
#calc.instructions["binary"] = "vasp_53"
elif allocation.name == "p31151":
calc.instructions["queuetype"] = "short"
calc.instructions["serial"] = False
calc.instructions["nodes"] = 1
calc.instructions["ntasks"] = 4
calc.instructions["walltime"] = 3600*4
elif allocation.name == "p30475":
calc.instructions["queuetype"] = "short"
calc.instructions["serial"] = False
calc.instructions["nodes"] = 1
calc.instructions["ntasks"] = 16
calc.instructions["walltime"] = 3600*4
elif allocation.name == "p30649":
calc.instructions["queuetype"] = "short"
calc.instructions["serial"] = False
if 'fast' in self.entry.keywords:
calc.instructions["nodes"] = 2
calc.instructions["ntasks"] = 32
else:
calc.instructions["nodes"] = 1
calc.instructions["ntasks"] = 16
calc.instructions["walltime"] = 3600*4
calc.settings["kpar"] = 4
elif allocation.name == "p31102":
calc.instructions["queuetype"] = "short"
calc.instructions["serial"] = False
calc.instructions["nodes"] = 1
calc.instructions["ntasks"] = 16
calc.instructions["walltime"] = 3600*4
elif calc.entry.natoms < 9:
calc.instructions["queuetype"] = "short"
calc.instructions["serial"] = False
calc.instructions["nodes"] = 1
calc.instructions["ntasks"] = 4
calc.instructions["walltime"] = 3600*4
elif calc.entry.natoms < 13:
calc.instructions["queuetype"] = "short"
calc.instructions["serial"] = False
calc.instructions["nodes"] = 1
calc.instructions["ntasks"] = 16
calc.instructions["walltime"] = 3600*4
elif Project.get("pyrochlore") in calc.entry.projects:
calc.instructions["queuetype"] = "short"
calc.instructions["serial"] = False
calc.instructions["nodes"] = 1
calc.instructions["ntasks"] = 16
calc.instructions["walltime"] = 3600*4
elif calc.entry.natoms > 19:
calc.instructions["queuetype"] = "normal"
calc.instructions["nodes"] = 1
calc.instructions["ntasks"] = 16
else:
calc.instructions["queuetype"] = "normal"
if allocation.name == "bebop":
# Special MPI call for bebop
calc.instructions["mpi"] = "mpirun -psm2 -np $NPROCS"
if allocation.name == "xsede":
# Special MPI call for xsede
calc.instructions["mpi"] = "mpirun -np $NPROCS"
if allocation.name == "babbage":
# Check if calculation is parallel
if "serial" in calc.instructions and not calc.instructions["serial"]:
# Different MPI call on Babbage
calc.instructions[
"mpi"
] = "mpirun -np $NPROCS -machinefile $PBS_NODEFILE -tmpdir /scratch"
jobs = []
if calc.instructions:
self.state = 1
new_job = Job.create(
task=self,
allocation=allocation,
account=account,
entry=self.entry,
**calc.instructions,
)
jobs.append(new_job)
calc.save()
elif calc.converged:
self.complete()
else:
self.state = -1
return jobs
class Job(models.Model):
"""
Base class for job submitted to a compute cluster.
Relationships:
| :mod:`~qmpy.Task` via task
| :mod:`~qmpy.Account` via account. The account the calculation is
| performed on.
| :mod:`~qmpy.Allocation` via allocation. The allocation on which the
| calculation is being performed.
| :mod:`~qmpy.Entry` via entry
Attributes:
| id
| created: datetime object for when the task was created.
| finished: datetime object for when the task was completed.
| ncpus: # of processors assigned.
| path: Origination path of the calculation.
| run_path: Path of the calculation on the compute resource.
| qid: PBS queue ID number.
| walltime: Max walltime (in seconds).
| state: State code, defined as in the table below.
Job codes
+------+-------------------+
| Code | Description |
+======+===================+
| -1 | encountered error |
+------+-------------------+
| 0 | ready to submit |
+------+-------------------+
| 1 | currently running |
+------+-------------------+
| 2 | completed |
+------+-------------------+
"""
qid = models.IntegerField(default=0)
walltime = models.DateTimeField(blank=True)
path = models.CharField(max_length=200)
run_path = models.CharField(max_length=200)
ncpus = models.IntegerField(blank=True)
created = models.DateTimeField(blank=True, auto_now_add=True)
finished = models.DateTimeField(blank=True, null=True)
state = models.IntegerField(default=0)
task = models.ForeignKey(Task, on_delete=models.CASCADE)
entry = models.ForeignKey("Entry", on_delete=models.CASCADE)
account = models.ForeignKey(Account, on_delete=models.CASCADE)
allocation = models.ForeignKey(Allocation, on_delete=models.CASCADE)
class Meta:
app_label = "qmpy"
db_table = "jobs"
@staticmethod
def create(
task=None,
allocation=None,
entry=None,
account=None,
path=None,
serial=None,
walltime=3600,
queuetype=None,
nodes=None,
ntasks=None,
header=None,
mpi=None,
binary=None,
pipes=None,
footer=None,
):
if entry is None:
entry = task.entry
# assert isinstance(allocation, Allocation)
# assert isinstance(task, Task)
# assert path is not None
# if account is None:
# account = allocation.get_account()
job = Job(
path=path,
walltime=walltime,
allocation=allocation,
account=account,
entry=entry,
task=task,
)
# if walltime < 3600:
# nodes = 1
# ppn = int(walltime/3600.*job.account.host.ppn)
# walltime = walltime/ppn
# else:
# ppn = job.account.host.ppn
# nodes = 1+int(walltime/float(job.account.host.walltime))
# walltime = walltime/float(ppn*nodes)
if serial:
ppn = 1
nodes = 1
walltime = 3600 * 24 * 4
# change queuetype to long for quest machine
if job.allocation.host.name == "quest":
queuetype = "long"
if job.allocation.name == "p20746":
walltime = 3600 * 24
if job.allocation.name == "p20747":
walltime = 3600 * 24
else:
if nodes is None:
nodes = 1
ppn = job.account.host.ppn
if job.allocation.name == "b1004":
ppn = 4
if walltime is None:
walltime = job.account.host.walltime
# < Mohan
# Set a HARD upper bound for walltime
# If longer walltime is needed, please modify the following codes!
walltime = min(walltime, job.account.host.walltime)
# Mohan >
binary = job.account.host.get_binary(binary)
if not binary:
raise AllocationError
sec = timedelta(seconds=walltime)
d = datetime(1, 1, 1) + sec
job.walltime = d
## walltime format for quest is hh:mm:ss (Mohan)
if job.allocation.host.name == "quest":
walltime = "%d:%02d:%02d" % ((d.day - 1) * 24 + d.hour, d.minute, d.second)
else:
walltime = "%02d:%02d:%02d:%02d" % (d.day - 1, d.hour, d.minute, d.second)
if not ntasks and job.allocation.host.name == "quest":
ntasks = nodes * ppn
qp = qmpy.INSTALL_PATH + "/configuration/qfiles/"
        with open(qp + job.account.host.sub_text + ".q", "r") as fh:
            text = fh.read()
qfile = text.format(
host=allocation.host.name,
key=allocation.key,
name=job.description,
queuetype=queuetype,
ntasks=ntasks,
walltime=walltime,
nodes=nodes,
ppn=ppn,
header=header,
mpi=mpi,
binary=binary,
pipes=pipes,
footer=footer,
)
        with open(job.path + "/auto.q", "w") as qf:
            qf.write(qfile)
job.ncpus = ppn * nodes
job.run_path = job.account.run_path + "/" + job.description
return job
@property
def walltime_expired(self):
        # `walltime` is stored as datetime(1, 1, 1) + duration (see create()),
        # so recover the duration before comparing it to the elapsed time.
        elapsed = datetime.now() - self.created
        return elapsed > (self.walltime - datetime(1, 1, 1))
@property
def calculation(self):
try:
return Calculation.objects.get(path=self.path)
        except Exception:
            return None
@property
def subdir(self):
return self.path.replace(self.entry.path, "")
@property
def description(self):
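        # Illustrative example (hypothetical values): an entry with id 123, a
        # job path ending in "/static", and no kwargs yields "123_static".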
uniq = ""
if self.task.kwargs:
uniq = "_" + "_".join(
["%s:%s" % (k, v) for k, v in list(self.task.kwargs.items())]
)
return "{entry}_{subdir}{uniq}".format(
entry=self.entry.id,
subdir=self.subdir.strip("/").replace("/", "_"),
uniq=uniq,
)
def __str__(self):
return "%s on %s" % (self.description, self.account)
def is_done(self):
        # Ensure the calculation has had time to show up in showq
        if datetime.now() - timedelta(seconds=600) < self.created:
return False
# then check to see if it is still there
if self.qid in self.account.host.running_now:
return False
else:
return True
def submit(self):
if not self.account.host.active:
return
self.created = datetime.now()
self.qid = self.account.submit(
path=self.path, run_path=self.run_path, qfile="auto.q"
)
self.task.state = 1
self.state = 1
def collect(self):
self.task.state = 0
self.task.save()
self.state = 2
self.account.copy(
move=True, to="local", destination=self.path, folder=self.run_path, file="*"
)
self.account.execute("rm -rf %s" % self.run_path, ignore_output=True)
self.finished = datetime.now()
self.save()
|
|
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
import datetime
from typing import Any, Dict, Final, List, Protocol, TYPE_CHECKING, Type, TypeVar, Union
from . import utils
from .colour import Colour
__all__ = (
'Embed',
)
class _EmptyEmbed:
def __bool__(self) -> bool:
return False
def __repr__(self) -> str:
return 'Embed.Empty'
def __len__(self) -> int:
return 0
EmptyEmbed: Final = _EmptyEmbed()
class EmbedProxy:
def __init__(self, layer: Dict[str, Any]):
self.__dict__.update(layer)
def __len__(self) -> int:
return len(self.__dict__)
def __repr__(self) -> str:
inner = ', '.join((f'{k}={v!r}' for k, v in self.__dict__.items() if not k.startswith('_')))
return f'EmbedProxy({inner})'
def __getattr__(self, attr: str) -> _EmptyEmbed:
return EmptyEmbed
E = TypeVar('E', bound='Embed')
if TYPE_CHECKING:
from discord.types.embed import Embed as EmbedData, EmbedType
T = TypeVar('T')
MaybeEmpty = Union[T, _EmptyEmbed]
class _EmbedFooterProxy(Protocol):
text: MaybeEmpty[str]
icon_url: MaybeEmpty[str]
class _EmbedFieldProxy(Protocol):
name: MaybeEmpty[str]
value: MaybeEmpty[str]
inline: bool
class _EmbedMediaProxy(Protocol):
url: MaybeEmpty[str]
proxy_url: MaybeEmpty[str]
height: MaybeEmpty[int]
width: MaybeEmpty[int]
class _EmbedVideoProxy(Protocol):
url: MaybeEmpty[str]
height: MaybeEmpty[int]
width: MaybeEmpty[int]
class _EmbedProviderProxy(Protocol):
name: MaybeEmpty[str]
url: MaybeEmpty[str]
class _EmbedAuthorProxy(Protocol):
name: MaybeEmpty[str]
url: MaybeEmpty[str]
icon_url: MaybeEmpty[str]
proxy_icon_url: MaybeEmpty[str]
class Embed:
"""Represents a Discord embed.
.. container:: operations
.. describe:: len(x)
Returns the total size of the embed.
Useful for checking if it's within the 6000 character limit.
.. describe:: bool(b)
Returns whether the embed has any data set.
.. versionadded:: 2.0
Certain properties return an ``EmbedProxy``, a type
    that acts similarly to a regular :class:`dict` except that it uses dotted access,
e.g. ``embed.author.icon_url``. If the attribute
is invalid or empty, then a special sentinel value is returned,
:attr:`Embed.Empty`.
For ease of use, all parameters that expect a :class:`str` are implicitly
    cast to :class:`str` for you.
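
    A minimal, illustrative usage sketch (the title, field values, and colour
    are placeholders):

    .. code-block:: python3

        embed = Embed(title='Status', description='All systems nominal', colour=0x2ecc71)
        embed.add_field(name='Uptime', value='42 days', inline=True)
        embed.set_footer(text='Example footer')
        payload = embed.to_dict()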
Attributes
-----------
title: :class:`str`
The title of the embed.
This can be set during initialisation.
type: :class:`str`
The type of embed. Usually "rich".
This can be set during initialisation.
Possible strings for embed types can be found on discord's
`api docs <https://discord.com/developers/docs/resources/channel#embed-object-embed-types>`_
description: :class:`str`
The description of the embed.
This can be set during initialisation.
url: :class:`str`
The URL of the embed.
This can be set during initialisation.
timestamp: :class:`datetime.datetime`
The timestamp of the embed content. This is an aware datetime.
If a naive datetime is passed, it is converted to an aware
datetime with the local timezone.
colour: Union[:class:`Colour`, :class:`int`]
The colour code of the embed. Aliased to ``color`` as well.
This can be set during initialisation.
Empty
A special sentinel value used by ``EmbedProxy`` and this class
to denote that the value or attribute is empty.
"""
__slots__ = (
'title',
'url',
'type',
'_timestamp',
'_colour',
'_footer',
'_image',
'_thumbnail',
'_video',
'_provider',
'_author',
'_fields',
'description',
)
Empty: Final = EmptyEmbed
def __init__(
self,
*,
colour: Union[int, Colour, _EmptyEmbed] = EmptyEmbed,
color: Union[int, Colour, _EmptyEmbed] = EmptyEmbed,
title: MaybeEmpty[Any] = EmptyEmbed,
type: EmbedType = 'rich',
url: MaybeEmpty[Any] = EmptyEmbed,
description: MaybeEmpty[Any] = EmptyEmbed,
timestamp: datetime.datetime = None,
):
self.colour = colour if colour is not EmptyEmbed else color
self.title = title
self.type = type
self.url = url
self.description = description
if self.title is not EmptyEmbed:
self.title = str(self.title)
if self.description is not EmptyEmbed:
self.description = str(self.description)
if self.url is not EmptyEmbed:
self.url = str(self.url)
if timestamp:
self.timestamp = timestamp
@classmethod
def from_dict(cls: Type[E], data: EmbedData) -> E:
"""Converts a :class:`dict` to a :class:`Embed` provided it is in the
format that Discord expects it to be in.
You can find out about this format in the `official Discord documentation`__.
.. _DiscordDocs: https://discord.com/developers/docs/resources/channel#embed-object
__ DiscordDocs_
Parameters
-----------
data: :class:`dict`
The dictionary to convert into an embed.
"""
# we are bypassing __init__ here since it doesn't apply here
self: E = cls.__new__(cls)
# fill in the basic fields
self.title = data.get('title', EmptyEmbed)
self.type = data.get('type', EmptyEmbed)
self.description = data.get('description', EmptyEmbed)
self.url = data.get('url', EmptyEmbed)
if self.title is not EmptyEmbed:
self.title = str(self.title)
if self.description is not EmptyEmbed:
self.description = str(self.description)
if self.url is not EmptyEmbed:
self.url = str(self.url)
# try to fill in the more rich fields
try:
self._colour = Colour(value=data['color'])
except KeyError:
pass
try:
self._timestamp = utils.parse_time(data['timestamp'])
except KeyError:
pass
for attr in ('thumbnail', 'video', 'provider', 'author', 'fields', 'image', 'footer'):
try:
value = data[attr]
except KeyError:
continue
else:
setattr(self, '_' + attr, value)
return self
def copy(self: E) -> E:
"""Returns a shallow copy of the embed."""
return self.__class__.from_dict(self.to_dict())
def __len__(self) -> int:
total = len(self.title) + len(self.description)
for field in getattr(self, '_fields', []):
total += len(field['name']) + len(field['value'])
try:
footer_text = self._footer['text']
except (AttributeError, KeyError):
pass
else:
total += len(footer_text)
try:
author = self._author
except AttributeError:
pass
else:
total += len(author['name'])
return total
def __bool__(self) -> bool:
return any(
(
self.title,
self.url,
self.description,
self.colour,
self.fields,
self.timestamp,
self.author,
self.thumbnail,
self.footer,
self.image,
self.provider,
self.video,
)
)
@property
def colour(self) -> MaybeEmpty[Colour]:
return getattr(self, '_colour', EmptyEmbed)
@colour.setter
def colour(self, value: Union[int, Colour, _EmptyEmbed]): # type: ignore
if isinstance(value, (Colour, _EmptyEmbed)):
self._colour = value
elif isinstance(value, int):
self._colour = Colour(value=value)
else:
raise TypeError(f'Expected discord.Colour, int, or Embed.Empty but received {value.__class__.__name__} instead.')
color = colour
@property
def timestamp(self) -> MaybeEmpty[datetime.datetime]:
return getattr(self, '_timestamp', EmptyEmbed)
@timestamp.setter
def timestamp(self, value: MaybeEmpty[datetime.datetime]):
if isinstance(value, datetime.datetime):
if value.tzinfo is None:
value = value.astimezone()
self._timestamp = value
elif isinstance(value, _EmptyEmbed):
self._timestamp = value
else:
raise TypeError(f"Expected datetime.datetime or Embed.Empty received {value.__class__.__name__} instead")
@property
def footer(self) -> _EmbedFooterProxy:
"""Returns an ``EmbedProxy`` denoting the footer contents.
See :meth:`set_footer` for possible values you can access.
If the attribute has no value then :attr:`Empty` is returned.
"""
return EmbedProxy(getattr(self, '_footer', {})) # type: ignore
def set_footer(self: E, *, text: MaybeEmpty[Any] = EmptyEmbed, icon_url: MaybeEmpty[Any] = EmptyEmbed) -> E:
"""Sets the footer for the embed content.
This function returns the class instance to allow for fluent-style
chaining.
Parameters
-----------
text: :class:`str`
The footer text.
icon_url: :class:`str`
The URL of the footer icon. Only HTTP(S) is supported.
"""
self._footer = {}
if text is not EmptyEmbed:
self._footer['text'] = str(text)
if icon_url is not EmptyEmbed:
self._footer['icon_url'] = str(icon_url)
return self
def remove_footer(self: E) -> E:
"""Clears embed's footer information.
This function returns the class instance to allow for fluent-style
chaining.
.. versionadded:: 2.0
"""
try:
del self._footer
except AttributeError:
pass
return self
@property
def image(self) -> _EmbedMediaProxy:
"""Returns an ``EmbedProxy`` denoting the image contents.
Possible attributes you can access are:
- ``url``
- ``proxy_url``
- ``width``
- ``height``
If the attribute has no value then :attr:`Empty` is returned.
"""
return EmbedProxy(getattr(self, '_image', {})) # type: ignore
def set_image(self: E, *, url: MaybeEmpty[Any]) -> E:
"""Sets the image for the embed content.
This function returns the class instance to allow for fluent-style
chaining.
.. versionchanged:: 1.4
Passing :attr:`Empty` removes the image.
Parameters
-----------
url: :class:`str`
The source URL for the image. Only HTTP(S) is supported.
"""
if url is EmptyEmbed:
try:
del self._image
except AttributeError:
pass
else:
self._image = {
'url': str(url),
}
return self
@property
def thumbnail(self) -> _EmbedMediaProxy:
"""Returns an ``EmbedProxy`` denoting the thumbnail contents.
Possible attributes you can access are:
- ``url``
- ``proxy_url``
- ``width``
- ``height``
If the attribute has no value then :attr:`Empty` is returned.
"""
return EmbedProxy(getattr(self, '_thumbnail', {})) # type: ignore
def set_thumbnail(self: E, *, url: MaybeEmpty[Any]) -> E:
"""Sets the thumbnail for the embed content.
This function returns the class instance to allow for fluent-style
chaining.
.. versionchanged:: 1.4
Passing :attr:`Empty` removes the thumbnail.
Parameters
-----------
url: :class:`str`
The source URL for the thumbnail. Only HTTP(S) is supported.
"""
if url is EmptyEmbed:
try:
del self._thumbnail
except AttributeError:
pass
else:
self._thumbnail = {
'url': str(url),
}
return self
@property
def video(self) -> _EmbedVideoProxy:
"""Returns an ``EmbedProxy`` denoting the video contents.
Possible attributes include:
- ``url`` for the video URL.
- ``height`` for the video height.
- ``width`` for the video width.
If the attribute has no value then :attr:`Empty` is returned.
"""
return EmbedProxy(getattr(self, '_video', {})) # type: ignore
@property
def provider(self) -> _EmbedProviderProxy:
"""Returns an ``EmbedProxy`` denoting the provider contents.
The only attributes that might be accessed are ``name`` and ``url``.
If the attribute has no value then :attr:`Empty` is returned.
"""
return EmbedProxy(getattr(self, '_provider', {})) # type: ignore
@property
def author(self) -> _EmbedAuthorProxy:
"""Returns an ``EmbedProxy`` denoting the author contents.
See :meth:`set_author` for possible values you can access.
If the attribute has no value then :attr:`Empty` is returned.
"""
return EmbedProxy(getattr(self, '_author', {})) # type: ignore
def set_author(self: E, *, name: Any, url: MaybeEmpty[Any] = EmptyEmbed, icon_url: MaybeEmpty[Any] = EmptyEmbed) -> E:
"""Sets the author for the embed content.
This function returns the class instance to allow for fluent-style
chaining.
Parameters
-----------
name: :class:`str`
The name of the author.
url: :class:`str`
The URL for the author.
icon_url: :class:`str`
The URL of the author icon. Only HTTP(S) is supported.
"""
self._author = {
'name': str(name),
}
if url is not EmptyEmbed:
self._author['url'] = str(url)
if icon_url is not EmptyEmbed:
self._author['icon_url'] = str(icon_url)
return self
def remove_author(self: E) -> E:
"""Clears embed's author information.
This function returns the class instance to allow for fluent-style
chaining.
.. versionadded:: 1.4
"""
try:
del self._author
except AttributeError:
pass
return self
@property
def fields(self) -> List[_EmbedFieldProxy]:
"""List[Union[``EmbedProxy``, :attr:`Empty`]]: Returns a :class:`list` of ``EmbedProxy`` denoting the field contents.
See :meth:`add_field` for possible values you can access.
If the attribute has no value then :attr:`Empty` is returned.
"""
return [EmbedProxy(d) for d in getattr(self, '_fields', [])] # type: ignore
def add_field(self: E, *, name: Any, value: Any, inline: bool = True) -> E:
"""Adds a field to the embed object.
This function returns the class instance to allow for fluent-style
chaining.
Parameters
-----------
name: :class:`str`
The name of the field.
value: :class:`str`
The value of the field.
inline: :class:`bool`
Whether the field should be displayed inline.
"""
field = {
'inline': inline,
'name': str(name),
'value': str(value),
}
try:
self._fields.append(field)
except AttributeError:
self._fields = [field]
return self
def insert_field_at(self: E, index: int, *, name: Any, value: Any, inline: bool = True) -> E:
"""Inserts a field before a specified index to the embed.
This function returns the class instance to allow for fluent-style
chaining.
.. versionadded:: 1.2
Parameters
-----------
index: :class:`int`
The index of where to insert the field.
name: :class:`str`
The name of the field.
value: :class:`str`
The value of the field.
inline: :class:`bool`
Whether the field should be displayed inline.
"""
field = {
'inline': inline,
'name': str(name),
'value': str(value),
}
try:
self._fields.insert(index, field)
except AttributeError:
self._fields = [field]
return self
def clear_fields(self) -> None:
"""Removes all fields from this embed."""
try:
self._fields.clear()
except AttributeError:
self._fields = []
def remove_field(self, index: int) -> None:
"""Removes a field at a specified index.
If the index is invalid or out of bounds then the error is
silently swallowed.
.. note::
            When deleting a field by index, the indices of the other fields
            shift to fill the gap, just like a regular list.
Parameters
-----------
index: :class:`int`
The index of the field to remove.
"""
try:
del self._fields[index]
except (AttributeError, IndexError):
pass
def set_field_at(self: E, index: int, *, name: Any, value: Any, inline: bool = True) -> E:
"""Modifies a field to the embed object.
The index must point to a valid pre-existing field.
This function returns the class instance to allow for fluent-style
chaining.
Parameters
-----------
index: :class:`int`
The index of the field to modify.
name: :class:`str`
The name of the field.
value: :class:`str`
The value of the field.
inline: :class:`bool`
Whether the field should be displayed inline.
Raises
-------
IndexError
An invalid index was provided.
"""
try:
field = self._fields[index]
except (TypeError, IndexError, AttributeError):
raise IndexError('field index out of range')
field['name'] = str(name)
field['value'] = str(value)
field['inline'] = inline
return self
def to_dict(self) -> EmbedData:
"""Converts this embed object into a dict."""
# add in the raw data into the dict
# fmt: off
result = {
key[1:]: getattr(self, key)
for key in self.__slots__
if key[0] == '_' and hasattr(self, key)
}
# fmt: on
# deal with basic convenience wrappers
try:
colour = result.pop('colour')
except KeyError:
pass
else:
if colour:
result['color'] = colour.value
try:
timestamp = result.pop('timestamp')
except KeyError:
pass
else:
if timestamp:
if timestamp.tzinfo:
result['timestamp'] = timestamp.astimezone(tz=datetime.timezone.utc).isoformat()
else:
result['timestamp'] = timestamp.replace(tzinfo=datetime.timezone.utc).isoformat()
# add in the non raw attribute ones
if self.type:
result['type'] = self.type
if self.description:
result['description'] = self.description
if self.url:
result['url'] = self.url
if self.title:
result['title'] = self.title
return result # type: ignore
|
|
######################
# CARTRIDGE SETTINGS #
######################
# The following settings are already defined in cartridge.shop.defaults
# with default values, but are common enough to be put here, commented
# out, for convenient overriding.
# Sequence of available credit card types for payment.
# SHOP_CARD_TYPES = ("Mastercard", "Visa", "Diners", "Amex")
# Setting to turn on featured images for shop categories. Defaults to False.
# SHOP_CATEGORY_USE_FEATURED_IMAGE = True
# Set an alternative OrderForm class for the checkout process.
# SHOP_CHECKOUT_FORM_CLASS = 'cartridge.shop.forms.OrderForm'
# If True, the checkout process is split into separate
# billing/shipping and payment steps.
SHOP_CHECKOUT_STEPS_SPLIT = True
# If True, the checkout process has a final confirmation step before
# completion.
SHOP_CHECKOUT_STEPS_CONFIRMATION = True
SHOP_DISCOUNT_FIELD_IN_CHECKOUT = True
SHOP_PAYMENT_STEP_ENABLED = False
# Controls the formatting of monetary values according to the locale
# module in the python standard library. If an empty string is
# used, will fall back to the system's locale.
# SHOP_CURRENCY_LOCALE = "CAD"
# Dotted package path and class name of the function that
# is called on submit of the billing/shipping checkout step. This
# is where shipping calculation can be performed and set using the
# function ``cartridge.shop.utils.set_shipping``.
SHOP_HANDLER_BILLING_SHIPPING = "bccf.util.memberutil.billship_handler"
# Dotted package path and class name of the function that
# is called once an order is successful and all of the order
# object's data has been created. This is where any custom order
# processing should be implemented.
SHOP_HANDLER_ORDER = "bccf.util.memberutil.order_handler"
# Dotted package path and class name of the function that
# is called on submit of the payment checkout step. This is where
# integration with a payment gateway should be implemented.
SHOP_HANDLER_PAYMENT = "bccf.util.memberutil.payment_handler"
# Dotted package path and name of the function used to calculate tax.
SHOP_HANDLER_TAX = "bccf.util.memberutil.tax_handler"
# Sequence of value/name pairs for order statuses.
SHOP_ORDER_STATUS_CHOICES = (
(1, "Unprocessed"),
(2, "Processed"),
(3, "Cancelled"),
)
# Sequence of value/name pairs for types of product options,
# e.g. Size, Colour.
# Option names
OPTION_SUBSCRIPTION_TERM = 'Subscription Term'
OPTION_BCCF_VOTING = 'BCCF Voting'
OPTION_CREATE_EVENTS_FOR_PARENTS = 'Create Events for Parents'
OPTION_DIRECTORY_LISTING = 'Directory Listing'
OPTION_STORE_DISCOUNT = 'Store Discount'
SHOP_OPTION_TYPE_CHOICES = [(i+1, label) for i, label in enumerate([
# Period of subscription - annual, quarterly, monthly
OPTION_SUBSCRIPTION_TERM,
# Parent membership perks
OPTION_BCCF_VOTING,
# Professional membership perks
OPTION_CREATE_EVENTS_FOR_PARENTS, # Level 2: accredited programs only; Level 3: +other program types
OPTION_DIRECTORY_LISTING, # Level 1: basic listing; Level 2: Business Card style; Level 3: High Profile Listing
OPTION_STORE_DISCOUNT, # Level 3: 15% discount
])]
def get_option_number(option_name):
for num, name in SHOP_OPTION_TYPE_CHOICES:
if name == option_name:
return num
def get_option_name(option_number):
for num, name in SHOP_OPTION_TYPE_CHOICES:
if num == option_number:
return name
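# Illustrative usage of the helpers above:
#   get_option_number(OPTION_SUBSCRIPTION_TERM) -> 1
#   get_option_name(5) -> 'Store Discount'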
#TAXES
SHOP_USE_WISHLIST = False
######################
# MEZZANINE SETTINGS #
######################
# The following settings are already defined with default values in
# the ``defaults.py`` module within each of Mezzanine's apps, but are
# common enough to be put here, commented out, for convenient
# overriding. Please consult the settings documentation for a full list
# of settings Mezzanine implements:
# http://mezzanine.jupo.org/docs/configuration.html#default-settings
# Controls the ordering and grouping of the admin menu.
#
ADMIN_MENU_ORDER = (
("Content", ("pages.Page", "bccf.BCCFGenericPage", "bccf.BCCFTopic",
"generic.ThreadedComment", ("Media Library", "fb_browse"),)),
("Site", ("sites.Site", "redirects.Redirect", "bccf.Settings", "conf.Setting")),
("Users", ("auth.User", "auth.Group",)),
("Blogs", ("bccf.Blog",)),
("Campaigns", ("bccf.Campaign",)),
("Trainings", ("bccf.Event",)),
("Marquees", ("bccf.HomeMarquee", "bccf.HomeMarqueeSlide", "bccf.PageMarquee", "bccf.PageMarqueeSlide", "bccf.FooterMarquee", "bccf.FooterMarqueeSlide")),
("News", ("news.NewsPost",)),
("Programs", ("bccf.Program", "bccf.ProgramRequest")),
("Resources", ("bccf.Article", "bccf.DownloadableForm", "bccf.Magazine", "bccf.Podcast", "bccf.TipSheet", "bccf.Video")),
("Builder", (("Create Survey", "formable-create-form"), "formable.FormFilled", "formable.FormPublished", "formable.FormStructure", "formable.Question")),
("Forum", ("pybb.Forum", "pybb.Topic", "pybb.Post", "pybb.Profile")),
)
# A three item sequence, each containing a sequence of template tags
# used to render the admin dashboard.
#
# DASHBOARD_TAGS = (
# ("blog_tags.quick_blog", "mezzanine_tags.app_list"),
# ("comment_tags.recent_comments",),
# ("mezzanine_tags.recent_actions",),
# )
# A sequence of templates used by the ``page_menu`` template tag. Each
# item in the sequence is a three item sequence, containing a unique ID
# for the template, a label for the template, and the template path.
# These templates are then available for selection when editing which
# menus a page should appear in. Note that if a menu template is used
# that doesn't appear in this setting, all pages will appear in it.
# PAGE_MENU_TEMPLATES = (
# (1, "Top navigation bar", "pages/menus/dropdown.html"),
# (2, "Left-hand tree", "pages/menus/tree.html"),
# (3, "Footer", "pages/menus/footer.html"),
# )
# A sequence of fields that will be injected into Mezzanine's (or any
# library's) models. Each item in the sequence is a four item sequence.
# The first two items are the dotted path to the model and its field
# name to be added, and the dotted path to the field class to use for
# the field. The third and fourth items are a sequence of positional
# args and a dictionary of keyword args, to use when creating the
# field instance. When specifying the field class, the path
# ``django.models.db.`` can be omitted for regular Django model fields.
#
# EXTRA_MODEL_FIELDS = (
# (
# # Dotted path to field.
# "mezzanine.blog.models.BlogPost.image",
# # Dotted path to field class.
# "somelib.fields.ImageField",
# # Positional args for field class.
# ("Image",),
# # Keyword args for field class.
# {"blank": True, "upload_to": "blog"},
# ),
# # Example of adding a field to *all* of Mezzanine's content types:
# (
# "mezzanine.pages.models.Page.another_field",
# "IntegerField", # 'django.db.models.' is implied if path is omitted.
# ("Another name",),
# {"blank": True, "default": 1},
# ),
# )
# Setting to turn on featured images for blog posts. Defaults to False.
#
# BLOG_USE_FEATURED_IMAGE = True
# If True, the south application will be automatically added to the
# INSTALLED_APPS setting.
USE_SOUTH = True
########################
# MAIN DJANGO SETTINGS #
########################
# People who get code error notifications.
# In the format (('Full Name', '[email protected]'),
# ('Full Name', '[email protected]'))
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = None
# If you set this to True, Django will use timezone-aware datetimes.
USE_TZ = False
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = "en"
# A boolean that turns on/off debug mode. When set to ``True``, stack traces
# are displayed for error pages. Should always be set to ``False`` in
# production. Best set to ``True`` in local_settings.py
#DEBUG = False
# Whether a user's session cookie expires when the Web browser is closed.
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
# Make these unique, and don't share it with anybody.
SECRET_KEY = "ac80eaea-1f51-42ed-ab04-821a5126563f5828551c-1116-44df-9dd4-72809374476d4b168d32-46df-4462-942a-959cdf9c8bcc"
NEVERCACHE_KEY = "2985023f-d904-479b-8c2d-fa0f2034b44f4fb12480-8a99-49e0-88bc-bc763f2245cfa2234156-1a5b-43f5-999c-71bc47751b1a"
# Tuple of IP addresses, as strings, that:
# * See debug comments, when DEBUG is true
# * Receive x-headers
INTERNAL_IPS = ("127.0.0.1", "67.231.18.161")
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
)
AUTHENTICATION_BACKENDS = ("mezzanine.core.auth_backends.MezzanineBackend",)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# The numeric mode to set newly-uploaded files to. The value should be
# a mode you'd pass directly to os.chmod.
FILE_UPLOAD_PERMISSIONS = 0644
#############
# DATABASES #
#############
DATABASES = {
"default": {
# Add "postgresql_psycopg2", "mysql", "sqlite3" or "oracle".
"ENGINE": "django.db.backends.",
# DB name or path to database file if using sqlite3.
"NAME": "",
# Not used with sqlite3.
"USER": "",
# Not used with sqlite3.
"PASSWORD": "",
# Set to empty string for localhost. Not used with sqlite3.
"HOST": "",
# Set to empty string for default. Not used with sqlite3.
"PORT": "",
}
}
#########
# PATHS #
#########
import os
# Full filesystem path to the project.
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
# Name of the directory for the project.
PROJECT_DIRNAME = PROJECT_ROOT.split(os.sep)[-1]
# Every cache key will get prefixed with this value - here we set it to
# the name of the directory the project is in to try and use something
# project specific.
CACHE_MIDDLEWARE_KEY_PREFIX = PROJECT_DIRNAME
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = "/static/"
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, STATIC_URL.strip("/"))
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = STATIC_URL + "media/"
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, *MEDIA_URL.strip("/").split("/"))
# Package/module name to import the root urlpatterns from for the project.
ROOT_URLCONF = "%s.urls" % PROJECT_DIRNAME
# Put strings here, like "/home/html/django_templates"
# or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
TEMPLATE_DIRS = (os.path.join(PROJECT_ROOT, "templates"),)
################
# APPLICATIONS #
################
INSTALLED_APPS = (
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.redirects",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.sitemaps",
"django.contrib.staticfiles",
'ckeditor',
"mezzanine.boot",
"mezzanine.conf",
"mezzanine.core",
"mezzanine.generic",
"mezzanine.forms",
"mezzanine.pages",
#"mezzanine.galleries",
"mezzanine.twitter",
"mezzanine.accounts",
#"mezzanine.blog",
#"mezzanine.mobile",
'news',
'pybb',
'bccf',
"cartridge.shop",
'formable.builder',
# install via pip or easy_install django-form-utils
'form_utils', # required by builder to call template tags
'embed_video',
'django_cron',
'bccf_mc',
)
# List of processors used by RequestContext to populate the context.
# Each one should be a callable that takes the request object as its
# only parameter and returns a dictionary to add to the context.
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.static",
"django.core.context_processors.media",
"django.core.context_processors.request",
"django.core.context_processors.tz",
"mezzanine.conf.context_processors.settings",
'pybb.context_processors.processor',
)
# List of middleware classes to use. Order is important; in the request phase,
# these middleware classes will be applied in the order given, and in the
# response phase the middleware will be applied in reverse order.
MIDDLEWARE_CLASSES = (
"mezzanine.core.middleware.UpdateCacheMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"cartridge.shop.middleware.ShopMiddleware",
"mezzanine.core.request.CurrentRequestMiddleware",
"mezzanine.core.middleware.RedirectFallbackMiddleware",
"mezzanine.core.middleware.TemplateForDeviceMiddleware",
"mezzanine.core.middleware.TemplateForHostMiddleware",
"mezzanine.core.middleware.AdminLoginInterfaceSelectorMiddleware",
"mezzanine.core.middleware.SitePermissionMiddleware",
# Uncomment the following if using any of the SSL settings:
# "mezzanine.core.middleware.SSLRedirectMiddleware",
"mezzanine.pages.middleware.PageMiddleware",
"mezzanine.core.middleware.FetchFromCacheMiddleware",
'pybb.middleware.PybbMiddleware',
)
CRON_CLASSES = [
'bccf.cron.EventPaymentReminder',
'bccf.cron.EventFreeRemind',
'bccf.cron.EventClose',
'bccf.cron.UserMembershipReminder',
'bccf.cron.UserMembershipExpire',
]
# Store these package names here as they may change in the future since
# at the moment we are using custom forks of them.
PACKAGE_NAME_FILEBROWSER = "filebrowser_safe"
PACKAGE_NAME_GRAPPELLI = "grappelli_safe"
def forum_instant_post(user, post):
if user.profile:
return user.profile.can_post_on_forum(post)
return False
PYBB_PREMODERATION = forum_instant_post
PYBB_PROFILE_RELATED_NAME = 'profile'
#########################
# OPTIONAL APPLICATIONS #
#########################
# These will be added to ``INSTALLED_APPS``, only if available.
OPTIONAL_APPS = (
"debug_toolbar",
"django_extensions",
"compressor",
PACKAGE_NAME_FILEBROWSER,
PACKAGE_NAME_GRAPPELLI,
'ckeditor',
)
DEBUG_TOOLBAR_CONFIG = {"INTERCEPT_REDIRECTS": False}
DEBUG_TOOLBAR_PATCH_SETTINGS = True
###################
# DEPLOY SETTINGS #
###################
# These settings are used by the default fabfile.py provided.
# Check fabfile.py for defaults.
# FABRIC = {
# "SSH_USER": "", # SSH username
# "SSH_PASS": "", # SSH password (consider key-based authentication)
# "SSH_KEY_PATH": "", # Local path to SSH key file, for key-based auth
# "HOSTS": [], # List of hosts to deploy to
# "VIRTUALENV_HOME": "", # Absolute remote path for virtualenvs
# "PROJECT_NAME": "", # Unique identifier for project
# "REQUIREMENTS_PATH": "", # Path to pip requirements, relative to project
# "GUNICORN_PORT": 8000, # Port gunicorn will listen on
# "LOCALE": "en_US.UTF-8", # Should end with ".UTF-8"
# "LIVE_HOSTNAME": "www.example.com", # Host for public site.
# "REPO_URL": "", # Git or Mercurial remote repo URL for the project
# "DB_PASS": "", # Live database password
# "ADMIN_PASS": "", # Live admin user password
# "SECRET_KEY": SECRET_KEY,
# "NEVERCACHE_KEY": NEVERCACHE_KEY,
# }
AUTH_PROFILE_MODULE = 'bccf.UserProfile'
ACCOUNTS_PROFILE_FORM_EXCLUDE_FIELDS = [
'membership_order',
'is_forum_moderator',
#'membership_type',
'membership_level',
'requested_cancellation',
'signature',
'signature_html',
'time_zone',
'language',
'show_signatures',
'post_count',
'avatar',
'autosubscribe',
'job_title',
'website',
'facebook',
'twitter',
'linkedin',
'account_number',
'accreditation',
'street',
'street_2',
'street_3',
'fax',
'phone_primary',
'phone_work',
'phone_mobile',
'city',
'province',
'country',
'description',
'organization',
'region',
'gender',
]
ACCOUNTS_PROFILE_VIEWS_ENABLED = True
ACCOUNTS_VERIFICATION_REQUIRED = True
GRAPPELLI_ADMIN_TITLE = 'BCCF'
GRAPPELLI_ADMIN_HEADLINE = 'BCCF'
ALLOWED_HOSTS = ['*']
#######################
# MEMBERSHIP SETTINGS #
#######################
PARENT_MEMBERSHIP_CATEGORY = 'membership-parents'
PROFESSIONAL_MEMBERSHIP_CATEGORY = 'membership-professionals'
ORGANIZATION_MEMBERSHIP_CATEGORY = 'membership-organizations'
CORPORATE_MEMBERSHIP_CATEGORY = 'membership-corporate'
EMPLOYEE_MEMBERSHIP_CATEGORY = 'membership-corporate-employee'
################
# SERVER EMAIL #
################
SERVER_EMAIL = '[email protected]'
ADMIN_EMAIL = '[email protected]'
##################
# LOCAL SETTINGS #
##################
# Allow any settings to be defined in local_settings.py, which should be
# ignored by your version control system, allowing settings to be defined
# per machine.
try:
from local_settings import *
except ImportError:
pass
####################
# DYNAMIC SETTINGS #
####################
# set_dynamic_settings() will rewrite globals based on what has been
# defined so far, in order to provide some better defaults where
# applicable. We also allow this settings module to be imported
# without Mezzanine installed, as may be the case when using the
# fabfile, where setting the dynamic settings below isn't strictly
# required.
try:
from mezzanine.utils.conf import set_dynamic_settings
except ImportError:
pass
else:
set_dynamic_settings(globals())
##################
## OWN SETTINGS ##
##################
import datetime
COMMENTS_USE_RATINGS = False
BCCF_RESOURCE_TYPES = '(article|downloadableform|magazine|podcast|tipsheet|video)'
BCCF_SPECIAL_PAGES = ['trainings','resources','tag','programs']
BCCF_CORE_PAGES = ['trainings','resources','tag','programs','blog','news']
SEARCH_MODEL_CHOICES = (
'bccf.BCCFChildPage',
'bccf.BCCFTopic',
'bccf.BCCFPage',
'shop.product',
)
# CKEditor
CKEDITOR_CONFIGS = {
'default': {
'toolbarGroups': [
{ 'name': 'clipboard', 'groups': [ 'clipboard', 'undo' ] },
{ 'name': 'editing', 'groups': [ 'find', 'selection', 'spellchecker' ] },
{ 'name': 'links' },
{ 'name': 'insert' },
{ 'name': 'forms' },
{ 'name': 'tools' },
{ 'name': 'document', 'groups': [ 'mode', 'document', 'doctools' ] },
{ 'name': 'others' },
'/',
{ 'name': 'basicstyles', 'groups': [ 'basicstyles', 'cleanup' ] },
{ 'name': 'paragraph', 'groups': [ 'list', 'indent', 'blocks', 'align', 'bidi' ] },
{ 'name': 'styles' },
{ 'name': 'colors' },
{ 'name': 'about' }
],
'width': '100%',
'height': 300,
'allowedContent': True,
},
'basic': { # not for superusers
'toolbar': 'Basic',
'toolbarGroups': [
# { 'name': 'clipboard', 'groups': [ 'clipboard', 'undo' ] },
# { 'name': 'editing', 'groups': [ 'find', 'selection', 'spellchecker' ] },
{ 'name': 'links' },
# { 'name': 'insert' },
# { 'name': 'forms' },
# { 'name': 'tools' },
# { 'name': 'document', 'groups': [ 'mode', 'document', 'doctools' ] },
# { 'name': 'others' },
# '/',
{ 'name': 'basicstyles', 'groups': [ 'basicstyles', 'cleanup' ] },
# { 'name': 'paragraph', 'groups': [ 'list', 'indent', 'blocks', 'align', 'bidi' ] },
# { 'name': 'styles' },
# { 'name': 'colors' },
# { 'name': 'about' }
],
'width': '100%',
'height': 300,
'allowedContent': True,
}
}
RICHTEXT_WIDGET_CLASS = 'ckeditor.widgets.CKEditor'
|
|
import datetime
import json
import os
import pickle
import glob
import numpy as np
from dialogue import qa_pipeline, data_helpers
from dialogue.errors import ParameterMissingError
from daphne_context.models import UserInformation, DialogueHistory, DialogueContext
from dialogue.nn_models import nn_models
from .mycroft_utils import forward_to_mycroft
def classify_command_role(command, daphne_version):
cleaned_command = data_helpers.clean_str(command)
# Get model
loaded_model = nn_models[daphne_version]["general"]
# Map data into vocabulary
model_folder_path = os.path.join(os.getcwd(), "dialogue", "models", daphne_version, "general")
vocab_path = os.path.join(model_folder_path, "tokenizer.pickle")
with open(vocab_path, 'rb') as handle:
tokenizer = pickle.load(handle)
    x = tokenizer.texts_to_sequences([cleaned_command])
    # Right-pad the tokenized command with zeros up to the model's fixed input length
    expected_input_length = loaded_model.layers[0].input_shape[0][1]
    x = np.array([x[0] + [0] * (expected_input_length - len(x[0]))])
print("\nEvaluating...\n")
# Evaluation
# ==================================================
# evaluate loaded model on test data
result_logits = loaded_model.predict(x)
prediction = data_helpers.get_label_using_logits(result_logits, top_number=1)
return prediction[0]
def command_type_predictions(processed_command, daphne_version, module_name):
cleaned_question = data_helpers.clean_str(processed_command)
# Get model
loaded_model = nn_models[daphne_version][module_name]
# Map data into vocabulary
model_folder_path = os.path.join(os.getcwd(), "dialogue", "models", daphne_version, module_name)
vocab_path = os.path.join(model_folder_path, "tokenizer.pickle")
with open(vocab_path, 'rb') as handle:
tokenizer = pickle.load(handle)
x = tokenizer.texts_to_sequences([cleaned_question])
expected_input_length = loaded_model.layers[0].input_shape[0][1]
x = np.array([x[0] + [0] * (expected_input_length - len(x[0]))])
print("\nEvaluating...\n")
# Evaluation
# ==================================================
# evaluate loaded model on test data
result_logits = loaded_model.predict(x)
return result_logits
def get_top_types(logits, daphne_version, module_name, top_number):
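    """Map the model's numeric label indices back to command-type identifiers.

    The identifiers are read from the JSON filenames in the module's
    command_types folder (sorted by filename), so index i corresponds to the
    i-th file found there.
    """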
numerical_labels = data_helpers.get_label_using_logits(logits, top_number=top_number)[0]
named_labels = []
type_info_folder = os.path.join(os.getcwd(), daphne_version, "dialogue", "command_types", module_name)
for filename in sorted(glob.glob(os.path.join(type_info_folder, "*.json"))):
specific_label = int(os.path.basename(filename).split('.', 1)[0])
named_labels.append(specific_label)
command_types = []
for label in numerical_labels:
command_types.append(named_labels[label])
return command_types
def error_answers(objective, missing_param):
return {
'voice_answer': 'Sorry, but I can\'t answer your question. I interpreted that you are trying to ' + objective +
'. I can\'t do this because a ' + missing_param + ' parameter doesn\'t have a valid value. If '
'you think I\'m not doing the correct thing, please report this to someone.',
'visual_answer_type': ['text'],
'visual_answer': ['Sorry, but I can\'t answer your question. I interpreted that you are trying to ' + objective +
'. I can\'t do this because a ' + missing_param + ' parameter doesn\'t have a valid value. If '
'you think I\'m not doing the correct thing, please report this to someone.']
}
def not_allowed_condition(user_information: UserInformation, command_class, command_type):
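    """Return True when this command is currently restricted for the user.

    An empty allowedcommand_set means nothing is restricted; otherwise the
    (command_class, command_type) pair must match one of the allowed commands.
    """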
if len(user_information.allowedcommand_set.all()) == 0:
return False
for allowed_command in user_information.allowedcommand_set.all():
if command_class == allowed_command.command_type and command_type == str(allowed_command.command_descriptor):
return False
return True
def not_allowed_answers():
return {
'voice_answer': 'This command is restricted right now.',
'visual_answer_type': ['text'],
'visual_answer': ['This command is restricted right now.']
}
def answer_command(processed_command, question_type, command_class, condition_name, user_info: UserInformation,
context, new_dialogue_contexts, session):
# Create a DialogueContext for the user to fill
answer = command(processed_command, question_type, command_class, condition_name, user_info, context,
new_dialogue_contexts, session)
dialogue_history = DialogueHistory.objects.create(user_information=user_info,
voice_message=answer["voice_answer"],
visual_message_type=json.dumps(answer["visual_answer_type"]),
visual_message=json.dumps(answer["visual_answer"]),
dwriter="daphne",
date=datetime.datetime.utcnow())
forward_to_mycroft(user_info, 'Here is what I have found')
return dialogue_history
def choose_command(command_types, daphne_version, command_role, command_class, context: UserInformation):
# Load information on the three commands
answer = {
'voice_answer': 'I\'m not confident enough in my interpretation of your question. Please help me by choosing'
' what you are trying to accomplish from the following options.',
'visual_answer_type': ['list'],
'visual_answer': [{
"begin": 'I\'m not confident enough in my interpretation of your question. Please help me by choosing'
' what you are trying to accomplish from the following options. You can either click on the '
'objective or type first/second/third',
"list": []
}]
}
for command_type in command_types:
information = qa_pipeline.load_type_info(command_type, daphne_version, command_class)
answer["visual_answer"][0]["list"].append("You want me to " + information["objective"] + ".")
dialogue_history = DialogueHistory.objects.create(user_information=context,
voice_message=answer["voice_answer"],
visual_message_type=json.dumps(answer["visual_answer_type"]),
visual_message=json.dumps(answer["visual_answer"]),
dwriter="daphne",
date=datetime.datetime.utcnow())
DialogueContext.objects.create(dialogue_history=dialogue_history,
is_clarifying_input=True,
clarifying_role=command_role,
clarifying_commands=json.dumps(command_types))
forward_to_mycroft(context, answer["voice_answer"])
def not_answerable(context: UserInformation):
# Load information on the three commands
answer = {
'voice_answer': 'I don\'t understand your command. Please rephrase it.',
'visual_answer_type': ['text'],
'visual_answer': ['I don\'t understand your command. Please rephrase it.']
}
dialogue_history = DialogueHistory.objects.create(user_information=context,
voice_message=answer["voice_answer"],
visual_message_type=json.dumps(answer["visual_answer_type"]),
visual_message=json.dumps(answer["visual_answer"]),
dwriter="daphne",
date=datetime.datetime.utcnow())
DialogueContext.objects.create(dialogue_history=dialogue_history,
is_clarifying_input=False)
forward_to_mycroft(context, answer["voice_answer"])
def command(processed_command, question_type, command_class, condition_name, user_information: UserInformation, context,
new_dialogue_contexts, session):
if not_allowed_condition(user_information, condition_name, str(question_type)):
return not_allowed_answers()
daphne_version = user_information.daphne_version
# Load list of required and optional parameters from question, query and response format for question type
information = qa_pipeline.load_type_info(question_type, daphne_version, command_class)
# Extract required and optional parameters
try:
data = qa_pipeline.extract_data(processed_command, information["params"], user_information, context)
except ParameterMissingError as error:
print(error)
return error_answers(information["objective"], error.missing_param)
# Add extra parameters to data
data = qa_pipeline.augment_data(data, user_information, session)
# Query the database
if information["type"] == "db_query":
results = qa_pipeline.query(information["query"], data, command_class)
elif information["type"] == "run_function":
results = qa_pipeline.run_function(information["function"], data, daphne_version, context, new_dialogue_contexts, user_information, session)
elif information["type"] == "neo4j_query":
results = qa_pipeline.neo4j_query(information["neo4j_query"], data, command_class)
else:
raise ValueError("JSON format not supported!")
# Construct the response from the database query and the response format
answers = qa_pipeline.build_answers(information["voice_response"], information["visual_response"], results, data)
# Return the answer to the client
return answers
def think_response(context: UserInformation):
# TODO: Make this intelligent, e.g. hook this to a rule based engine
db_answer = context.dialoguehistory_set.order_by("-date")[:1].get()
frontend_answer = {
"voice_message": db_answer.voice_message,
"visual_message_type": json.loads(db_answer.visual_message_type),
"visual_message": json.loads(db_answer.visual_message),
"writer": "daphne",
}
return frontend_answer
|
|
import datetime
import string
import random
import re
import sys
from django.core.management.color import no_style
from django.db import transaction, models
from django.db.utils import DatabaseError
from django.db.backends.util import truncate_name
from django.db.backends.creation import BaseDatabaseCreation
from django.db.models.fields import NOT_PROVIDED
from django.dispatch import dispatcher
from django.conf import settings
from django.utils.datastructures import SortedDict
try:
from django.utils.functional import cached_property
except ImportError:
class cached_property(object):
"""
        Decorator that converts a method with a single
        self argument into a property cached on the instance.
"""
def __init__(self, func):
self.func = func
def __get__(self, instance, type):
res = instance.__dict__[self.func.__name__] = self.func(instance)
return res
from south.logger import get_logger
def alias(attrname):
"""
Returns a function which calls 'attrname' - for function aliasing.
We can't just use foo = bar, as this breaks subclassing.
"""
def func(self, *args, **kwds):
return getattr(self, attrname)(*args, **kwds)
return func
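# Example: ``add_table = alias('create_table')`` further down resolves the target
# method with getattr at call time, so subclasses that override create_table are
# still honoured when callers use the add_table name.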
def invalidate_table_constraints(func):
def _cache_clear(self, table, *args, **opts):
self._set_cache(table, value=INVALID)
return func(self, table, *args, **opts)
return _cache_clear
def delete_column_constraints(func):
def _column_rm(self, table, column, *args, **opts):
self._set_cache(table, column, value=[])
return func(self, table, column, *args, **opts)
return _column_rm
def copy_column_constraints(func):
def _column_cp(self, table, column_old, column_new, *args, **opts):
db_name = self._get_setting('NAME')
self._set_cache(table, column_new, value=self.lookup_constraint(db_name, table, column_old))
return func(self, table, column_old, column_new, *args, **opts)
return _column_cp
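# The three decorators above keep self._constraint_cache in step with schema
# changes: invalidate a table's entry on table-level changes, clear a deleted
# column's entry, and copy the old column's constraints across a rename.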
class INVALID(Exception):
def __repr__(self):
return 'INVALID'
class DryRunError(ValueError):
pass
class DatabaseOperations(object):
"""
Generic SQL implementation of the DatabaseOperations.
Some of this code comes from Django Evolution.
"""
alter_string_set_type = 'ALTER COLUMN %(column)s TYPE %(type)s'
alter_string_set_null = 'ALTER COLUMN %(column)s DROP NOT NULL'
alter_string_drop_null = 'ALTER COLUMN %(column)s SET NOT NULL'
delete_check_sql = 'ALTER TABLE %(table)s DROP CONSTRAINT %(constraint)s'
add_column_string = 'ALTER TABLE %s ADD COLUMN %s;'
delete_unique_sql = "ALTER TABLE %s DROP CONSTRAINT %s"
delete_foreign_key_sql = 'ALTER TABLE %(table)s DROP CONSTRAINT %(constraint)s'
max_index_name_length = 63
drop_index_string = 'DROP INDEX %(index_name)s'
delete_column_string = 'ALTER TABLE %s DROP COLUMN %s CASCADE;'
create_primary_key_string = "ALTER TABLE %(table)s ADD CONSTRAINT %(constraint)s PRIMARY KEY (%(columns)s)"
delete_primary_key_sql = "ALTER TABLE %(table)s DROP CONSTRAINT %(constraint)s"
add_check_constraint_fragment = "ADD CONSTRAINT %(constraint)s CHECK (%(check)s)"
rename_table_sql = "ALTER TABLE %s RENAME TO %s;"
backend_name = None
default_schema_name = "public"
# Features
allows_combined_alters = True
supports_foreign_keys = True
has_check_constraints = True
@cached_property
def has_ddl_transactions(self):
"Tests the database using feature detection to see if it has DDL transactional support"
self._possibly_initialise()
connection = self._get_connection()
# Django 1.3's MySQLdb backend doesn't raise DatabaseError
exceptions = (DatabaseError, )
try:
from MySQLdb import OperationalError
exceptions += (OperationalError, )
except ImportError:
pass
# Now do the test
if connection.features.supports_transactions:
cursor = connection.cursor()
self.start_transaction()
cursor.execute('CREATE TABLE DDL_TRANSACTION_TEST (X INT)')
self.rollback_transaction()
try:
cursor.execute('CREATE TABLE DDL_TRANSACTION_TEST (X INT)')
except exceptions:
return False
else:
return True
finally:
cursor.execute('DROP TABLE DDL_TRANSACTION_TEST')
else:
return False
def __init__(self, db_alias):
self.debug = False
self.deferred_sql = []
self.dry_run = False
self.pending_transactions = 0
self.pending_create_signals = []
self.db_alias = db_alias
self._constraint_cache = {}
self._initialised = False
def lookup_constraint(self, db_name, table_name, column_name=None):
""" return a set() of constraints for db_name.table_name.column_name """
def _lookup():
table = self._constraint_cache[db_name][table_name]
if table is INVALID:
raise INVALID
elif column_name is None:
return table.items()
else:
return table[column_name]
try:
ret = _lookup()
return ret
except INVALID, e:
del self._constraint_cache[db_name][table_name]
self._fill_constraint_cache(db_name, table_name)
except KeyError, e:
if self._is_valid_cache(db_name, table_name):
return []
self._fill_constraint_cache(db_name, table_name)
return self.lookup_constraint(db_name, table_name, column_name)
def _set_cache(self, table_name, column_name=None, value=INVALID):
db_name = self._get_setting('NAME')
try:
if column_name is not None:
self._constraint_cache[db_name][table_name][column_name] = value
else:
self._constraint_cache[db_name][table_name] = value
except (LookupError, TypeError):
pass
def _is_valid_cache(self, db_name, table_name):
# we cache per-table so if the table is there it is valid
try:
return self._constraint_cache[db_name][table_name] is not INVALID
except KeyError:
return False
def _is_multidb(self):
try:
from django.db import connections
except ImportError:
return False
else:
return True
def _get_connection(self):
"""
Returns a django connection for a given DB Alias
"""
if self._is_multidb():
from django.db import connections
return connections[self.db_alias]
else:
from django.db import connection
return connection
def _get_setting(self, setting_name):
"""
Allows code to get a setting (like, for example, STORAGE_ENGINE)
"""
setting_name = setting_name.upper()
connection = self._get_connection()
if self._is_multidb():
# Django 1.2 and above
return connection.settings_dict[setting_name]
else:
# Django 1.1 and below
return getattr(settings, "DATABASE_%s" % setting_name)
def _has_setting(self, setting_name):
"""
Existence-checking version of _get_setting.
"""
try:
self._get_setting(setting_name)
except (KeyError, AttributeError):
return False
else:
return True
def _get_schema_name(self):
try:
return self._get_setting('schema')
except (KeyError, AttributeError):
return self.default_schema_name
def _possibly_initialise(self):
if not self._initialised:
self.connection_init()
self._initialised = True
def connection_init(self):
"""
Run before any SQL to let database-specific config be sent as a command,
        e.g. which storage engine to use (MySQL) or the transaction isolation level.
"""
pass
def quote_name(self, name):
"""
Uses the database backend to quote the given table/column name.
"""
return self._get_connection().ops.quote_name(name)
def execute(self, sql, params=[]):
"""
Executes the given SQL statement, with optional parameters.
If the instance's debug attribute is True, prints out what it executes.
"""
self._possibly_initialise()
cursor = self._get_connection().cursor()
if self.debug:
print " = %s" % sql, params
if self.dry_run:
return []
get_logger().debug('execute "%s" with params "%s"' % (sql, params))
try:
cursor.execute(sql, params)
except DatabaseError, e:
print >> sys.stderr, 'FATAL ERROR - The following SQL query failed: %s' % sql
print >> sys.stderr, 'The error was: %s' % e
sys.exit(1)
try:
return cursor.fetchall()
except:
return []
def execute_many(self, sql, regex=r"(?mx) ([^';]* (?:'[^']*'[^';]*)*)", comment_regex=r"(?mx) (?:^\s*$)|(?:--.*$)"):
"""
Takes a SQL file and executes it as many separate statements.
(Some backends, such as Postgres, don't work otherwise.)
"""
# Be warned: This function is full of dark magic. Make sure you really
# know regexes before trying to edit it.
# First, strip comments
sql = "\n".join([x.strip().replace("%", "%%") for x in re.split(comment_regex, sql) if x.strip()])
# Now execute each statement
for st in re.split(regex, sql)[1:][::2]:
self.execute(st)
def add_deferred_sql(self, sql):
"""
        Add a SQL statement to the deferred list; it won't be executed until
this instance's execute_deferred_sql method is run.
"""
self.deferred_sql.append(sql)
def execute_deferred_sql(self):
"""
Executes all deferred SQL, resetting the deferred_sql list
"""
for sql in self.deferred_sql:
self.execute(sql)
self.deferred_sql = []
def clear_deferred_sql(self):
"""
Resets the deferred_sql list to empty.
"""
self.deferred_sql = []
def clear_run_data(self, pending_creates = None):
"""
Resets variables to how they should be before a run. Used for dry runs.
        If you want, pass in an old pending_creates to reset to.
"""
self.clear_deferred_sql()
self.pending_create_signals = pending_creates or []
def get_pending_creates(self):
return self.pending_create_signals
@invalidate_table_constraints
def create_table(self, table_name, fields):
"""
Creates the table 'table_name'. 'fields' is a tuple of fields,
        each represented by a 2-part tuple of field name and a
        django.db.models.fields.Field object.
"""
if len(table_name) > 63:
print " ! WARNING: You have a table name longer than 63 characters; this will not fully work on PostgreSQL or MySQL."
columns = [
self.column_sql(table_name, field_name, field)
for field_name, field in fields
]
self.execute('CREATE TABLE %s (%s);' % (
self.quote_name(table_name),
', '.join([col for col in columns if col]),
))
add_table = alias('create_table') # Alias for consistency's sake
@invalidate_table_constraints
def rename_table(self, old_table_name, table_name):
"""
Renames the table 'old_table_name' to 'table_name'.
"""
if old_table_name == table_name:
# Short-circuit out.
return
params = (self.quote_name(old_table_name), self.quote_name(table_name))
self.execute(self.rename_table_sql % params)
# Invalidate the not-yet-indexed table
self._set_cache(table_name, value=INVALID)
@invalidate_table_constraints
def delete_table(self, table_name, cascade=True):
"""
Deletes the table 'table_name'.
"""
params = (self.quote_name(table_name), )
if cascade:
self.execute('DROP TABLE %s CASCADE;' % params)
else:
self.execute('DROP TABLE %s;' % params)
drop_table = alias('delete_table')
@invalidate_table_constraints
def clear_table(self, table_name):
"""
Deletes all rows from 'table_name'.
"""
params = (self.quote_name(table_name), )
self.execute('DELETE FROM %s;' % params)
@invalidate_table_constraints
def add_column(self, table_name, name, field, keep_default=True):
"""
Adds the column 'name' to the table 'table_name'.
        Uses the 'field' parameter, a django.db.models.fields.Field instance,
        to generate the necessary SQL.
@param table_name: The name of the table to add the column to
@param name: The name of the column to add
@param field: The field to use
"""
sql = self.column_sql(table_name, name, field)
if sql:
params = (
self.quote_name(table_name),
sql,
)
sql = self.add_column_string % params
self.execute(sql)
# Now, drop the default if we need to
if not keep_default and field.default is not None:
field.default = NOT_PROVIDED
self.alter_column(table_name, name, field, explicit_name=False, ignore_constraints=True)
def _db_type_for_alter_column(self, field):
"""
Returns a field's type suitable for ALTER COLUMN.
By default it just returns field.db_type().
        To be overridden by backend-specific subclasses.
@param field: The field to generate type for
"""
try:
return field.db_type(connection=self._get_connection())
except TypeError:
return field.db_type()
def _alter_add_column_mods(self, field, name, params, sqls):
"""
Subcommand of alter_column that modifies column definitions beyond
the type string -- e.g. adding constraints where they cannot be specified
        as part of the type (overridable).
"""
pass
def _alter_set_defaults(self, field, name, params, sqls):
"Subcommand of alter_column that sets default values (overrideable)"
# Next, set any default
if not field.null and field.has_default():
default = field.get_default()
sqls.append(('ALTER COLUMN %s SET DEFAULT %%s ' % (self.quote_name(name),), [default]))
else:
sqls.append(('ALTER COLUMN %s DROP DEFAULT' % (self.quote_name(name),), []))
@invalidate_table_constraints
def alter_column(self, table_name, name, field, explicit_name=True, ignore_constraints=False):
"""
Alters the given column name so it will match the given field.
Note that conversion between the two by the database must be possible.
        Will not automatically add _id by default; to have this behaviour, pass
explicit_name=False.
@param table_name: The name of the table to add the column to
@param name: The name of the column to alter
@param field: The new field definition to use
"""
if self.dry_run:
if self.debug:
print ' - no dry run output for alter_column() due to dynamic DDL, sorry'
return
        # hook for the field to do any resolution prior to its attributes being queried
if hasattr(field, 'south_init'):
field.south_init()
# Add _id or whatever if we need to
field.set_attributes_from_name(name)
if not explicit_name:
name = field.column
else:
field.column = name
if not ignore_constraints:
# Drop all check constraints. Note that constraints will be added back
# with self.alter_string_set_type and self.alter_string_drop_null.
if self.has_check_constraints:
check_constraints = self._constraints_affecting_columns(table_name, [name], "CHECK")
for constraint in check_constraints:
self.execute(self.delete_check_sql % {
'table': self.quote_name(table_name),
'constraint': self.quote_name(constraint),
})
# Drop or add UNIQUE constraint
unique_constraint = list(self._constraints_affecting_columns(table_name, [name], "UNIQUE"))
if field.unique and not unique_constraint:
self.create_unique(table_name, [name])
elif not field.unique and unique_constraint:
self.delete_unique(table_name, [name])
# Drop all foreign key constraints
try:
self.delete_foreign_key(table_name, name)
except ValueError:
# There weren't any
pass
# First, change the type
params = {
"column": self.quote_name(name),
"type": self._db_type_for_alter_column(field),
"table_name": table_name
}
# SQLs is a list of (SQL, values) pairs.
sqls = []
# Only alter the column if it has a type (Geometry ones sometimes don't)
if params["type"] is not None:
sqls.append((self.alter_string_set_type % params, []))
# Add any field- and backend- specific modifications
self._alter_add_column_mods(field, name, params, sqls)
# Next, nullity
if field.null:
sqls.append((self.alter_string_set_null % params, []))
else:
sqls.append((self.alter_string_drop_null % params, []))
# Next, set any default
self._alter_set_defaults(field, name, params, sqls)
# Finally, actually change the column
if self.allows_combined_alters:
sqls, values = zip(*sqls)
self.execute(
"ALTER TABLE %s %s;" % (self.quote_name(table_name), ", ".join(sqls)),
flatten(values),
)
else:
# Databases like e.g. MySQL don't like more than one alter at once.
for sql, values in sqls:
self.execute("ALTER TABLE %s %s;" % (self.quote_name(table_name), sql), values)
if not ignore_constraints:
# Add back FK constraints if needed
if field.rel and self.supports_foreign_keys:
self.execute(
self.foreign_key_sql(
table_name,
field.column,
field.rel.to._meta.db_table,
field.rel.to._meta.get_field(field.rel.field_name).column
)
)
def _fill_constraint_cache(self, db_name, table_name):
schema = self._get_schema_name()
ifsc_tables = ["constraint_column_usage", "key_column_usage"]
self._constraint_cache.setdefault(db_name, {})
self._constraint_cache[db_name][table_name] = {}
for ifsc_table in ifsc_tables:
rows = self.execute("""
SELECT kc.constraint_name, kc.column_name, c.constraint_type
FROM information_schema.%s AS kc
JOIN information_schema.table_constraints AS c ON
kc.table_schema = c.table_schema AND
kc.table_name = c.table_name AND
kc.constraint_name = c.constraint_name
WHERE
kc.table_schema = %%s AND
kc.table_name = %%s
""" % ifsc_table, [schema, table_name])
for constraint, column, kind in rows:
self._constraint_cache[db_name][table_name].setdefault(column, set())
self._constraint_cache[db_name][table_name][column].add((kind, constraint))
return
def _constraints_affecting_columns(self, table_name, columns, type="UNIQUE"):
"""
Gets the names of the constraints affecting the given columns.
If columns is None, returns all constraints of the type on the table.
"""
if self.dry_run:
raise DryRunError("Cannot get constraints for columns.")
if columns is not None:
columns = set(map(lambda s: s.lower(), columns))
db_name = self._get_setting('NAME')
cnames = {}
for col, constraints in self.lookup_constraint(db_name, table_name):
for kind, cname in constraints:
if kind == type:
cnames.setdefault(cname, set())
cnames[cname].add(col.lower())
for cname, cols in cnames.items():
if cols == columns or columns is None:
yield cname
@invalidate_table_constraints
def create_unique(self, table_name, columns):
"""
Creates a UNIQUE constraint on the columns on the given table.
"""
if not isinstance(columns, (list, tuple)):
columns = [columns]
name = self.create_index_name(table_name, columns, suffix="_uniq")
cols = ", ".join(map(self.quote_name, columns))
self.execute("ALTER TABLE %s ADD CONSTRAINT %s UNIQUE (%s)" % (
self.quote_name(table_name),
self.quote_name(name),
cols,
))
return name
@invalidate_table_constraints
def delete_unique(self, table_name, columns):
"""
Deletes a UNIQUE constraint on precisely the columns on the given table.
"""
if not isinstance(columns, (list, tuple)):
columns = [columns]
# Dry runs mean we can't do anything.
if self.dry_run:
if self.debug:
print ' - no dry run output for delete_unique_column() due to dynamic DDL, sorry'
return
constraints = list(self._constraints_affecting_columns(table_name, columns))
if not constraints:
raise ValueError("Cannot find a UNIQUE constraint on table %s, columns %r" % (table_name, columns))
for constraint in constraints:
self.execute(self.delete_unique_sql % (
self.quote_name(table_name),
self.quote_name(constraint),
))
def column_sql(self, table_name, field_name, field, tablespace='', with_name=True, field_prepared=False):
"""
Creates the SQL snippet for a column. Used by add_column and add_table.
"""
# If the field hasn't already been told its attribute name, do so.
if not field_prepared:
field.set_attributes_from_name(field_name)
        # hook for the field to do any resolution prior to its attributes being queried
if hasattr(field, 'south_init'):
field.south_init()
# Possible hook to fiddle with the fields (e.g. defaults & TEXT on MySQL)
field = self._field_sanity(field)
try:
sql = field.db_type(connection=self._get_connection())
except TypeError:
sql = field.db_type()
if sql:
# Some callers, like the sqlite stuff, just want the extended type.
if with_name:
field_output = [self.quote_name(field.column), sql]
else:
field_output = [sql]
field_output.append('%sNULL' % (not field.null and 'NOT ' or ''))
if field.primary_key:
field_output.append('PRIMARY KEY')
elif field.unique:
# Just use UNIQUE (no indexes any more, we have delete_unique)
field_output.append('UNIQUE')
tablespace = field.db_tablespace or tablespace
if tablespace and getattr(self._get_connection().features, "supports_tablespaces", False) and field.unique:
# We must specify the index tablespace inline, because we
# won't be generating a CREATE INDEX statement for this field.
field_output.append(self._get_connection().ops.tablespace_sql(tablespace, inline=True))
sql = ' '.join(field_output)
sqlparams = ()
# if the field is "NOT NULL" and a default value is provided, create the column with it
# this allows the addition of a NOT NULL field to a table with existing rows
if not getattr(field, '_suppress_default', False):
if field.has_default():
default = field.get_default()
# If the default is actually None, don't add a default term
if default is not None:
# If the default is a callable, then call it!
if callable(default):
default = default()
default = field.get_db_prep_save(default, connection=self._get_connection())
# Now do some very cheap quoting. TODO: Redesign return values to avoid this.
if isinstance(default, basestring):
default = "'%s'" % default.replace("'", "''")
# Escape any % signs in the output (bug #317)
if isinstance(default, basestring):
default = default.replace("%", "%%")
# Add it in
sql += " DEFAULT %s"
sqlparams = (default)
elif (not field.null and field.blank) or (field.get_default() == ''):
if field.empty_strings_allowed and self._get_connection().features.interprets_empty_strings_as_nulls:
sql += " DEFAULT ''"
# Error here would be nice, but doesn't seem to play fair.
#else:
# raise ValueError("Attempting to add a non null column that isn't character based without an explicit default value.")
if field.rel and self.supports_foreign_keys:
self.add_deferred_sql(
self.foreign_key_sql(
table_name,
field.column,
field.rel.to._meta.db_table,
field.rel.to._meta.get_field(field.rel.field_name).column
)
)
# Things like the contrib.gis module fields have this in 1.1 and below
if hasattr(field, 'post_create_sql'):
for stmt in field.post_create_sql(no_style(), table_name):
self.add_deferred_sql(stmt)
# In 1.2 and above, you have to ask the DatabaseCreation stuff for it.
# This also creates normal indexes in 1.1.
if hasattr(self._get_connection().creation, "sql_indexes_for_field"):
# Make a fake model to pass in, with only db_table
model = self.mock_model("FakeModelForGISCreation", table_name)
for stmt in self._get_connection().creation.sql_indexes_for_field(model, field, no_style()):
self.add_deferred_sql(stmt)
if sql:
return sql % sqlparams
else:
return None
def _field_sanity(self, field):
"""
Placeholder for DBMS-specific field alterations (some combos aren't valid,
e.g. DEFAULT and TEXT on MySQL)
"""
return field
def foreign_key_sql(self, from_table_name, from_column_name, to_table_name, to_column_name):
"""
Generates a full SQL statement to add a foreign key constraint
"""
constraint_name = '%s_refs_%s_%x' % (from_column_name, to_column_name, abs(hash((from_table_name, to_table_name))))
return 'ALTER TABLE %s ADD CONSTRAINT %s FOREIGN KEY (%s) REFERENCES %s (%s)%s;' % (
self.quote_name(from_table_name),
self.quote_name(truncate_name(constraint_name, self._get_connection().ops.max_name_length())),
self.quote_name(from_column_name),
self.quote_name(to_table_name),
self.quote_name(to_column_name),
self._get_connection().ops.deferrable_sql() # Django knows this
)
@invalidate_table_constraints
def delete_foreign_key(self, table_name, column):
"Drop a foreign key constraint"
if self.dry_run:
if self.debug:
print ' - no dry run output for delete_foreign_key() due to dynamic DDL, sorry'
return # We can't look at the DB to get the constraints
constraints = self._find_foreign_constraints(table_name, column)
if not constraints:
raise ValueError("Cannot find a FOREIGN KEY constraint on table %s, column %s" % (table_name, column))
for constraint_name in constraints:
self.execute(self.delete_foreign_key_sql % {
"table": self.quote_name(table_name),
"constraint": self.quote_name(constraint_name),
})
drop_foreign_key = alias('delete_foreign_key')
def _find_foreign_constraints(self, table_name, column_name=None):
return list(self._constraints_affecting_columns(
table_name, [column_name], "FOREIGN KEY"))
def _digest(self, *args):
"""
Use django.db.backends.creation.BaseDatabaseCreation._digest
to create index name in Django style. An evil hack :(
"""
if not hasattr(self, '_django_db_creation'):
self._django_db_creation = BaseDatabaseCreation(self._get_connection())
return self._django_db_creation._digest(*args)
def create_index_name(self, table_name, column_names, suffix=""):
"""
Generate a unique name for the index
"""
# If there is just one column in the index, use a default algorithm from Django
if len(column_names) == 1:
return truncate_name(
'%s_%s' % (table_name, self._digest(column_names[0])),
self._get_connection().ops.max_name_length()
)
# Else generate the name for the index by South
table_name = table_name.replace('"', '').replace('.', '_')
index_unique_name = '_%x' % abs(hash((table_name, ','.join(column_names))))
# If the index name is too long, truncate it
index_name = ('%s_%s%s%s' % (table_name, column_names[0], index_unique_name, suffix)).replace('"', '').replace('.', '_')
if len(index_name) > self.max_index_name_length:
part = ('_%s%s%s' % (column_names[0], index_unique_name, suffix))
index_name = '%s%s' % (table_name[:(self.max_index_name_length-len(part))], part)
return index_name
def create_index_sql(self, table_name, column_names, unique=False, db_tablespace=''):
"""
Generates a create index statement on 'table_name' for a list of 'column_names'
"""
if not column_names:
print "No column names supplied on which to create an index"
return ''
connection = self._get_connection()
if db_tablespace and connection.features.supports_tablespaces:
tablespace_sql = ' ' + connection.ops.tablespace_sql(db_tablespace)
else:
tablespace_sql = ''
index_name = self.create_index_name(table_name, column_names)
return 'CREATE %sINDEX %s ON %s (%s)%s;' % (
unique and 'UNIQUE ' or '',
self.quote_name(index_name),
self.quote_name(table_name),
','.join([self.quote_name(field) for field in column_names]),
tablespace_sql
)
@invalidate_table_constraints
def create_index(self, table_name, column_names, unique=False, db_tablespace=''):
""" Executes a create index statement """
sql = self.create_index_sql(table_name, column_names, unique, db_tablespace)
self.execute(sql)
@invalidate_table_constraints
def delete_index(self, table_name, column_names, db_tablespace=''):
"""
Deletes an index created with create_index.
This is possible using only columns due to the deterministic
index naming function which relies on column names.
"""
if isinstance(column_names, (str, unicode)):
column_names = [column_names]
name = self.create_index_name(table_name, column_names)
sql = self.drop_index_string % {
"index_name": self.quote_name(name),
"table_name": self.quote_name(table_name),
}
self.execute(sql)
drop_index = alias('delete_index')
@delete_column_constraints
def delete_column(self, table_name, name):
"""
        Deletes the column 'name' from the table 'table_name'.
"""
db_name = self._get_setting('NAME')
params = (self.quote_name(table_name), self.quote_name(name))
self.execute(self.delete_column_string % params, [])
drop_column = alias('delete_column')
def rename_column(self, table_name, old, new):
"""
Renames the column 'old' from the table 'table_name' to 'new'.
"""
raise NotImplementedError("rename_column has no generic SQL syntax")
@invalidate_table_constraints
def delete_primary_key(self, table_name):
"""
Drops the old primary key.
"""
# Dry runs mean we can't do anything.
if self.dry_run:
if self.debug:
print ' - no dry run output for delete_primary_key() due to dynamic DDL, sorry'
return
constraints = list(self._constraints_affecting_columns(table_name, None, type="PRIMARY KEY"))
if not constraints:
raise ValueError("Cannot find a PRIMARY KEY constraint on table %s" % (table_name,))
for constraint in constraints:
self.execute(self.delete_primary_key_sql % {
"table": self.quote_name(table_name),
"constraint": self.quote_name(constraint),
})
drop_primary_key = alias('delete_primary_key')
@invalidate_table_constraints
def create_primary_key(self, table_name, columns):
"""
Creates a new primary key on the specified columns.
"""
if not isinstance(columns, (list, tuple)):
columns = [columns]
self.execute(self.create_primary_key_string % {
"table": self.quote_name(table_name),
"constraint": self.quote_name(table_name+"_pkey"),
"columns": ", ".join(map(self.quote_name, columns)),
})
def start_transaction(self):
"""
Makes sure the following commands are inside a transaction.
Must be followed by a (commit|rollback)_transaction call.
"""
if self.dry_run:
self.pending_transactions += 1
transaction.commit_unless_managed()
transaction.enter_transaction_management()
transaction.managed(True)
def commit_transaction(self):
"""
Commits the current transaction.
Must be preceded by a start_transaction call.
"""
if self.dry_run:
return
transaction.commit()
transaction.leave_transaction_management()
def rollback_transaction(self):
"""
Rolls back the current transaction.
Must be preceded by a start_transaction call.
"""
if self.dry_run:
self.pending_transactions -= 1
transaction.rollback()
transaction.leave_transaction_management()
def rollback_transactions_dry_run(self):
"""
Rolls back all pending_transactions during this dry run.
"""
if not self.dry_run:
return
while self.pending_transactions > 0:
self.rollback_transaction()
if transaction.is_dirty():
# Force an exception, if we're still in a dirty transaction.
# This means we are missing a COMMIT/ROLLBACK.
transaction.leave_transaction_management()
def send_create_signal(self, app_label, model_names):
self.pending_create_signals.append((app_label, model_names))
def send_pending_create_signals(self, verbosity=0, interactive=False):
# Group app_labels together
signals = SortedDict()
for (app_label, model_names) in self.pending_create_signals:
try:
signals[app_label].extend(model_names)
except KeyError:
signals[app_label] = list(model_names)
# Send only one signal per app.
for (app_label, model_names) in signals.iteritems():
self.really_send_create_signal(app_label, list(set(model_names)),
verbosity=verbosity,
interactive=interactive)
self.pending_create_signals = []
def really_send_create_signal(self, app_label, model_names,
verbosity=0, interactive=False):
"""
Sends a post_syncdb signal for the model specified.
If the model is not found (perhaps it's been deleted?),
no signal is sent.
TODO: The behavior of django.contrib.* apps seems flawed in that
they don't respect created_models. Rather, they blindly execute
        over all models within the app sending the signal. This is a
        patch we should push Django to make. For now, this should work.
"""
if self.debug:
print " - Sending post_syncdb signal for %s: %s" % (app_label, model_names)
app = models.get_app(app_label)
if not app:
return
created_models = []
for model_name in model_names:
model = models.get_model(app_label, model_name)
if model:
created_models.append(model)
if created_models:
if hasattr(dispatcher, "send"):
# Older djangos
dispatcher.send(signal=models.signals.post_syncdb, sender=app,
app=app, created_models=created_models,
verbosity=verbosity, interactive=interactive)
else:
if self._is_multidb():
# Django 1.2+
models.signals.post_syncdb.send(
sender=app,
app=app,
created_models=created_models,
verbosity=verbosity,
interactive=interactive,
db=self.db_alias,
)
else:
# Django 1.1 - 1.0
models.signals.post_syncdb.send(
sender=app,
app=app,
created_models=created_models,
verbosity=verbosity,
interactive=interactive,
)
def mock_model(self, model_name, db_table, db_tablespace='',
pk_field_name='id', pk_field_type=models.AutoField,
pk_field_args=[], pk_field_kwargs={}):
"""
Generates a MockModel class that provides enough information
to be used by a foreign key/many-to-many relationship.
Migrations should prefer to use these rather than actual models
as models could get deleted over time, but these can remain in
migration files forever.
        Deprecated.
"""
class MockOptions(object):
def __init__(self):
self.db_table = db_table
self.db_tablespace = db_tablespace or settings.DEFAULT_TABLESPACE
self.object_name = model_name
self.module_name = model_name.lower()
if pk_field_type == models.AutoField:
pk_field_kwargs['primary_key'] = True
self.pk = pk_field_type(*pk_field_args, **pk_field_kwargs)
self.pk.set_attributes_from_name(pk_field_name)
self.abstract = False
def get_field_by_name(self, field_name):
# we only care about the pk field
return (self.pk, self.model, True, False)
def get_field(self, name):
# we only care about the pk field
return self.pk
class MockModel(object):
_meta = None
# We need to return an actual class object here, not an instance
MockModel._meta = MockOptions()
MockModel._meta.model = MockModel
return MockModel
def _db_positive_type_for_alter_column(self, field):
"""
A helper for subclasses overriding _db_type_for_alter_column:
Remove the check constraint from the type string for PositiveInteger
and PositiveSmallInteger fields.
@param field: The field to generate type for
"""
super_result = super(type(self), self)._db_type_for_alter_column(field)
if isinstance(field, (models.PositiveSmallIntegerField, models.PositiveIntegerField)):
return super_result.split(" ", 1)[0]
return super_result
def _alter_add_positive_check(self, field, name, params, sqls):
"""
A helper for subclasses overriding _alter_add_column_mods:
Add a check constraint verifying positivity to PositiveInteger and
PositiveSmallInteger fields.
"""
super(type(self), self)._alter_add_column_mods(field, name, params, sqls)
if isinstance(field, (models.PositiveSmallIntegerField, models.PositiveIntegerField)):
uniq_hash = abs(hash(tuple(params.values())))
d = dict(
constraint = "CK_%s_PSTV_%s" % (name, hex(uniq_hash)[2:]),
check = "%s >= 0" % self.quote_name(name))
sqls.append((self.add_check_constraint_fragment % d, []))
# Single-level flattening of lists
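# e.g. flatten([[1, 2], [3]]) == [1, 2, 3]; anything nested more than one level
# deep is left as-is.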
def flatten(ls):
nl = []
for l in ls:
nl += l
return nl
|
|
import logging
import os
import pathlib
import tempfile
import time
import pandas
import pytest
from cellpy import log, prms
from cellpy.utils import batch as batch
from cellpy.utils import helpers
from cellpy.utils.batch_tools import (
batch_experiments,
batch_exporters,
batch_journals,
batch_plotters,
dumpers,
engines,
)
log.setup_logging(default_level="DEBUG", testing=True)
# TODO: I think these tests save new versions of cellpy files each time. Fix that.
# TODO: Most likely some of these tests also save an updated batch JSON file. Fix that.
@pytest.fixture(scope="module")
def clean_dir():
new_path = tempfile.mkdtemp()
return new_path
@pytest.fixture
def batch_instance(clean_dir, parameters):
prms.Paths.db_filename = parameters.db_file_name
prms.Paths.cellpydatadir = clean_dir
prms.Paths.outdatadir = clean_dir
prms.Paths.rawdatadir = parameters.raw_data_dir
prms.Paths.db_path = parameters.db_dir
prms.Paths.filelogdir = clean_dir
prms.Paths.batchfiledir = clean_dir
prms.Paths.notebookdir = clean_dir
return batch
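# Note: the fixture above points cellpy's writable paths (cellpy data, output,
# file logs, batch files, notebooks) at a temporary directory, while the
# database and raw-data locations come from the test parameters fixture.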
@pytest.fixture
def populated_batch(batch_instance):
b = batch_instance.init(
"test", "ProjectOfRun", default_log_level="DEBUG", batch_col="b01", testing=True
)
b.create_journal()
b.paginate()
b.update()
return b
@pytest.fixture
def cycling_experiment(batch_instance):
experiment = batch_experiments.CyclingExperiment()
experiment.journal.project = "ProjectOfRun"
experiment.journal.name = "test"
experiment.export_raw = True
experiment.export_cycles = True
experiment.export_ica = True
experiment.journal.from_db()
return experiment
@pytest.fixture
def updated_cycling_experiment(cycling_experiment):
    # Warning: this fixture uses the same cellpy file that some of the other
    # tests update from time to time, so if one of those tests fails and
    # corrupts the cellpy file, this one might also fail.
logging.info(f"using pandas {pandas.__version__}")
cycling_experiment.update()
return cycling_experiment
def test_csv_exporter(updated_cycling_experiment):
logging.info(f"using pandas {pandas.__version__}")
exporter = batch_exporters.CSVExporter()
exporter.assign(updated_cycling_experiment)
exporter.do()
# TODO: fix me
@pytest.mark.slowtest
def test_update_time(cycling_experiment):
t0 = time.time()
cycling_experiment.update(all_in_memory=True)
cycling_experiment.status()
names = cycling_experiment.cell_names
for name in names:
# print(name)
cell = cycling_experiment.data[name]
cycles = cell.get_cycle_numbers()
for cycle in cycles:
capacity, voltage = cell.get_cap(cycle=cycle)
try:
l = len(capacity)
except TypeError as e:
print(e)
t1 = time.time()
dt = t1 - t0
print(f"This took {dt} seconds")
@pytest.mark.slowtest
def test_link_time(cycling_experiment):
t0 = time.time()
cycling_experiment.link()
cycling_experiment.status()
names = cycling_experiment.cell_names
for name in names:
cell = cycling_experiment.data[name]
cycles = cell.get_cycle_numbers()
for cycle in cycles:
capacity, voltage = cell.get_cap(cycle=cycle)
try:
l = len(capacity)
except TypeError as e:
print(e)
t1 = time.time()
dt = t1 - t0
print(f"This took {dt} seconds")
def test_link(cycling_experiment):
cycling_experiment.link()
print(cycling_experiment)
cycling_experiment.status()
names = cycling_experiment.cell_names
print(names)
def test_load_from_file(batch_instance, parameters):
experiment = batch_experiments.CyclingExperiment()
pages = parameters.pages
experiment.journal.from_file(pages)
def test_csv_exporter_modified(updated_cycling_experiment):
exporter = batch_exporters.CSVExporter()
exporter.assign(updated_cycling_experiment)
exporter._assign_engine(engines.dq_dv_engine)
exporter._assign_dumper(dumpers.screen_dumper)
def test_lab_journal(batch_instance):
lab_journal = batch_journals.LabJournal()
def test_cycling_experiment_to_file(cycling_experiment):
cycling_experiment.journal.to_file()
def test_interact_with_cellpydata_get_cap(updated_cycling_experiment, parameters):
name = parameters.run_name_2
capacity_voltage_df = updated_cycling_experiment.data[name].get_cap(cycle=1)
assert len(capacity_voltage_df) == 1105
def test_cycling_summary_plotter(populated_batch):
populated_batch.make_summaries()
populated_batch.plot_summaries()
def test_concatinator(populated_batch):
cellnames = populated_batch.cell_names
c = populated_batch.experiment.data[cellnames[0]]
cf = helpers.concatenate_summaries(
populated_batch, columns=["charge_capacity"], rate=0.04, group_it=True
)
print(cf.head(5))
def test_concatinator_yanked(populated_batch):
removed = helpers.yank_outliers(
populated_batch, remove_indexes=[3, 4, 5], keep_old=False
)
print(removed)
c1 = populated_batch.experiment.data[populated_batch.cell_names[0]]
print(c1.cell.summary.head(10))
cf1 = helpers.concatenate_summaries(
populated_batch, columns=["charge_capacity"], rate=0.04, group_it=True
)
cf2 = helpers.concatenate_summaries(
populated_batch,
columns=["charge_capacity"],
rate=0.04,
group_it=True,
inverted=True,
)
print(cf1.head())
print(cf2.head())
def test_report(populated_batch):
print(populated_batch.report)
# def test_iterate_folder(batch_instance):
# # Since the batch-files contains full paths I need to figure out how to make a custom json-file for the test.
# folder_name = prms.Paths.batchfiledir
# batch.iterate_batches(folder_name, default_log_level="CRITICAL")
|
|
#!/usr/bin/env python3
# Copyright (c) 2018 Bradley Denby
# Distributed under the MIT software license. See the accompanying file COPYING
# or http://www.opensource.org/licenses/mit-license.php.
"""Test transaction behaviors under the Dandelion spreading policy
Tests:
1. Resistance to active probing:
Stem: 0 --> 1 --> 2 --> 0 where each node has argument "-dandelion=1"
Probe: TestNode --> 0
Node 0 generates a Dandelion transaction "tx": 1.0 BTC from Node 0 to Node 2
TestNode immediately sends getdata for tx to Node 0
Assert that Node 0 does not reply with tx
2. Loop behavior:
Stem: 0 --> 1 --> 2 --> 0 where each node has argument "-dandelion=1"
Probe: TestNode --> 0
Wait ~5 seconds after Test 1, then TestNode sends getdata for tx to Node 0
Assert that Node 0 does not reply with tx
3. Resistance to black holes:
Stem: 0 --> 1 --> 2 --> 0 where each node has argument "-dandelion=1"
Probe: TestNode --> 0
Wait ~45 seconds after Test 2, then TestNode sends getdata for tx to Node 0
Assert that Node 0 replies with tx
"""
from collections import defaultdict
from test_framework.mininode import * # NodeConnCB
from test_framework.test_framework import BitcoinTestFramework # BitcoinTestFramework
from test_framework.util import * # other stuff
import sys # sys.exc_info (error reporting in on_inv)
import time # sleep
class TestP2PConn(NodeConnCB):
def __init__(self):
NodeConnCB.__init__(self)
self.connection = None
# Track number of messages of each type received and the most recent
# message of each type
self.message_count = defaultdict(int)
self.last_message = {}
self.ping_counter = 1
self.last_pong = msg_pong()
def add_connection(self, conn):
self.connection = conn
self.peer_disconnected = False
def on_inv(self, conn, message):
"""Receive message and dispatch message to appropriate callback.
We keep a count of how many of each message type has been received
and the most recent message of each type."""
with mininode_lock:
try:
command = message.command.decode('ascii')
self.message_count[command] += 1
self.last_message[command] = message
getattr(self, 'on_' + command)(message)
except:
print("ERROR delivering %s (%s)" % (repr(message), sys.exc_info()[0]))
raise
# Track the last getdata message we receive (used in the test)
def on_getdata(self, conn, message):
self.last_getdata = message
# Spin until verack message is received from the node.
# We use this to signal that our test can begin. This
# is called from the testing thread, so it needs to acquire
# the global lock.
def wait_for_verack(self):
def veracked():
return self.verack_received
return wait_until(veracked, timeout=10)
def wait_for_disconnect(self):
def disconnected():
return self.peer_disconnected
return wait_until(disconnected, timeout=10)
# Wrapper for the NodeConn's send_message function
def send_message(self, message):
self.connection.send_message(message)
def on_pong(self, conn, message):
self.last_pong = message
def on_close(self, conn):
self.peer_disconnected = True
# Sync up with the node after delivery of a block
def sync_with_ping(self, timeout=30):
def received_pong():
return (self.last_pong.nonce == self.ping_counter)
self.connection.send_message(msg_ping(nonce=self.ping_counter))
success = wait_until(received_pong, timeout=timeout)
self.ping_counter += 1
return success
def send_dandeliontx_getdata(self, dandeliontx_hash):
msg = msg_getdata([CInv(5,dandeliontx_hash)]) # 5: "DandelionTx"
print("Dandelion hash is ", dandeliontx_hash )
self.connection.send_message(msg)
class DandelionTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 8
self.extra_args = []
for i in range(self.num_nodes):
self.extra_args.append(["-dandelion=1"]) # ,"-debug=dandelion","-printtoconsole=1"
def setup_nodes(self):
print("setup_nodes");
return start_nodes(
self.num_nodes, self.options.tmpdir,
extra_args=[['-regtest', '-debug', '-whitelist=127.0.0.1']] * self.num_nodes)
def setup_network(self):
print("Setting up network for dandelion.")
self.nodes = self.setup_nodes()
# Tests 1,2,3: 0 --> 1 --> 2 --> 0
connect_nodes(self.nodes[0],1)
connect_nodes(self.nodes[1],2)
connect_nodes(self.nodes[2],0)
self.is_network_split = True
self.sync_all()
print("Finished setting up network for dandelion.")
def run_test(self):
# Convenience variables
node0 = self.nodes[0]
node1 = self.nodes[1]
node2 = self.nodes[2]
# Setup TestP2PConns
test_node0 = TestP2PConn()
connection = NodeConn('127.0.0.1', p2p_port(0), node0, test_node0)
test_node0.add_connection(connection)
# Start networking thread
NetworkThread().start()
test_node0.wait_for_verack()
print("Dandelion test: verack " + ("received." if test_node0.verack_received else "failed."))
# Get out of Initial Block Download (IBD)
for node in self.nodes:
node.generate(1)
# Generate funds for node0
node0.generate(101)
# Tests 1,2,3
# There is a low probability that one of these tests will fail even if
# the implementation is correct. Thus, these tests are repeated upon
# failure. A true bug will result in repeated failures.
print('Starting tests...')
test_1_passed = False
test_2_passed = False
test_3_passed = False
tries_left = 5
while(not (test_1_passed and test_2_passed and test_3_passed) and tries_left > 0):
tries_left -= 1
# Test 1: Resistance to active probing
test_node0.message_count['notfound'] = 0
node0_txid = node0.sendtoaddress(node2.getnewaddress(),1.0)
node0_tx = FromHex(CTransaction(),node0.gettransaction(node0_txid)['hex'])
test_node0.send_dandeliontx_getdata(node0_tx.calc_sha256(True))
time.sleep(1)
try:
assert(test_node0.message_count['notfound']==1)
if not test_1_passed:
test_1_passed = True
self.log.info('Success: resistance to active probing')
except AssertionError:
if not test_1_passed and tries_left == 0:
self.log.info('Failed: resistance to active probing')
# Test 2: Loop behavior
test_node0.message_count['notfound'] = 0
time.sleep(3)
test_node0.send_dandeliontx_getdata(node0_tx.calc_sha256(True))
time.sleep(1)
try:
assert(test_node0.message_count['notfound']==1)
if not test_2_passed:
test_2_passed = True
self.log.info('Success: loop behavior')
except AssertionError:
if not test_2_passed and tries_left == 0:
self.log.info('Failed: loop behavior')
# Test 3: Resistance to black holes
test_node0.message_count['tx'] = 0
time.sleep(44)
test_node0.send_dandeliontx_getdata(node0_tx.calc_sha256(True))
time.sleep(1)
try:
assert(test_node0.message_count['tx']==1)
if not test_3_passed:
test_3_passed = True
self.log.info('Success: resistance to black holes')
except AssertionError:
if not test_3_passed and tries_left == 0:
self.log.info('Failed: resistance to black holes')
print("Running dandelion test 7")
all_tests_passed = test_1_passed and test_2_passed and test_3_passed
assert(all_tests_passed)
if __name__ == '__main__':
DandelionTest().main()
|
|
#Copyright ReportLab Europe Ltd. 2000-2004
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/lib/codecharts.py
#$Header $
__version__=''' $Id '''
__doc__="""Routines to print code page (character set) drawings. Predates unicode.
To be sure we can accurately represent characters in various encodings
and fonts, we need some routines to display all those characters.
These are defined herein. The idea is to include flowable, drawable
and graphic objects for single and multi-byte fonts. """
import string
import codecs
from reportlab.pdfgen.canvas import Canvas
from reportlab.platypus import Flowable
from reportlab.pdfbase import pdfmetrics, cidfonts
from reportlab.graphics.shapes import Drawing, Group, String, Circle, Rect
from reportlab.graphics.widgetbase import Widget
from reportlab.lib import colors
adobe2codec = {
'WinAnsiEncoding':'winansi',
'MacRomanEncoding':'macroman',
'MacExpert':'macexpert',
'PDFDoc':'pdfdoc',
}
class CodeChartBase(Flowable):
"""Basic bits of drawing furniture used by
single and multi-byte versions: ability to put letters
into boxes."""
def calcLayout(self):
"Work out x and y positions for drawing"
rows = self.codePoints * 1.0 / self.charsPerRow
if rows == int(rows):
self.rows = int(rows)
else:
self.rows = int(rows) + 1
# size allows for a gray column of labels
self.width = self.boxSize * (1+self.charsPerRow)
self.height = self.boxSize * (1+self.rows)
#handy lists
self.ylist = []
for row in range(self.rows + 2):
self.ylist.append(row * self.boxSize)
self.xlist = []
for col in range(self.charsPerRow + 2):
self.xlist.append(col * self.boxSize)
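# Illustrative note (added for clarity, not in the original source): for the
# default single-byte chart, codePoints=256 and charsPerRow=16, so
# rows = 256 / 16 = 16, width = boxSize * (1 + 16) and height = boxSize * (1 + 16);
# the extra "+1" in each dimension leaves room for the gray label row and
# column drawn by drawLabels().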
def formatByte(self, byt):
if self.hex:
return '%02X' % byt
else:
return '%d' % byt
def drawChars(self, charList):
"""Fills boxes in order. None means skip a box.
Empty boxes at end get filled with gray"""
extraNeeded = (self.rows * self.charsPerRow - len(charList))
charList.extend([None] * extraNeeded)
row = 0
col = 0
self.canv.setFont(self.fontName, self.boxSize * 0.75)
for ch in charList: # may be 2 bytes or 1
if ch is None:
self.canv.setFillGray(0.9)
self.canv.rect((1+col) * self.boxSize, (self.rows - row - 1) * self.boxSize,
self.boxSize, self.boxSize, stroke=0, fill=1)
self.canv.setFillGray(0.0)
else:
try:
self.canv.drawCentredString(
(col+1.5) * self.boxSize,
(self.rows - row - 0.875) * self.boxSize,
ch,
)
except Exception:
self.canv.setFillGray(0.9)
self.canv.rect((1+col) * self.boxSize, (self.rows - row - 1) * self.boxSize,
self.boxSize, self.boxSize, stroke=0, fill=1)
self.canv.drawCentredString(
(col+1.5) * self.boxSize,
(self.rows - row - 0.875) * self.boxSize,
'?',
)
self.canv.setFillGray(0.0)
col = col + 1
if col == self.charsPerRow:
row = row + 1
col = 0
def drawLabels(self, topLeft = ''):
"""Writes little labels in the top row and first column"""
self.canv.setFillGray(0.8)
self.canv.rect(0, self.ylist[-2], self.width, self.boxSize, fill=1, stroke=0)
self.canv.rect(0, 0, self.boxSize, self.ylist[-2], fill=1, stroke=0)
self.canv.setFillGray(0.0)
#label each row and column
self.canv.setFont('Helvetica-Oblique',0.375 * self.boxSize)
byt = 0
for row in range(self.rows):
if self.rowLabels:
label = self.rowLabels[row]
else: # format start bytes as hex or decimal
label = self.formatByte(row * self.charsPerRow)
self.canv.drawCentredString(0.5 * self.boxSize,
(self.rows - row - 0.75) * self.boxSize,
label
)
for col in range(self.charsPerRow):
self.canv.drawCentredString((col + 1.5) * self.boxSize,
(self.rows + 0.25) * self.boxSize,
self.formatByte(col)
)
if topLeft:
self.canv.setFont('Helvetica-BoldOblique',0.5 * self.boxSize)
self.canv.drawCentredString(0.5 * self.boxSize,
(self.rows + 0.25) * self.boxSize,
topLeft
)
class SingleByteEncodingChart(CodeChartBase):
def __init__(self, faceName='Helvetica', encodingName='WinAnsiEncoding',
charsPerRow=16, boxSize=14, hex=1):
self.codePoints = 256
self.faceName = faceName
self.encodingName = encodingName
self.fontName = self.faceName + '-' + self.encodingName
self.charsPerRow = charsPerRow
self.boxSize = boxSize
self.hex = hex
self.rowLabels = None
pdfmetrics.registerFont(pdfmetrics.Font(self.fontName,
self.faceName,
self.encodingName)
)
self.calcLayout()
def draw(self):
self.drawLabels()
charList = [None] * 32 + map(chr, range(32, 256))
#we need to convert these to Unicode, since ReportLab
#2.0 can only draw in Unicode.
encName = self.encodingName
#apply some common translations
encName = adobe2codec.get(encName, encName)
decoder = codecs.lookup(encName)[1]
def decodeFunc(txt):
if txt is None:
return None
else:
return decoder(txt, errors='replace')[0]
charList = [decodeFunc(ch) for ch in charList]
self.drawChars(charList)
self.canv.grid(self.xlist, self.ylist)
class KutenRowCodeChart(CodeChartBase):
"""Formats one 'row' of the 94x94 space used in many Asian encodings.aliases
These deliberately resemble the code charts in Ken Lunde's "Understanding
CJKV Information Processing", to enable manual checking. Due to the large
numbers of characters, we don't try to make one graphic with 10,000 characters,
but rather output a sequence of these."""
#would be cleaner if both shared one base class whose job
#was to draw the boxes, but never mind...
def __init__(self, row, faceName, encodingName):
self.row = row
self.codePoints = 94
self.boxSize = 18
self.charsPerRow = 20
self.rows = 5
self.rowLabels = ['00','20','40','60','80']
self.hex = 0
self.faceName = faceName
self.encodingName = encodingName
try:
# the dependent files might not be available
font = cidfonts.CIDFont(self.faceName, self.encodingName)
pdfmetrics.registerFont(font)
except Exception:
# fall back to English and at least show we can draw the boxes
self.faceName = 'Helvetica'
self.encodingName = 'WinAnsiEncoding'
self.fontName = self.faceName + '-' + self.encodingName
self.calcLayout()
def makeRow(self, row):
"""Works out the character values for this kuten row"""
cells = []
if string.find(self.encodingName, 'EUC') > -1:
# it is an EUC family encoding.
for col in range(1, 95):
ch = chr(row + 160) + chr(col+160)
cells.append(ch)
## elif string.find(self.encodingName, 'GB') > -1:
## # it is an EUC family encoding.
## for col in range(1, 95):
## ch = chr(row + 160) + chr(col+160)
else:
cells.extend([None] * 94)
return cells
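# Worked example (added for clarity, not in the original source): in an EUC
# family encoding, kuten row 16, column 1 maps to the two bytes
# chr(16 + 160) + chr(1 + 160), i.e. 0xB0 0xA1, which is the first character
# of that row in Ken Lunde's charts.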
def draw(self):
self.drawLabels(topLeft= 'R%d' % self.row)
# work out which characters we need for the row
#assert string.find(self.encodingName, 'EUC') > -1, 'Only handles EUC encoding today, you gave me %s!' % self.encodingName
# pad out by 1 to match Ken Lunde's tables
charList = [None] + self.makeRow(self.row)
self.drawChars(charList)
self.canv.grid(self.xlist, self.ylist)
class Big5CodeChart(CodeChartBase):
"""Formats one 'row' of the 94x160 space used in Big 5
These deliberately resemble the code charts in Ken Lunde's "Understanding
CJKV Information Processing", to enable manual checking."""
def __init__(self, row, faceName, encodingName):
self.row = row
self.codePoints = 160
self.boxSize = 18
self.charsPerRow = 16
self.rows = 10
self.hex = 1
self.faceName = faceName
self.encodingName = encodingName
self.rowLabels = ['4','5','6','7','A','B','C','D','E','F']
try:
# the dependent files might not be available
font = cidfonts.CIDFont(self.faceName, self.encodingName)
pdfmetrics.registerFont(font)
except Exception:
# fall back to English and at least show we can draw the boxes
self.faceName = 'Helvetica'
self.encodingName = 'WinAnsiEncoding'
self.fontName = self.faceName + '-' + self.encodingName
self.calcLayout()
def makeRow(self, row):
"""Works out the character values for this Big5 row.
Rows start at 0xA1"""
cells = []
if string.find(self.encodingName, 'B5') > -1:
# big 5, different row size
for y in [4,5,6,7,10,11,12,13,14,15]:
for x in range(16):
col = y*16+x
ch = chr(row) + chr(col)
cells.append(ch)
else:
cells.extend([None] * 160)
return cells
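# Worked example (added for clarity): for row 0xA1 the loop produces trailing
# bytes 0x40-0x7F and 0xA0-0xFF (y in [4..7, 10..15], x in 0..15), so the
# 0x80-0x9F range is skipped as in the Big5 layout; each cell is the two-byte
# string chr(0xA1) + chr(col).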
def draw(self):
self.drawLabels(topLeft='%02X' % self.row)
charList = self.makeRow(self.row)
self.drawChars(charList)
self.canv.grid(self.xlist, self.ylist)
def hBoxText(msg, canvas, x, y, fontName):
"""Helper for stringwidth tests on Asian fonts.
Registers font if needed. Then draws the string,
and a box around it derived from the stringWidth function"""
canvas.saveState()
try:
font = pdfmetrics.getFont(fontName)
except KeyError:
font = cidfonts.UnicodeCIDFont(fontName)
pdfmetrics.registerFont(font)
canvas.setFillGray(0.8)
canvas.rect(x,y,pdfmetrics.stringWidth(msg, fontName, 16),16,stroke=0,fill=1)
canvas.setFillGray(0)
canvas.setFont(fontName, 16,16)
canvas.drawString(x,y,msg)
canvas.restoreState()
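# Example usage sketch (illustrative only; the font name is an assumption and
# requires the corresponding CID font support to be installed):
#
# c = Canvas('hbox_demo.pdf')
# hBoxText(u'\u4e2d\u6587', c, 72, 700, 'STSong-Light')
# c.save()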
class CodeWidget(Widget):
"""Block showing all the characters"""
def __init__(self):
self.x = 0
self.y = 0
self.width = 160
self.height = 160
def draw(self):
dx = self.width / 16.0
dy = self.height / 16.0
g = Group()
g.add(Rect(self.x, self.y, self.width, self.height,
fillColor=None, strokeColor=colors.black))
for x in range(16):
for y in range(16):
charValue = y * 16 + x
if charValue > 32:
s = String(self.x + x * dx,
self.y + (self.height - y*dy), chr(charValue))
g.add(s)
return g
def test():
c = Canvas('codecharts.pdf')
c.setFont('Helvetica-Bold', 24)
c.drawString(72, 750, 'Testing code page charts')
cc1 = SingleByteEncodingChart()
cc1.drawOn(c, 72, 500)
cc2 = SingleByteEncodingChart(charsPerRow=32)
cc2.drawOn(c, 72, 300)
cc3 = SingleByteEncodingChart(charsPerRow=25, hex=0)
cc3.drawOn(c, 72, 100)
## c.showPage()
##
## c.setFont('Helvetica-Bold', 24)
## c.drawString(72, 750, 'Multi-byte Kuten code chart examples')
## KutenRowCodeChart(1, 'HeiseiMin-W3','EUC-H').drawOn(c, 72, 600)
## KutenRowCodeChart(16, 'HeiseiMin-W3','EUC-H').drawOn(c, 72, 450)
## KutenRowCodeChart(84, 'HeiseiMin-W3','EUC-H').drawOn(c, 72, 300)
##
## c.showPage()
## c.setFont('Helvetica-Bold', 24)
## c.drawString(72, 750, 'Big5 Code Chart Examples')
## #Big5CodeChart(0xA1, 'MSungStd-Light-Acro','ETenms-B5-H').drawOn(c, 72, 500)
c.save()
print 'saved codecharts.pdf'
if __name__=='__main__':
test()
|
|
import logging
from django import template
from django.conf import settings
from django.template.defaultfilters import stringfilter
from django.utils.dateparse import parse_datetime
import utils
logger = logging.getLogger(__name__)
register = template.Library()
def unquoted_tag(func=None, name=None):
function_name = name or getattr(func, '_decorated_function', func).__name__
class Node(template.Node):
def __init__(self, func, value):
self.func = func
self.value = value
def render(self, context):
return self.func(context, self.value)
def wrap_func(func):
def tag_func(parser, token):
tag, contents = token.contents.split(' ', 1)
contents = utils.unquote_string(contents)
return Node(func, contents)
register.tag(function_name, tag_func)
return func
if func is None: # @unquoted_tag(...)
return wrap_func
elif callable(func): # @unquoted_tag
return wrap_func(func)
else:
raise TypeError("Invalid arguments provided to unquoted_tag")
@unquoted_tag
def title(context, value):
if 'vars' not in context.dicts[0]:
context.dicts[0]['vars'] = {}
context.dicts[0]['vars']['title'] = value
return u''
def datetag(context, value, var):
try:
dt = parse_datetime(value)
except ValueError:
dt = None
if not dt:
logger.warning('in template: invalid date: %s', value)
return u''
if 'vars' not in context.dicts[0]:
context.dicts[0]['vars'] = {}
context.dicts[0]['vars'][var] = dt
return u''
@unquoted_tag
def created(context, value):
return datetag(context, value, 'created')
@unquoted_tag
def published(context, value):
return datetag(context, value, 'published')
@unquoted_tag
def tags(context, value):
if 'vars' not in context.dicts[0]:
context.dicts[0]['vars'] = {}
context.dicts[0]['vars']['tags'] = map(
lambda s: s.strip(), value.split(','))
return u''
@unquoted_tag
def name(context, value):
if 'vars' not in context.dicts[0]:
context.dicts[0]['vars'] = {}
context.dicts[0]['vars']['name'] = value
return u''
@unquoted_tag(name='template')
def template_tag(context, value):
if 'vars' not in context.dicts[0]:
context.dicts[0]['vars'] = {}
context.dicts[0]['vars']['template'] = value
return u''
class ExcerptNode(template.Node):
def __init__(self, nodelist, show='off'):
self.nodelist = nodelist
self.show = show
def render(self, context):
excerpt = self.nodelist.render(context)
if 'vars' not in context.dicts[0]:
context.dicts[0]['vars'] = {}
context.dicts[0]['vars']['excerpt'] = excerpt
if self.show == 'on':
return excerpt
return u''
@register.tag
def excerpt(parser, token):
show = 'off'
cont = token.split_contents()
if len(cont) > 1:
show = cont[1]
nodelist = parser.parse(('endexcerpt', ))
parser.delete_first_token()
return ExcerptNode(nodelist, show)
class ImageNode(template.Node):
html = u"""<img src="{static}images/{src}" alt="{alt}">"""
def __init__(self, src, alt):
self.src = src
self.alt = alt
def render(self, context):
return self.html.format(static=settings.STATIC_URL, src=self.src,
alt=self.alt)
class ImageFullNode(ImageNode):
html = u"""
<a href="{static}images-full/{src}" data-lightbox="{name}" data-title="{title}">
<img src="{static}images/{src}" alt="{alt}">
</a>
"""
def render(self, context):
try:
name = context.dicts[0]['vars']['name']
except KeyError: # post.name can be unavailable if file_as_name is set
name = context.dicts[0]['vars']['title']
return self.html.format(static=settings.STATIC_URL, src=self.src,
alt=self.alt, name=name, title=self.alt)
class ImageBlockNode(template.Node):
html = u"""
<div class="image-block">
{block}
</div>"""
def __init__(self, *image_nodes):
self.image_nodes = image_nodes
def render(self, context):
block = "\n".join(n.render(context) for n in self.image_nodes)
return self.html.format(block=block)
class ConstNode(template.Node):
def __init__(self, conststr):
self.conststr = conststr
def render(self, context):
return self.conststr
def image_wrap(parser, token, image_node):
cont = token.split_contents()
nodes = []
alt = src = None
for val in cont[1:]:
val = utils.unquote_string(val)
if val == '|':
nodes.append(ConstNode("<br>"))
elif not alt and not src:
alt = val
elif alt and not src:
src = val
nodes.append(image_node(alt=alt, src=src))
alt = src = None
return ImageBlockNode(*nodes)
@register.tag
def image(parser, token):
return image_wrap(parser, token, ImageNode)
@register.tag
def image_full(parser, token):
return image_wrap(parser, token, ImageFullNode)
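# Illustrative template usage (not from the original source): arguments are
# consumed in (alt, src) pairs and a literal "|" inserts a line break between
# images, e.g.
#
# {% image "First diagram" "diagram1.png" "|" "Second diagram" "diagram2.png" %}
#
# renders both <img> tags inside a single <div class="image-block">.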
# useful in conjunction with timesince tag: http://stackoverflow.com/a/6481920
@register.filter(is_safe=True)
@stringfilter
def upto(value, delimiter=None):
return value.split(delimiter)[0]
|
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for special math operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import flags
from absl.testing import parameterized
import numpy as np
import scipy.special as sps
import six
from tensorflow.compiler.tests import xla_test
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gen_random_ops
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
flags.DEFINE_bool('vary_seed', False,
('Whether to vary the PRNG seed unpredictably. '
'With --runs_per_test=N, produces N iid runs.'))
NUM_SAMPLES = int(1e3)
@def_function.function(jit_compile=True)
def _igamma(a, x):
return math_ops.igamma(a, x)
@def_function.function(jit_compile=True)
def _igammac(a, x):
return math_ops.igammac(a, x)
@def_function.function(jit_compile=True)
def _polygamma(n, x):
return math_ops.polygamma(n, x)
@def_function.function(jit_compile=True)
def _zeta(a, q):
return math_ops.zeta(a, q)
# This is df/da / df/dx, where f = igamma.
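# More precisely (comment added for clarity): writing F(a, x) = igamma(a, x)
# for the CDF of a Gamma(a, 1) sample x, the implicit function theorem gives
# dx/da = -(dF/da) / (dF/dx), where dF/dx is the Gamma(a, 1) density
# exp((a - 1) * log(x) - lgamma(a) - x), computed below as `prob`.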
def implicit_reparameterization_grad(a, x):
log_prob = math_ops.xlogy(a - 1., x) - math_ops.lgamma(a) - x
prob = math_ops.exp(log_prob)
return -gen_math_ops.igamma_grad_a(a, x) / prob
@def_function.function(jit_compile=True)
def _log1p(x):
return math_ops.log1p(x)
class Log1pTest(xla_test.XLATestCase, parameterized.TestCase):
def setUp(self):
if flags.FLAGS.vary_seed:
entropy = os.urandom(64)
if six.PY2:
answer = int(entropy.encode('hex'), 16)
else:
answer = int.from_bytes(entropy, 'big')
np.random.seed(answer % (2**32 - 1))
super(Log1pTest, self).setUp()
def adjust_tolerance_for_tpu(self, dtype, rtol, atol):
if self.device not in ['TPU']:
return rtol, atol
if dtype == np.float32:
return 4e-4, 0.
return 1e-10, 0.
def _test_range(self, low, high, dtype, rtol, atol, is_negative=False):
# Test values near zero.
rtol, atol = self.adjust_tolerance_for_tpu(dtype, rtol, atol)
x = np.exp(np.random.uniform(
low=low, high=high, size=[NUM_SAMPLES])).astype(dtype)
if is_negative:
x = -x
expected_values = np.log1p(x)
with self.session() as sess:
with self.test_scope():
actual = _log1p(x)
actual = sess.run(actual)
self.assertAllClose(expected_values, actual, atol=atol, rtol=rtol)
@parameterized.parameters((np.float32, 1e-7, 0.),
(np.float64, 1e-15, 0.))
def testSmallX(self, dtype, rtol, atol):
self._test_range(-40., -20., dtype, rtol, atol, is_negative=False)
self._test_range(-40., -20., dtype, rtol, atol, is_negative=True)
@parameterized.parameters((np.float32, 2e-7, 0.),
(np.float64, 1e-15, 0.))
def testGreaterThanNegativeTwentyExponent(self, dtype, rtol, atol):
self._test_range(-20., -10., dtype, rtol, atol, is_negative=False)
self._test_range(-20., -10., dtype, rtol, atol, is_negative=True)
@parameterized.parameters((np.float32, 2e-7, 0.),
(np.float64, 1e-15, 0.))
def testGreaterThanNegativeTenExponent(self, dtype, rtol, atol):
self._test_range(-10., -5., dtype, rtol, atol, is_negative=False)
self._test_range(-10., -5., dtype, rtol, atol, is_negative=True)
@parameterized.parameters((np.float32, 2e-7, 0.),
(np.float64, 1e-15, 0.))
def testGreaterThanNegativeFiveExponent(self, dtype, rtol, atol):
self._test_range(-5., -1., dtype, rtol, atol, is_negative=False)
self._test_range(-5., -1., dtype, rtol, atol, is_negative=True)
@parameterized.parameters((np.float32, 4e-7, 0.),
(np.float64, 3e-14, 0.))
def testXGreaterThanOneTenth(self, dtype, rtol, atol):
self._test_range(-1., 0., dtype, rtol, atol, is_negative=False)
self._test_range(-1., 0., dtype, rtol, atol, is_negative=True)
@parameterized.parameters((np.float32, 2e-7, 0.),
(np.float64, 2e-15, 0.))
def testXGreaterThanOne(self, dtype, rtol, atol):
self._test_range(0., 3., dtype, rtol, atol, is_negative=False)
class ZetaTest(xla_test.XLATestCase, parameterized.TestCase):
def setUp(self):
if flags.FLAGS.vary_seed:
entropy = os.urandom(64)
if six.PY2:
answer = int(entropy.encode('hex'), 16)
else:
answer = int.from_bytes(entropy, 'big')
np.random.seed(answer % (2**32 - 1))
super(ZetaTest, self).setUp()
def adjust_tolerance_for_tpu(self, dtype, rtol, atol):
if self.device not in ['TPU']:
return rtol, atol
if dtype == np.float32:
return 2e-2, 1e-7
return 2e-4, 1e-20
@test_util.disable_mlir_bridge('TODO(b/165736950): Add support in MLIR')
def testBadValues(self):
q = np.random.uniform(low=0.3, high=20., size=[10])
with self.session() as sess:
with self.test_scope():
y = _zeta(np.float64(1.), q)
actual = sess.run(y)
# When x == 1, this is the Harmonic series.
self.assertTrue(np.all(np.isinf(actual)))
with self.session() as sess:
with self.test_scope():
y = _zeta(np.float64(0.1), q)
actual = sess.run(y)
# When x < 1, this is undefined.
self.assertTrue(np.all(np.isnan(actual)))
with self.session() as sess:
with self.test_scope():
y = _zeta([1., 1.1], [-1.1, -1.])
actual = sess.run(y)
# When q is negative, zeta is not defined
# if q is an integer or x is not an integer.
self.assertTrue(np.all(np.isinf(actual)))
@parameterized.parameters((np.float32, 1e-2, 1e-11),
(np.float64, 1e-4, 1e-30))
@test_util.disable_mlir_bridge('TODO(b/165736950): Add support in MLIR')
def testLargeXSmallQ(self, dtype, rtol, atol):
rtol, atol = self.adjust_tolerance_for_tpu(dtype, rtol, atol)
if self.device not in ['XLA_GPU', 'XLA_CPU'] and dtype == np.float64:
# TODO(b/165739664): Figure out why on TPU F64 Zeta sometimes returns
# infs.
self.skipTest(
'Skipping test because some F64 operations are numerically '
'unstable on TPU.')
x = np.random.uniform(low=100., high=200., size=[NUM_SAMPLES]).astype(dtype)
q = np.random.uniform(low=0.3, high=1., size=[NUM_SAMPLES]).astype(dtype)
expected_values = sps.zeta(x, q)
with self.session() as sess:
with self.test_scope():
y = _zeta(x, q)
actual = sess.run(y)
self.assertAllClose(expected_values, actual, atol=atol, rtol=rtol)
@parameterized.parameters((np.float32, 1e-2, 1e-11),
(np.float64, 1e-4, 1e-30))
@test_util.disable_mlir_bridge('TODO(b/165736950): Add support in MLIR')
def testSmallValues(self, dtype, rtol, atol):
rtol, atol = self.adjust_tolerance_for_tpu(dtype, rtol, atol)
# Test values near zero.
x = np.random.uniform(low=1.1, high=10., size=[NUM_SAMPLES]).astype(dtype)
q = np.random.uniform(
low=np.finfo(dtype).tiny, high=1., size=[NUM_SAMPLES]).astype(dtype)
expected_values = sps.zeta(x, q)
with self.session() as sess:
with self.test_scope():
actual = sess.run(_zeta(x, q))
self.assertAllClose(expected_values, actual, atol=atol, rtol=rtol)
@parameterized.parameters((np.float32, 1e-2, 1e-11),
(np.float64, 1e-4, 1e-30))
@test_util.disable_mlir_bridge('TODO(b/165736950): Add support in MLIR')
def testMediumValues(self, dtype, rtol, atol):
rtol, atol = self.adjust_tolerance_for_tpu(dtype, rtol, atol)
x = np.random.uniform(low=1.1, high=100., size=[NUM_SAMPLES]).astype(dtype)
q = np.random.uniform(low=1., high=1e1, size=[NUM_SAMPLES]).astype(dtype)
expected_values = sps.zeta(x, q)
with self.session() as sess:
with self.test_scope():
actual = sess.run(_zeta(x, q))
self.assertAllClose(expected_values, actual, atol=atol, rtol=rtol)
@parameterized.parameters((np.float32, 2e-2, 1e-5), (np.float64, 1e-4, 1e-30))
@test_util.disable_mlir_bridge('TODO(b/165736950): Add support in MLIR')
def testLargeValues(self, dtype, rtol, atol):
x = np.random.uniform(
low=100., high=int(1e3), size=[NUM_SAMPLES]).astype(dtype)
q = np.random.uniform(
low=1., high=int(1e1), size=[NUM_SAMPLES]).astype(dtype)
expected_values = sps.zeta(x, q)
with self.session() as sess:
with self.test_scope():
actual = sess.run(_zeta(x, q))
self.assertAllClose(expected_values, actual, atol=atol, rtol=rtol)
class PolygammaTest(xla_test.XLATestCase, parameterized.TestCase):
def setUp(self):
if flags.FLAGS.vary_seed:
entropy = os.urandom(64)
if six.PY2:
answer = int(entropy.encode('hex'), 16)
else:
answer = int.from_bytes(entropy, 'big')
np.random.seed(answer % (2**32 - 1))
super(PolygammaTest, self).setUp()
def adjust_tolerance_for_tpu(self, dtype, rtol, atol):
if self.device not in ['TPU']:
return rtol, atol
if dtype == np.float32:
return 2e-2, 1e-7
return 2e-4, 1e-20
@test_util.disable_mlir_bridge('TODO(b/165736950): Add support in MLIR')
def testBadValues(self):
x = np.random.uniform(low=0.3, high=20., size=[10])
with self.session() as sess:
with self.test_scope():
y = _polygamma(np.float64(-1.), x)
actual = sess.run(y)
# Not defined for negative numbers.
self.assertTrue(np.all(np.isnan(actual)))
with self.session() as sess:
with self.test_scope():
y = _polygamma(np.float64(0.1), x)
actual = sess.run(y)
# Not defined for non-integers.
self.assertTrue(np.all(np.isnan(actual)))
@parameterized.parameters((np.float32, 1e-2, 1e-11),
(np.float64, 1e-4, 1e-30))
@test_util.disable_mlir_bridge('TODO(b/165736950): Add support in MLIR')
def testRecoverDigamma(self, dtype, rtol, atol):
rtol, atol = self.adjust_tolerance_for_tpu(dtype, rtol, atol)
if self.device not in ['XLA_GPU', 'XLA_CPU'] and dtype == np.float64:
self.skipTest(
'Skipping test because some F64 operations are '
'numerically unstable on TPU.'
)
x = np.random.uniform(low=0.1, high=50., size=[NUM_SAMPLES]).astype(dtype)
expected_values = sps.digamma(x)
with self.session() as sess:
with self.test_scope():
y = _polygamma(dtype(0.), x)
actual = sess.run(y)
self.assertAllClose(expected_values, actual, atol=atol, rtol=rtol)
@parameterized.parameters((np.float32, 1e-2, 1e-11),
(np.float64, 1e-4, 1e-30))
@test_util.disable_mlir_bridge('TODO(b/165736950): Add support in MLIR')
def testSmallN(self, dtype, rtol, atol):
rtol, atol = self.adjust_tolerance_for_tpu(dtype, rtol, atol)
# Test values near zero.
n = np.random.randint(low=1, high=5, size=[NUM_SAMPLES]).astype(dtype)
x = np.random.uniform(
low=np.finfo(dtype).tiny, high=1., size=[NUM_SAMPLES]).astype(dtype)
expected_values = sps.polygamma(n, x)
with self.session() as sess:
with self.test_scope():
actual = sess.run(_polygamma(n, x))
self.assertAllClose(expected_values, actual, atol=atol, rtol=rtol)
@parameterized.parameters((np.float32, 1e-2, 1e-11),
(np.float64, 1e-4, 1e-30))
@test_util.disable_mlir_bridge('TODO(b/165736950): Add support in MLIR')
def testMediumLargeN(self, dtype, rtol, atol):
rtol, atol = self.adjust_tolerance_for_tpu(dtype, rtol, atol)
n = np.random.randint(low=5, high=10, size=[NUM_SAMPLES]).astype(dtype)
x = np.random.uniform(low=1., high=1e1, size=[NUM_SAMPLES]).astype(dtype)
expected_values = sps.polygamma(n, x)
with self.session() as sess:
with self.test_scope():
actual = sess.run(_polygamma(n, x))
self.assertAllClose(expected_values, actual, atol=atol, rtol=rtol)
class IgammaTest(xla_test.XLATestCase, parameterized.TestCase):
def setUp(self):
if flags.FLAGS.vary_seed:
entropy = os.urandom(64)
if six.PY2:
answer = int(entropy.encode('hex'), 16)
else:
answer = int.from_bytes(entropy, 'big')
np.random.seed(answer % (2**32 - 1))
super(IgammaTest, self).setUp()
# Skip Float64 test on TPU due to missing ops.
def maybe_skip_test(self, dtype):
if self.device not in ['XLA_GPU', 'XLA_CPU'] and dtype == np.float64:
self.skipTest(
'Skipping test because some F64 operations not supported on TPU.')
def adjust_tolerance_for_tpu(self, dtype, rtol, atol):
if self.device not in ['TPU']:
return rtol, atol
if dtype == np.float32:
return 2e-2, 1e-7
return 2e-4, 1e-20
@parameterized.parameters((np.float32, 1e-2, 1e-11),
(np.float64, 1e-4, 1e-30))
def testLargeXSmallA(self, dtype, rtol, atol):
self.maybe_skip_test(dtype)
rtol, atol = self.adjust_tolerance_for_tpu(dtype, rtol, atol)
# Test values near zero.
x = np.random.uniform(low=100., high=200., size=[NUM_SAMPLES]).astype(dtype)
a = np.random.uniform(low=0.3, high=1., size=[NUM_SAMPLES]).astype(dtype)
expected_values = sps.gammainc(a, x)
with self.session() as sess:
with self.test_scope():
y = _igamma(a, x)
actual = sess.run(y)
self.assertAllClose(expected_values, actual, atol=atol, rtol=rtol)
@parameterized.parameters((np.float32, 1e-2, 1e-11),
(np.float64, 1e-4, 1e-30))
def testSmallValues(self, dtype, rtol, atol):
self.maybe_skip_test(dtype)
rtol, atol = self.adjust_tolerance_for_tpu(dtype, rtol, atol)
# Test values near zero.
x = np.random.uniform(
low=np.finfo(dtype).tiny, high=1., size=[NUM_SAMPLES]).astype(dtype)
a = np.random.uniform(
low=np.finfo(dtype).tiny, high=1., size=[NUM_SAMPLES]).astype(dtype)
expected_values = sps.gammainc(a, x)
with self.session() as sess:
with self.test_scope():
actual = sess.run(_igamma(a, x))
self.assertAllClose(expected_values, actual, atol=atol, rtol=rtol)
@parameterized.parameters((np.float32, 1e-2, 1e-11),
(np.float64, 1e-4, 1e-30))
def testMediumValues(self, dtype, rtol, atol):
self.maybe_skip_test(dtype)
rtol, atol = self.adjust_tolerance_for_tpu(dtype, rtol, atol)
# Test values near zero.
x = np.random.uniform(low=1., high=100., size=[NUM_SAMPLES]).astype(dtype)
a = np.random.uniform(low=1., high=100., size=[NUM_SAMPLES]).astype(dtype)
expected_values = sps.gammainc(a, x)
with self.session() as sess:
with self.test_scope():
actual = sess.run(_igamma(a, x))
self.assertAllClose(expected_values, actual, atol=atol, rtol=rtol)
@parameterized.parameters((np.float32, 2e-2, 1e-5), (np.float64, 1e-4, 1e-30))
def testLargeValues(self, dtype, rtol, atol):
if self.device == 'TPU':
# TODO(b/154908275): Remove this once fixed for large a, x.
self.skipTest('Skipping test since numerically unstable on TPU.')
# Test values near zero.
x = np.random.uniform(
low=100., high=int(1e4), size=[NUM_SAMPLES]).astype(dtype)
a = np.random.uniform(
low=100., high=int(1e4), size=[NUM_SAMPLES]).astype(dtype)
expected_values = sps.gammainc(a, x)
with self.session() as sess:
with self.test_scope():
actual = sess.run(_igamma(a, x))
self.assertAllClose(expected_values, actual, atol=atol, rtol=rtol)
# We don't check small values because the numerical gradients become quite
# large.
@parameterized.parameters((np.float32, 0.09), (np.float64, 1e-7))
def testGradMediumValues(self, dtype, tolerance):
self.maybe_skip_test(dtype)
with self.session():
with self.test_scope():
x = constant_op.constant(
np.random.uniform(low=1., high=100.,
size=[NUM_SAMPLES]).astype(dtype))
a = constant_op.constant(
np.random.uniform(low=1., high=100.,
size=[NUM_SAMPLES]).astype(dtype))
f = lambda b: _igamma(b, x)
max_error = gradient_checker_v2.max_error(
*gradient_checker_v2.compute_gradient(f, x=[a], delta=1e-3))
self.assertLessEqual(max_error, tolerance)
@parameterized.parameters((np.float32, 0.5), (np.float64, 1e-7))
def testGradLargeValues(self, dtype, tolerance):
self.maybe_skip_test(dtype)
with self.session():
with self.test_scope():
x = constant_op.constant(
np.random.uniform(low=100., high=int(1e4),
size=[NUM_SAMPLES]).astype(dtype))
a = constant_op.constant(
np.random.uniform(low=100., high=int(1e4),
size=[NUM_SAMPLES]).astype(dtype))
f = lambda b: _igamma(b, x)
max_error = gradient_checker_v2.max_error(
*gradient_checker_v2.compute_gradient(f, x=[a], delta=1e-2))
self.assertLessEqual(max_error, tolerance)
@parameterized.parameters((np.float32, 1e-2, 1e-11),
(np.float64, 1e-4, 1e-30))
def testRandomGammaGradSmallValues(self, dtype, rtol, atol):
self.maybe_skip_test(dtype)
rtol, atol = self.adjust_tolerance_for_tpu(dtype, rtol, atol)
# Test values near zero.
with self.session() as sess:
with self.test_scope():
x = constant_op.constant(
np.random.uniform(
low=np.finfo(dtype).tiny, high=1.,
size=[NUM_SAMPLES]).astype(dtype))
a = constant_op.constant(
np.random.uniform(
low=np.finfo(dtype).tiny, high=1.,
size=[NUM_SAMPLES]).astype(dtype))
gamma_sample_grad = gen_random_ops.random_gamma_grad(a, x)
actual_grad = implicit_reparameterization_grad(a, x)
gamma_sample_grad, actual_grad = sess.run(
[gamma_sample_grad, actual_grad])
# We do this because the ratio computed in
# implicit_reparameterization_grad can very easily result in a NaN due
# to the computed numerator and denominator zeroing out.
gamma_sample_grad = gamma_sample_grad[
~np.logical_or(np.isnan(actual_grad), np.isinf(actual_grad))]
actual_grad = actual_grad[
~np.logical_or(np.isnan(actual_grad), np.isinf(actual_grad))]
self.assertAllClose(actual_grad, gamma_sample_grad, atol=atol, rtol=rtol)
@parameterized.parameters((np.float32, 1e-2, 1e-11),
(np.float64, 1e-4, 1e-30))
def testRandomGammaGradMediumValues(self, dtype, rtol, atol):
self.maybe_skip_test(dtype)
rtol, atol = self.adjust_tolerance_for_tpu(dtype, rtol, atol)
with self.session() as sess:
with self.test_scope():
x = constant_op.constant(
np.random.uniform(low=1., high=10.,
size=[NUM_SAMPLES]).astype(dtype))
a = constant_op.constant(
np.random.uniform(low=1., high=10.,
size=[NUM_SAMPLES]).astype(dtype))
gamma_sample_grad = gen_random_ops.random_gamma_grad(a, x)
actual_grad = implicit_reparameterization_grad(a, x)
gamma_sample_grad, actual_grad = sess.run(
[gamma_sample_grad, actual_grad])
# We do this because the ratio computed in
# implicit_reparameterization_grad can very easily result in a NaN due
# to the computed numerator and denominator zeroing out.
gamma_sample_grad = gamma_sample_grad[
~np.logical_or(np.isnan(actual_grad), np.isinf(actual_grad))]
actual_grad = actual_grad[
~np.logical_or(np.isnan(actual_grad), np.isinf(actual_grad))]
self.assertAllClose(actual_grad, gamma_sample_grad, atol=atol, rtol=rtol)
class IgammacTest(xla_test.XLATestCase, parameterized.TestCase):
def setUp(self):
if flags.FLAGS.vary_seed:
entropy = os.urandom(64)
if six.PY2:
answer = int(entropy.encode('hex'), 16)
else:
answer = int.from_bytes(entropy, 'big')
np.random.seed(answer % (2**32 - 1))
super(IgammacTest, self).setUp()
# Skip Float64 test on TPU due to missing ops.
def maybe_skip_test(self, dtype):
if self.device not in ['XLA_GPU', 'XLA_CPU'] and dtype == np.float64:
# TODO(b/154908275): Remove this once fixed for large a, x.
self.skipTest(
'Skipping test because some F64 operations not supported on TPU.')
def adjust_tolerance_for_tpu(self, dtype, rtol, atol):
if self.device not in ['TPU']:
return rtol, atol
if dtype == np.float32:
return 2e-2, 1e-7
return 2e-4, 1e-20
@parameterized.parameters((np.float32, 1e-2, 1e-11),
(np.float64, 1e-4, 1e-30))
def testLargeXSmallA(self, dtype, rtol, atol):
self.maybe_skip_test(dtype)
rtol, atol = self.adjust_tolerance_for_tpu(dtype, rtol, atol)
# Test values near zero.
x = np.random.uniform(low=100., high=200., size=[NUM_SAMPLES]).astype(dtype)
a = np.random.uniform(low=0.3, high=1., size=[NUM_SAMPLES]).astype(dtype)
expected_values = sps.gammaincc(a, x)
with self.session() as sess:
with self.test_scope():
y = _igammac(a, x)
actual = sess.run(y)
self.assertAllClose(expected_values, actual, atol=atol, rtol=rtol)
@parameterized.parameters((np.float32, 1e-2, 1e-11),
(np.float64, 1e-4, 1e-30))
def testSmallValues(self, dtype, rtol, atol):
self.maybe_skip_test(dtype)
rtol, atol = self.adjust_tolerance_for_tpu(dtype, rtol, atol)
# Test values near zero.
x = np.random.uniform(
low=np.finfo(dtype).tiny, high=1., size=[NUM_SAMPLES]).astype(dtype)
a = np.random.uniform(
low=np.finfo(dtype).tiny, high=1., size=[NUM_SAMPLES]).astype(dtype)
expected_values = sps.gammaincc(a, x)
with self.session() as sess:
with self.test_scope():
actual = sess.run(_igammac(a, x))
self.assertAllClose(expected_values, actual, atol=atol, rtol=rtol)
@parameterized.parameters((np.float32, 1e-2, 1e-11),
(np.float64, 1e-4, 1e-30))
def testMediumValues(self, dtype, rtol, atol):
self.maybe_skip_test(dtype)
rtol, atol = self.adjust_tolerance_for_tpu(dtype, rtol, atol)
# Test values near zero.
x = np.random.uniform(low=1., high=100., size=[NUM_SAMPLES]).astype(dtype)
a = np.random.uniform(low=1., high=100., size=[NUM_SAMPLES]).astype(dtype)
expected_values = sps.gammaincc(a, x)
with self.session() as sess:
with self.test_scope():
actual = sess.run(_igammac(a, x))
self.assertAllClose(expected_values, actual, atol=atol, rtol=rtol)
@parameterized.parameters((np.float32, 2e-2, 1e-5), (np.float64, 1e-4, 1e-30))
def testLargeValues(self, dtype, rtol, atol):
if self.device == 'TPU':
self.skipTest('Skipping test since numerically unstable on TPU.')
# Test values near zero.
x = np.random.uniform(
low=100., high=int(1e4), size=[NUM_SAMPLES]).astype(dtype)
a = np.random.uniform(
low=100., high=int(1e4), size=[NUM_SAMPLES]).astype(dtype)
expected_values = sps.gammaincc(a, x)
with self.session() as sess:
with self.test_scope():
actual = sess.run(_igammac(a, x))
self.assertAllClose(expected_values, actual, atol=atol, rtol=rtol)
if __name__ == '__main__':
os.environ['XLA_FLAGS'] = '--xla_cpu_enable_fast_math=false'
test.main()
|
|
from __future__ import division, unicode_literals
import collections
import itertools
import os
import datetime
import logging
from argparse import RawTextHelpFormatter
import sys
if sys.platform != 'win32':
import resource
try:
import cPickle as pickle
except ImportError:
import pickle
from django.conf import settings
from django.db import transaction
from django.db import reset_queries, IntegrityError
from django.core.management.base import BaseCommand
import progressbar
from ...exceptions import *
from ...signals import *
from ...settings import *
from ...geonames import Geonames
from ...loading import get_cities_models
Country, Region, City = get_cities_models()
class MemoryUsageWidget(progressbar.widgets.WidgetBase):
def __call__(self, progress, data):
if sys.platform != 'win32':
return '%s kB' % resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
return '?? kB'
class Command(BaseCommand):
help = """
Download all files in CITIES_LIGHT_COUNTRY_SOURCES if they were updated or if
--force-all option was used.
Import country data if they were downloaded or if --force-import-all was used.
Same goes for CITIES_LIGHT_CITY_SOURCES.
It is possible to force the download of some files which have not been updated
on the server:
manage.py cities_light --force cities15000 --force countryInfo
It is possible to force the import of files which weren't downloaded using the
--force-import option:
manage.py cities_light --force-import cities15000 --force-import country
""".strip()
logger = logging.getLogger('cities_light')
def create_parser(self, *args, **kwargs):
parser = super(Command, self).create_parser(*args, **kwargs)
parser.formatter_class = RawTextHelpFormatter
return parser
def add_arguments(self, parser):
parser.add_argument('--force-import-all', action='store_true',
default=False, help='Import even if files are up-to-date.'
)
parser.add_argument('--force-all', action='store_true', default=False,
help='Download and import even if files are up-to-date.'
)
parser.add_argument('--force-import', action='append', default=[],
help='Import even if matching files are up-to-date'
)
parser.add_argument('--force', action='append', default=[],
help='Download and import even if matching files are up-to-date'
)
parser.add_argument('--noinsert', action='store_true',
default=False,
help='Update existing data only'
)
parser.add_argument('--hack-translations', action='store_true',
default=False,
help='Set this if you intend to import translations a lot'
)
parser.add_argument('--keep-slugs', action='store_true',
default=False,
help='Do not update slugs'
)
parser.add_argument('--progress', action='store_true',
default=False,
help='Show progress bar'
)
def progress_init(self):
"""Initialize progress bar."""
if self.progress_enabled:
self.progress_widgets = [
'RAM used: ',
MemoryUsageWidget(),
' ',
progressbar.ETA(),
' Done: ',
progressbar.Percentage(),
progressbar.Bar(),
]
def progress_start(self, max_value):
"""Start progress bar."""
if self.progress_enabled:
self.progress = progressbar.ProgressBar(
max_value=max_value,
widgets=self.progress_widgets
).start()
def progress_update(self, value):
"""Update progress bar."""
if self.progress_enabled:
self.progress.update(value)
def progress_finish(self):
"""Finalize progress bar."""
if self.progress_enabled:
self.progress.finish()
def handle(self, *args, **options):
# initialize lazy identity maps
self._clear_identity_maps()
if not os.path.exists(DATA_DIR):
self.logger.info('Creating %s' % DATA_DIR)
os.mkdir(DATA_DIR)
install_file_path = os.path.join(DATA_DIR, 'install_datetime')
translation_hack_path = os.path.join(DATA_DIR, 'translation_hack')
self.noinsert = options.get('noinsert', False)
self.keep_slugs = options.get('keep_slugs', False)
self.progress_enabled = options.get('progress')
self.progress_init()
sources = list(itertools.chain(
COUNTRY_SOURCES,
REGION_SOURCES,
CITY_SOURCES,
TRANSLATION_SOURCES,
))
for url in sources:
if url in TRANSLATION_SOURCES:
# free some memory
self._clear_identity_maps()
destination_file_name = url.split('/')[-1]
force = options.get('force_all', False)
if not force:
for f in options['force']:
if f in destination_file_name or f in url:
force = True
geonames = Geonames(url, force=force)
downloaded = geonames.downloaded
force_import = options.get('force_import_all', False)
if not force_import:
for f in options['force_import']:
if f in destination_file_name or f in url:
force_import = True
if not os.path.exists(install_file_path):
self.logger.info('Forced import of %s because data do not seem'
' to have been installed successfully yet; note that this is'
' equivalent to --force-import-all.' %
destination_file_name)
force_import = True
if downloaded or force_import:
self.logger.info('Importing %s' % destination_file_name)
if url in TRANSLATION_SOURCES:
if options.get('hack_translations', False):
if os.path.exists(translation_hack_path):
self.logger.debug(
'Using translation parsed data: %s' %
translation_hack_path)
continue
i = 0
self.progress_start(geonames.num_lines())
for items in geonames.parse():
if url in CITY_SOURCES:
self.city_import(items)
elif url in REGION_SOURCES:
self.region_import(items)
elif url in COUNTRY_SOURCES:
self.country_import(items)
elif url in TRANSLATION_SOURCES:
self.translation_parse(items)
# prevent memory leaks in DEBUG mode
# https://docs.djangoproject.com/en/1.9/faq/models/
# #how-can-i-see-the-raw-sql-queries-django-is-running
if settings.DEBUG:
reset_queries()
i += 1
self.progress_update(i)
self.progress_finish()
if url in TRANSLATION_SOURCES and options.get(
'hack_translations', False):
with open(translation_hack_path, 'wb+') as f:
pickle.dump(self.translation_data, f)
if options.get('hack_translations', False):
with open(translation_hack_path, 'rb') as f:
self.translation_data = pickle.load(f)
self.logger.info('Importing parsed translation in the database')
self.translation_import()
with open(install_file_path, 'wb+') as f:
pickle.dump(datetime.datetime.now(), f)
def _clear_identity_maps(self):
"""Clear identity maps and free some memory."""
if getattr(self, '_country_codes', False):
del self._country_codes
if getattr(self, '_region_codes', False):
del self._region_codes
self._country_codes = {}
self._region_codes = collections.defaultdict(dict)
def _get_country_id(self, code2):
"""
Simple lazy identity map for code2->country
"""
if code2 not in self._country_codes:
self._country_codes[code2] = Country.objects.get(code2=code2).pk
return self._country_codes[code2]
def _get_region_id(self, country_code2, region_id):
"""
Simple lazy identity map for (country_code2, region_id)->region
"""
country_id = self._get_country_id(country_code2)
if region_id not in self._region_codes[country_id]:
self._region_codes[country_id][region_id] = Region.objects.get(
country_id=country_id, geoname_code=region_id).pk
return self._region_codes[country_id][region_id]
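# Illustrative example (hypothetical data): self._get_region_id('US', 'CA')
# first resolves 'US' to the Country primary key via _get_country_id(), then
# caches and returns the primary key of the Region whose geoname_code is 'CA'
# for that country, so repeated city rows for the same region hit the dict
# instead of the database.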
def country_import(self, items):
try:
country_items_pre_import.send(sender=self, items=items)
except InvalidItems:
return
try:
force_insert = False
force_update = False
country = Country.objects.get(geoname_id=items[ICountry.geonameid])
force_update = True
except Country.DoesNotExist:
if self.noinsert:
return
country = Country(geoname_id=items[ICountry.geonameid])
force_insert = True
country.name = items[ICountry.name]
country.code2 = items[ICountry.code2]
country.code3 = items[ICountry.code3]
country.continent = items[ICountry.continent]
country.tld = items[ICountry.tld][1:] # strip the leading dot
# Strip the "+" prefix for consistency. Note that some countries have
# several prefixes, e.g. Puerto Rico.
country.phone = items[ICountry.phone].replace('+', '')
# Clear name_ascii to always update it by set_name_ascii() signal
country.name_ascii = ''
if force_update and not self.keep_slugs:
country.slug = None
country_items_post_import.send(
sender=self,
instance=country,
items=items
)
self.save(
country,
force_insert=force_insert,
force_update=force_update
)
def region_import(self, items):
try:
region_items_pre_import.send(sender=self, items=items)
except InvalidItems:
return
try:
force_insert = False
force_update = False
region = Region.objects.get(geoname_id=items[IRegion.geonameid])
force_update = True
except Region.DoesNotExist:
if self.noinsert:
return
region = Region(geoname_id=items[IRegion.geonameid])
force_insert = True
name = items[IRegion.name]
if not items[IRegion.name]:
name = items[IRegion.asciiName]
code2, geoname_code = items[IRegion.code].split('.')
country_id = self._get_country_id(code2)
save = False
if region.name != name:
region.name = name
save = True
if region.country_id != country_id:
region.country_id = country_id
save = True
if region.geoname_code != geoname_code:
region.geoname_code = geoname_code
save = True
if region.name_ascii != items[IRegion.asciiName]:
region.name_ascii = items[IRegion.asciiName]
save = True
if force_update and not self.keep_slugs:
region.slug = None
region_items_post_import.send(
sender=self,
instance=region,
items=items
)
if save:
self.save(
region,
force_insert=force_insert,
force_update=force_update
)
def city_import(self, items):
try:
city_items_pre_import.send(sender=self, items=items)
except InvalidItems:
return
try:
force_insert = False
force_update = False
city = City.objects.get(geoname_id=items[ICity.geonameid])
force_update = True
except City.DoesNotExist:
if self.noinsert:
return
city = City(geoname_id=items[ICity.geonameid])
force_insert = True
try:
country_id = self._get_country_id(items[ICity.countryCode])
except Country.DoesNotExist:
if self.noinsert:
return
else:
raise
try:
region_id = self._get_region_id(
items[ICity.countryCode],
items[ICity.admin1Code]
)
except Region.DoesNotExist:
region_id = None
save = False
if city.country_id != country_id:
city.country_id = country_id
save = True
if city.region_id != region_id:
city.region_id = region_id
save = True
if city.name != items[ICity.name]:
city.name = items[ICity.name]
save = True
if city.name_ascii != items[ICity.asciiName]:
# useful for cities with chinese names
city.name_ascii = items[ICity.asciiName]
save = True
if city.latitude != items[ICity.latitude]:
city.latitude = items[ICity.latitude]
save = True
if city.longitude != items[ICity.longitude]:
city.longitude = items[ICity.longitude]
save = True
if city.population != items[ICity.population]:
city.population = items[ICity.population]
save = True
if city.feature_code != items[ICity.featureCode]:
city.feature_code = items[ICity.featureCode]
save = True
altnames = items[ICity.alternateNames]
if not TRANSLATION_SOURCES and city.alternate_names != altnames:
city.alternate_names = altnames
save = True
if force_update and not self.keep_slugs:
city.slug = None
city_items_post_import.send(
sender=self,
instance=city,
items=items,
save=save
)
if save:
self.save(
city,
force_insert=force_insert,
force_update=force_update
)
def translation_parse(self, items):
if not hasattr(self, 'translation_data'):
self.country_ids = set(Country.objects.values_list('geoname_id',
flat=True))
self.region_ids = set(Region.objects.values_list('geoname_id',
flat=True))
self.city_ids = set(City.objects.values_list('geoname_id',
flat=True))
self.translation_data = collections.OrderedDict((
(Country, {}),
(Region, {}),
(City, {}),
))
try:
translation_items_pre_import.send(sender=self, items=items)
except InvalidItems:
return
if len(items) > 5:
# avoid shortnames, colloquial, and historic
return
item_lang = items[IAlternate.language]
if item_lang not in TRANSLATION_LANGUAGES:
return
item_geoid = items[IAlternate.geonameid]
item_name = items[IAlternate.name]
# arg optimisation code kills me !!!
item_geoid = int(item_geoid)
if item_geoid in self.country_ids:
model_class = Country
elif item_geoid in self.region_ids:
model_class = Region
elif item_geoid in self.city_ids:
model_class = City
else:
return
if item_geoid not in self.translation_data[model_class]:
self.translation_data[model_class][item_geoid] = {}
if item_lang not in self.translation_data[model_class][item_geoid]:
self.translation_data[model_class][item_geoid][item_lang] = []
self.translation_data[model_class][item_geoid][item_lang].append(
item_name)
def translation_import(self):
data = getattr(self, 'translation_data', None)
if not data:
return
total = 0
for model_class, model_class_data in data.items():
total += len(model_class_data)
i = 0
self.progress_start(total)
for model_class, model_class_data in data.items():
for geoname_id, geoname_data in model_class_data.items():
try:
model = model_class.objects.get(geoname_id=geoname_id)
except model_class.DoesNotExist:
continue
save = False
alternate_names = set()
for lang, names in geoname_data.items():
if lang == 'post':
# we might want to save the postal codes somewhere
# here's where it will all start ...
continue
for name in names:
if name == model.name:
continue
alternate_names.add(name)
alternate_names = ';'.join(sorted(alternate_names))
if model.alternate_names != alternate_names:
model.alternate_names = alternate_names
save = True
if save:
model.save(force_update=True)
i += 1
self.progress_update(i)
self.progress_finish()
def save(self, model, force_insert=False, force_update=False):
try:
with transaction.atomic():
self.logger.debug('Saving %s' % model.name)
model.save(
force_insert=force_insert,
force_update=force_update
)
except IntegrityError as e:
# Regarding %r see the https://code.djangoproject.com/ticket/20572
# Also related to http://bugs.python.org/issue2517
self.logger.warning('Saving %s failed: %r' % (model, e))
|
|
"""
This file contains the `Board` class, which implements the rules for the
game Isolation as described in lecture, modified so that the players move
like knights in chess rather than queens.
You MAY use and modify this class, however ALL function signatures must
remain compatible with the defaults provided, and none of your changes will
be available to project reviewers.
"""
import random
import timeit
from copy import copy
TIME_LIMIT_MILLIS = 150
class Board(object):
"""Implement a model for the game Isolation assuming each player moves like
a knight in chess.
Parameters
----------
player_1 : object
An object with a get_move() function. This is the only function
directly called by the Board class for each player.
player_2 : object
An object with a get_move() function. This is the only function
directly called by the Board class for each player.
width : int (optional)
The number of columns that the board should have.
height : int (optional)
The number of rows that the board should have.
"""
BLANK = 0
NOT_MOVED = None
def __init__(self, player_1, player_2, width=7, height=7):
self.width = width
self.height = height
self.move_count = 0
self._player_1 = player_1
self._player_2 = player_2
self._active_player = player_1
self._inactive_player = player_2
# The last 3 entries of the board state include initiative (0 for
# player 1, 1 for player 2), player 2's last move, and player 1's last move
self._board_state = [Board.BLANK] * (width * height + 3)
self._board_state[-1] = Board.NOT_MOVED
self._board_state[-2] = Board.NOT_MOVED
def hash(self):
return str(self._board_state).__hash__()
@property
def active_player(self):
"""The object registered as the player holding initiative in the
current game state.
"""
return self._active_player
@property
def inactive_player(self):
"""The object registered as the player in waiting for the current
game state.
"""
return self._inactive_player
def get_opponent(self, player):
"""Return the opponent of the supplied player.
Parameters
----------
player : object
An object registered as a player in the current game. Raises an
error if the supplied object is not registered as a player in
this game.
Returns
-------
object
The opponent of the input player object.
"""
if player == self._active_player:
return self._inactive_player
elif player == self._inactive_player:
return self._active_player
raise RuntimeError("`player` must be an object registered as a player in the current game.")
def copy(self):
""" Return a deep copy of the current board. """
new_board = Board(self._player_1, self._player_2, width=self.width, height=self.height)
new_board.move_count = self.move_count
new_board._active_player = self._active_player
new_board._inactive_player = self._inactive_player
new_board._board_state = copy(self._board_state)
return new_board
def forecast_move(self, move):
"""Return a deep copy of the current game with an input move applied to
advance the game one ply.
Parameters
----------
move : (int, int)
A coordinate pair (row, column) indicating the next position for
the active player on the board.
Returns
-------
isolation.Board
A deep copy of the board with the input move applied.
"""
new_board = self.copy()
new_board.apply_move(move)
return new_board
def move_is_legal(self, move):
"""Test whether a move is legal in the current game state.
Parameters
----------
move : (int, int)
A coordinate pair (row, column) indicating the next position for
the active player on the board.
Returns
-------
bool
Returns True if the move is legal, False otherwise
"""
idx = move[0] + move[1] * self.height
return (0 <= move[0] < self.height and 0 <= move[1] < self.width and
self._board_state[idx] == Board.BLANK)
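# Worked example (added for clarity): the board is stored column-major, so on
# the default 7x7 board the square (row=2, col=3) lives at index
# 2 + 3 * 7 = 23 in _board_state; move_is_legal() simply checks that this
# index is within bounds and still Board.BLANK.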
def get_blank_spaces(self):
"""Return a list of the locations that are still available on the board.
"""
return [(i, j) for j in range(self.width) for i in range(self.height)
if self._board_state[i + j * self.height] == Board.BLANK]
def get_player_location(self, player):
"""Find the current location of the specified player on the board.
Parameters
----------
player : object
An object registered as a player in the current game.
Returns
-------
(int, int) or None
The coordinate pair (row, column) of the input player, or None
if the player has not moved.
"""
if player == self._player_1:
if self._board_state[-1] == Board.NOT_MOVED:
return Board.NOT_MOVED
idx = self._board_state[-1]
elif player == self._player_2:
if self._board_state[-2] == Board.NOT_MOVED:
return Board.NOT_MOVED
idx = self._board_state[-2]
else:
raise RuntimeError(
"Invalid player in get_player_location: {}".format(player))
w = idx // self.height
h = idx % self.height
return (h, w)
def get_legal_moves(self, player=None):
"""Return the list of all legal moves for the specified player.
Parameters
----------
player : object (optional)
An object registered as a player in the current game. If None,
return the legal moves for the active player on the board.
Returns
-------
list<(int, int)>
The list of coordinate pairs (row, column) of all legal moves
for the player constrained by the current game state.
"""
if player is None:
player = self.active_player
return self.__get_moves(self.get_player_location(player))
def apply_move(self, move):
"""Move the active player to a specified location.
Parameters
----------
move : (int, int)
A coordinate pair (row, column) indicating the next position for
the active player on the board.
"""
idx = move[0] + move[1] * self.height
last_move_idx = int(self.active_player == self._player_2) + 1
self._board_state[-last_move_idx] = idx
self._board_state[idx] = 1
self._board_state[-3] ^= 1
self._active_player, self._inactive_player = self._inactive_player, self._active_player
self.move_count += 1
def is_winner(self, player):
""" Test whether the specified player has won the game. """
return player == self._inactive_player and not self.get_legal_moves(self._active_player)
def is_loser(self, player):
""" Test whether the specified player has lost the game. """
return player == self._active_player and not self.get_legal_moves(self._active_player)
def utility(self, player):
"""Returns the utility of the current game state from the perspective
of the specified player.
/ +infinity, "player" wins
utility = | -infinity, "player" loses
\ 0, otherwise
Parameters
----------
player : object
An object registered as a player in the current game.
Returns
----------
float
The utility value of the current game state for the specified
player. The game has a utility of +inf if the player has won,
a value of -inf if the player has lost, and a value of 0
otherwise.
"""
if not self.get_legal_moves(self._active_player):
if player == self._inactive_player:
return float("inf")
if player == self._active_player:
return float("-inf")
return 0.
def __get_moves(self, loc):
"""Generate the list of possible moves for an L-shaped motion (like a
knight in chess).
"""
if loc == Board.NOT_MOVED:
return self.get_blank_spaces()
r, c = loc
directions = [(-2, -1), (-2, 1), (-1, -2), (-1, 2),
(1, -2), (1, 2), (2, -1), (2, 1)]
valid_moves = [(r + dr, c + dc) for dr, dc in directions
if self.move_is_legal((r + dr, c + dc))]
random.shuffle(valid_moves)
return valid_moves
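# Worked example (added for clarity): from the corner (0, 0) only the
# offsets (1, 2) and (2, 1) stay on the board, so __get_moves((0, 0)) returns
# [(1, 2), (2, 1)] in a random order (random.shuffle is applied above).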
def print_board(self):
"""DEPRECATED - use Board.to_string()"""
return self.to_string()
def to_string(self, symbols=['1', '2']):
"""Generate a string representation of the current game state, marking
the location of each player and indicating which cells have been
blocked, and which remain open.
"""
p1_loc = self._board_state[-1]
p2_loc = self._board_state[-2]
col_margin = len(str(self.height - 1)) + 1
prefix = "{:<" + "{}".format(col_margin) + "}"
offset = " " * (col_margin + 3)
out = offset + ' '.join(map(str, range(self.width))) + '\n\r'
for i in range(self.height):
out += prefix.format(i) + ' | '
for j in range(self.width):
idx = i + j * self.height
if not self._board_state[idx]:
out += ' '
elif p1_loc == idx:
out += symbols[0]
elif p2_loc == idx:
out += symbols[1]
else:
out += '-'
out += ' | '
out += '\n\r'
return out
def play(self, time_limit=TIME_LIMIT_MILLIS):
"""Execute a match between the players by alternately soliciting them
to select a move and applying it in the game.
Parameters
----------
time_limit : numeric (optional)
The maximum number of milliseconds to allow before timeout
during each turn.
Returns
----------
(player, list<[(int, int),]>, str)
            Returns a tuple containing the winning player, the complete game
            move history, and a string indicating the reason for losing
            (e.g., timeout or invalid move).
"""
move_history = []
time_millis = lambda: 1000 * timeit.default_timer()
while True:
legal_player_moves = self.get_legal_moves()
game_copy = self.copy()
move_start = time_millis()
time_left = lambda : time_limit - (time_millis() - move_start)
curr_move = self._active_player.get_move(game_copy, time_left)
move_end = time_left()
if curr_move is None:
curr_move = Board.NOT_MOVED
if move_end < 0:
return self._inactive_player, move_history, "timeout"
if curr_move not in legal_player_moves:
if len(legal_player_moves) > 0:
return self._inactive_player, move_history, "forfeit"
return self._inactive_player, move_history, "illegal move"
move_history.append(list(curr_move))
self.apply_move(curr_move)
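# --- Usage sketch (illustrative, not part of the original module) ---
# Assumes the Board constructor defined earlier in this file takes the two
# player objects as its first arguments, and that `random` is already imported
# at the top of the module (it is used by __get_moves above).  A player only
# needs a get_move(game, time_left) method, as required by Board.play().
if __name__ == '__main__':
    class RandomPlayer(object):
        """Hypothetical player that picks a random legal move."""
        def get_move(self, game, time_left):
            moves = game.get_legal_moves()
            return random.choice(moves) if moves else Board.NOT_MOVED

    game = Board(RandomPlayer(), RandomPlayer())
    winner, history, reason = game.play()
    print(winner, reason, len(history))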
|
|
from __future__ import absolute_import, division
import logging
import os
import re
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import pyproteome as pyp
LOGGER = logging.getLogger('pyproteome.volcano')
MAX_VOLCANO_LABELS = 500
VOLCANO_TEXT_SIZE = 10
VOLCANO_LARGE_TEXT_SIZE = 20
def _remove_lesser_dups(labels, compress_sym=False):
if labels.shape[0] < 1:
return labels
labels['xy'] = labels.apply(lambda x: abs(x['x']) + x['y'], axis=1)
labels = labels.sort_values('xy', ascending=False)
if compress_sym:
labels = labels.drop_duplicates(subset='Label')
else:
labels = pd.concat([
labels[labels['x'] >= 0].drop_duplicates(subset='Label'),
labels[labels['x'] < 0].drop_duplicates(subset='Label'),
])
return labels
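# Illustrative example (not executed): if the same 'EGFR' label appears at
# (x=2.0, y=3.0) and (x=0.5, y=1.0), only the first row survives because its
# |x| + y score is larger, so each label is drawn once at its most extreme
# point (or once per sign of x when compress_sym is False).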
def plot_volcano_labels(
data,
ax,
upper_fold=None,
lower_fold=None,
p=None,
fold_and_p=True,
sequence_labels=False,
options=None,
show_duplicates=False,
compress_sym=True,
adjust=True,
mods=None,
):
'''
Plot labels on a volcano plot.
Parameters
----------
data : :class:`pyproteome.data_sets.DataSet`
ax : :class:`matplotlib.axes.Axes`
upper_fold : float, optional
lower_fold : float, optional
p : float, optional
fold_and_p : bool, optional
sequence_labels : bool, optional
options : dict, optional
show_duplicates : bool, optional
compress_sym : bool, optional
adjust : bool, optional
mods : str or list of str, optional
Returns
-------
labels : :class:`pandas.DataFrame`
'''
options = options or {}
highlight = options.get('highlight', {})
hide = options.get('hide', {})
show = options.get('show', {})
edgecolors = options.get('edgecolors', {})
rename = options.get('rename', {})
xminmax = ax.get_xlim()
yminmax = ax.get_ylim()
labels = data.psms.copy()
labels['x'] = labels['Fold Change']
labels['y'] = labels['p-value']
labels = labels[
(labels['x'] >= xminmax[0]) &
(labels['x'] <= xminmax[1]) &
(labels['y'] >= yminmax[0]) &
(labels['y'] <= yminmax[1])
]
if labels.shape[0] < 1:
return labels
if sequence_labels:
labels['Label'] = labels['Sequence'].apply(str)
elif not mods:
labels['Label'] = labels['Proteins'].apply(pyp.utils.get_name)
else:
labels['Label'] = labels.apply(
lambda x:
'{}{}{}'.format(
pyp.utils.get_name(x['Proteins']),
' ' if len(x['Modifications'].get_mods(mods)) > 0 else '',
' / '.join([
x['Modifications'].get_mods(
mods,
).__str__(prot_index=index)
for index, gene in enumerate(x['Proteins'].genes)
]) if len(x['Modifications'].get_mods(mods)) > 0 else '',
),
# if len(list(x['Modifications'].get_mods(mods))) > 0 else
# ' / '.join(x['Proteins'].genes),
axis=1,
)
def _get_names(row):
names = [
rename.get(row['Label'], row['Label']),
row['Label'],
]
if 'Sequence' in row:
names += [str(row['Sequence'])]
if 'Proteins' in row:
names += [
j
for i in row['Proteins'].genes
for j in [i, rename.get(i, i)]
]
return names
labels['Names'] = labels.apply(_get_names, axis=1)
labels = labels[
labels['Names'].apply(lambda x: not any([i in hide for i in x]))
]
labels = labels[
labels['Names'].apply(lambda x: any([i in show for i in x])) | (
(
(labels['y'] >= p) &
(
(labels['x'] >= upper_fold) |
(labels['x'] <= lower_fold)
)
)
if fold_and_p else
(
(labels['y'] >= p) |
(
(labels['x'] >= upper_fold) |
(labels['x'] <= lower_fold)
)
)
)
]
labels['Highlight'] = labels['Names'].apply(
lambda x:
any([
i in highlight
for i in x
])
)
labels['Label'] = labels['Names'].apply(
lambda x:
x[0]
)
def _get_txt_color(row):
colors = [
edgecolors.get(i)
for i in row['Names'][:2]
if i in edgecolors
]
edgecolor = colors[0] if colors else None
if edgecolor is None:
gene_colors = [
edgecolors.get(gene, None)
for gene in row['Proteins'].genes
]
if len(set(gene_colors)) == 1:
edgecolor = gene_colors[0]
return edgecolor
labels['EdgeColor'] = labels.apply(
_get_txt_color,
axis=1,
) if labels.shape[0] > 0 else []
labels = labels[
[
'x',
'y',
'Label',
'EdgeColor',
'Highlight',
]
].copy()
if not show_duplicates:
labels = _remove_lesser_dups(labels, compress_sym=compress_sym)
# Position the labels
txt_lim = 100 if mods else 12
texts = [
ax.text(
x=row['x'],
y=row['y'],
s=row['Label'][:txt_lim] + (
'...' if len(row['Label']) > txt_lim else ''
),
zorder=10,
fontsize=(
VOLCANO_LARGE_TEXT_SIZE
if row['Highlight'] else
VOLCANO_TEXT_SIZE
),
horizontalalignment=(
'left' if row['x'] > 0 else 'right'
),
bbox=dict(
alpha=1,
linewidth=0.1,
pad=.2,
facecolor=row['EdgeColor'] or (
'#DDDDDD'
if edgecolors else
('#BFEE90' if row['x'] > 0 else '#FFC1C1')
),
zorder=1,
# edgecolor='black',
boxstyle='round',
),
)
for _, row in labels.iterrows()
]
LOGGER.info('Plotting volcano labels for {} peptides from {} points'.format(len(texts), data.shape[0]))
if adjust and texts:
texts = texts[:MAX_VOLCANO_LABELS]
pyp.utils.adjust_text(
texts=texts,
ax=ax,
lim=100,
force_text=(.05, .3),
force_points=.01,
arrowprops=dict(
arrowstyle='->',
relpos=(0, 0),
lw=1,
zorder=1,
color='k',
),
only_move={
'points': 'y',
'text': 'xy',
}
)
return labels
def plot_volcano(
data,
group_a=None,
group_b=None,
p=0.05,
fold=1.25,
xminmax=None,
yminmax=None,
title=None,
ax=None,
show_xlabel=True,
show_ylabel=True,
log2_fold=True,
log10_p=True,
bonferoni=False,
**kwargs
):
'''
Display a volcano plot of data.
    This plot includes the fold-changes and p-values associated with said
    changes.
Parameters
----------
data : :class:`pyproteome.data_sets.DataSet`
group_a : str or list of str, optional
group_b : str or list of str, optional
p : float, optional
fold : float, optional
xminmax : tuple of (float, float), optional
yminmax : tuple of (float, float), optional
title : str, optional
ax : :class:`matplotlib.axes.Axes`
show_xlabel : bool, optional
show_ylabel : bool, optional
log2_fold : bool, optional
log10_p : bool, optional
bonferoni : bool, optional
kwargs : dict
Arguments passed to :func:`.plot_volcano_labels`
Returns
-------
f : :class:`matplotlib.figure.Figure`
ax : :class:`matplotlib.axes.Axes`
'''
data = data.copy()
(channels_a, channels_b), (label_a, label_b), _ = data.get_groups(
group_a=group_a,
group_b=group_b,
)
if group_a and group_b:
data.update_group_changes(group_a=group_a, group_b=group_b)
if log10_p:
p = -np.log10(p)
data.psms['p-value'] = data.psms['p-value'].apply(
lambda x: -np.log10(x)
)
if log2_fold:
fold = np.log2(fold)
data.psms['Fold Change'] = data.psms['Fold Change'].apply(
lambda x: np.log2(x)
)
upper_fold = fold
lower_fold = -upper_fold
with pd.option_context('mode.use_inf_as_null', True):
data.psms = data.psms.dropna(
subset=['p-value', 'Fold Change'],
how='any',
)
if yminmax:
data.psms = data[
(data['p-value'] <= yminmax[1]) &
(data['p-value'] >= yminmax[0])
]
if xminmax:
data.psms = data[
(data['Fold Change'] <= xminmax[1]) &
(data['Fold Change'] >= xminmax[0])
]
if bonferoni:
p += np.log10(data.shape[0])
# Draw the figure
if ax is None:
_, ax = plt.subplots(figsize=(6, 6))
ax.scatter(
data['Fold Change'],
data['p-value'],
s=5,
c='grey',
)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
if not np.isnan(p):
ax.axhline(
p,
color='r', linestyle='dashed', linewidth=0.5,
)
if (abs(fold) if log2_fold else abs(fold - 1)) > 0.01:
ax.axvline(
upper_fold,
color='r',
linestyle='dashed',
linewidth=0.5,
)
ax.axvline(
lower_fold,
color='r',
linestyle='dashed',
linewidth=0.5,
)
if xminmax:
ax.set_xlim(left=xminmax[0], right=xminmax[1])
else:
        ax.set_xlim(
            left=np.floor(min(list(data['Fold Change']) + [0]) * 2) / 2,
            right=np.ceil(max(list(data['Fold Change']) + [0]) * 2) / 2,
        )
if yminmax:
ax.set_ylim(bottom=yminmax[0], top=yminmax[1])
else:
ax.set_ylim(bottom=-0.1)
ax.set_xticks(
list(
sorted(
tick
for tick in tuple(ax.get_xticks()) + (lower_fold, upper_fold)
if (
tick in [lower_fold, upper_fold] or
tick < lower_fold - .25 or
tick > upper_fold + .25
) and (tick <= ax.get_xlim()[1] and tick >= ax.get_xlim()[0])
)
)
)
ax.set_yticks(
list(
sorted(
[
tick
for tick in tuple(ax.get_yticks()) + (p,)
if '{}'.format(np.power(1/10, tick)).strip('0.')[:1] in
['1', '5'] and
tick >= p and
(tick <= ax.get_ylim()[1] and tick >= ax.get_ylim()[0])
]
)
)
)
ax.set_xticklabels(
[
'{:.3}'.format(np.exp2(i) if log2_fold else i)
for i in ax.get_xticks()
],
)
ax.set_yticklabels(
[
(
'{:.3}' if i > 5e-3 else '{:.0e}'
).format(np.power(1/10, i) if log10_p else i)
for i in ax.get_yticks()
],
)
if show_xlabel:
max_len = 25
ax.set_xlabel(
'{} (n={}) / {} (n={})'.format(
label_a[:max_len] + ('...' if len(label_a) > max_len else ''),
len(channels_a),
label_b[:max_len] + ('...' if len(label_b) > max_len else ''),
len(channels_b),
),
)
if show_ylabel:
ax.set_ylabel(
'p-value',
)
if title:
ax.set_title(
title,
)
plot_volcano_labels(
data=data,
ax=ax,
upper_fold=upper_fold,
lower_fold=lower_fold,
p=p,
**kwargs
)
fig = ax.get_figure()
return fig, ax
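# Usage sketch (illustrative, not part of pyproteome): ``ds`` stands for an
# already-loaded :class:`pyproteome.data_sets.DataSet`; the group names
# 'Treated' and 'Control' are placeholders for whatever groups exist in the
# data set's channel mapping.
def _example_plot_volcano(ds):
    f, ax = plot_volcano(
        ds,
        group_a='Treated',
        group_b='Control',
        p=0.01,
        fold=1.5,
    )
    f.savefig('volcano_example.png', dpi=300, bbox_inches='tight')
    return f, ax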
def plot_volcano_filtered(data, f, **kwargs):
'''
Display a volcano plot, showing only peptides that are included by a given
filter.
Parameters
----------
data : :class:`pyproteome.data_sets.DataSet`
f : dict or list of dict
Filters passed to :func:`pyproteome.data_sets.DataSet.filter`.
kwargs : dict
Extra arguments that are passed directly to :func:`.plot_volcano`.
Returns
-------
f : :class:`matplotlib.figure.Figure`
ax : :class:`matplotlib.axes.Axes`
'''
data = data.copy()
data.update_group_changes(
group_a=kwargs.get('group_a', None),
group_b=kwargs.get('group_b', None),
)
changes = []
pvals = []
for _, row in data.psms.iterrows():
row_pval = -np.log10(row['p-value'])
row_change = np.log2(row['Fold Change'])
if (
np.isnan(row_pval) or
np.isnan(row_change) or
np.isinf(row_pval) or
np.isinf(row_change)
):
continue
pvals.append(row_pval)
changes.append(row_change)
d = data.filter(f)
xminmax = kwargs.pop(
'xminmax',
(
np.floor(min(changes + [0]) * 2) / 2,
np.ceil(max(changes + [0]) * 2) / 2,
),
)
yminmax = kwargs.pop(
'yminmax',
(-0.1, np.ceil(max(pvals + [1]))),
)
f, ax = plot_volcano(
d,
xminmax=xminmax,
yminmax=yminmax,
**kwargs
)
if changes and pvals:
changes, pvals = zip(*[
(x, y)
for x, y in zip(changes, pvals)
if x > xminmax[0] and x < xminmax[1] and
y > yminmax[0] and y < yminmax[1]
])
ax.scatter(
changes,
pvals,
s=5,
zorder=0,
c='grey',
# c='lightblue',
# alpha=0.3,
)
return f, ax
|
|
#
# This file is part of the PyMeasure package.
#
# Copyright (c) 2013-2017 PyMeasure Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import logging
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
import os
import pyqtgraph as pg
from .Qt import QtCore, QtGui
from .widgets import PlotWidget, BrowserWidget, InputsWidget, LogWidget, ResultsDialog
from .curves import ResultsCurve
from .browser import BrowserItem
from .manager import Manager, Experiment
from .log import LogHandler
from ..experiment.results import Results
class PlotterWindow(QtGui.QMainWindow):
def __init__(self, plotter, refresh_time=0.1, parent=None):
super(PlotterWindow, self).__init__(parent)
self.plotter = plotter
columns = plotter.results.procedure.DATA_COLUMNS
self.setWindowTitle('Results Plotter')
self.main = QtGui.QWidget(self)
vbox = QtGui.QVBoxLayout(self.main)
vbox.setSpacing(0)
hbox1 = QtGui.QHBoxLayout()
hbox1.setSpacing(6)
hbox1.setContentsMargins(-1, 6, -1, -1)
file_label = QtGui.QLabel(self.main)
file_label.setText('Data Filename:')
self.file = QtGui.QLineEdit(self.main)
self.file.setText(plotter.results.data_filename)
hbox1.addWidget(file_label)
hbox1.addWidget(self.file)
vbox.addLayout(hbox1)
self.plot_widget = PlotWidget(columns, check_status=False)
self.plot = self.plot_widget.plot
vbox.addWidget(self.plot_widget)
self.main.setLayout(vbox)
self.setCentralWidget(self.main)
self.main.show()
self.resize(800, 600)
self.curve = ResultsCurve(plotter.results, columns[0], columns[1],
pen=pg.mkPen(color=pg.intColor(0), width=2), antialias=False)
self.plot.addItem(self.curve)
self.plot_widget.updated.connect(self.check_stop)
def quit(self, evt=None):
log.info("Quitting the Plotter")
self.close()
self.plotter.stop()
def check_stop(self):
""" Checks if the Plotter should stop and exits the Qt main loop if so
"""
if self.plotter.should_stop():
QtCore.QCoreApplication.instance().quit()
class ManagedWindow(QtGui.QMainWindow):
""" The ManagedWindow uses a Manager to control Workers in a Queue,
and provides a simple interface. The queue method must be overwritten
by a child class which is required to pass an Experiment containing the
Results and Procedure to self.manager.queue.
"""
EDITOR = 'gedit'
def __init__(self, procedure_class, inputs=[], displays=[], x_axis=None, y_axis=None, log_channel='', log_level=logging.INFO, parent=None):
super(ManagedWindow, self).__init__(parent=parent)
app = QtCore.QCoreApplication.instance()
app.aboutToQuit.connect(self.quit)
self.procedure_class = procedure_class
self.inputs = inputs
self.displays = displays
self.log = logging.getLogger(log_channel)
self.log_level = log_level
log.setLevel(log_level)
self.log.setLevel(log_level)
self.x_axis, self.y_axis = x_axis, y_axis
self._setup_ui()
self._layout()
def _setup_ui(self):
self.log_widget = LogWidget()
self.log.addHandler(self.log_widget.handler) # needs to be in Qt context?
log.info("ManagedWindow connected to logging")
self.queue_button = QtGui.QPushButton('Queue', self)
self.queue_button.clicked.connect(self.queue)
self.abort_button = QtGui.QPushButton('Abort', self)
self.abort_button.setEnabled(False)
self.abort_button.clicked.connect(self.abort)
self.plot_widget = PlotWidget(self.procedure_class.DATA_COLUMNS, self.x_axis, self.y_axis)
self.plot = self.plot_widget.plot
self.browser_widget = BrowserWidget(
self.procedure_class,
self.displays,
[self.x_axis, self.y_axis],
parent=self
)
self.browser_widget.show_button.clicked.connect(self.show_experiments)
self.browser_widget.hide_button.clicked.connect(self.hide_experiments)
self.browser_widget.clear_button.clicked.connect(self.clear_experiments)
self.browser_widget.open_button.clicked.connect(self.open_experiment)
self.browser = self.browser_widget.browser
self.browser.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.browser.customContextMenuRequested.connect(self.browser_item_menu)
self.browser.itemChanged.connect(self.browser_item_changed)
self.inputs = InputsWidget(
self.procedure_class,
self.inputs,
parent=self
)
self.manager = Manager(self.plot, self.browser, log_level=self.log_level, parent=self)
self.manager.abort_returned.connect(self.abort_returned)
self.manager.queued.connect(self.queued)
self.manager.running.connect(self.running)
self.manager.finished.connect(self.finished)
self.manager.log.connect(self.log.handle)
def _layout(self):
self.main = QtGui.QWidget(self)
inputs_dock = QtGui.QWidget(self)
inputs_vbox = QtGui.QVBoxLayout(self.main)
hbox = QtGui.QHBoxLayout()
hbox.setSpacing(10)
hbox.setContentsMargins(-1, 6, -1, 6)
hbox.addWidget(self.queue_button)
hbox.addWidget(self.abort_button)
hbox.addStretch()
inputs_vbox.addWidget(self.inputs)
inputs_vbox.addLayout(hbox)
inputs_vbox.addStretch()
inputs_dock.setLayout(inputs_vbox)
dock = QtGui.QDockWidget('Input Parameters')
dock.setWidget(inputs_dock)
dock.setFeatures(QtGui.QDockWidget.NoDockWidgetFeatures)
self.addDockWidget(QtCore.Qt.LeftDockWidgetArea, dock)
tabs = QtGui.QTabWidget(self.main)
tabs.addTab(self.plot_widget, "Results Graph")
tabs.addTab(self.log_widget, "Experiment Log")
splitter = QtGui.QSplitter(QtCore.Qt.Vertical)
splitter.addWidget(tabs)
splitter.addWidget(self.browser_widget)
self.plot_widget.setMinimumSize(100, 200)
vbox = QtGui.QVBoxLayout(self.main)
vbox.setSpacing(0)
vbox.addWidget(splitter)
self.main.setLayout(vbox)
self.setCentralWidget(self.main)
self.main.show()
self.resize(1000, 800)
def quit(self, evt=None):
self.close()
def browser_item_changed(self, item, column):
if column == 0:
state = item.checkState(0)
experiment = self.manager.experiments.with_browser_item(item)
if state == 0:
self.plot.removeItem(experiment.curve)
else:
experiment.curve.x = self.plot_widget.plot_frame.x_axis
experiment.curve.y = self.plot_widget.plot_frame.y_axis
experiment.curve.update()
self.plot.addItem(experiment.curve)
def browser_item_menu(self, position):
item = self.browser.itemAt(position)
if item is not None:
experiment = self.manager.experiments.with_browser_item(item)
menu = QtGui.QMenu(self)
# Open
action_open = QtGui.QAction(menu)
action_open.setText("Open Data Externally")
action_open.triggered.connect(
lambda: self.open_file_externally(experiment.results.data_filename))
menu.addAction(action_open)
# Change Color
action_change_color = QtGui.QAction(menu)
action_change_color.setText("Change Color")
action_change_color.triggered.connect(
lambda: self.change_color(experiment))
menu.addAction(action_change_color)
# Remove
action_remove = QtGui.QAction(menu)
action_remove.setText("Remove Graph")
if self.manager.is_running():
if self.manager.running_experiment() == experiment: # Experiment running
action_remove.setEnabled(False)
action_remove.triggered.connect(lambda: self.remove_experiment(experiment))
menu.addAction(action_remove)
# Use parameters
action_use = QtGui.QAction(menu)
action_use.setText("Use These Parameters")
action_use.triggered.connect(
lambda: self.set_parameters(experiment.procedure.parameter_objects()))
menu.addAction(action_use)
menu.exec_(self.browser.viewport().mapToGlobal(position))
def remove_experiment(self, experiment):
reply = QtGui.QMessageBox.question(self, 'Remove Graph',
"Are you sure you want to remove the graph?", QtGui.QMessageBox.Yes |
QtGui.QMessageBox.No, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
self.manager.remove(experiment)
def show_experiments(self):
root = self.browser.invisibleRootItem()
for i in range(root.childCount()):
item = root.child(i)
item.setCheckState(0, QtCore.Qt.Checked)
def hide_experiments(self):
root = self.browser.invisibleRootItem()
for i in range(root.childCount()):
item = root.child(i)
item.setCheckState(0, QtCore.Qt.Unchecked)
def clear_experiments(self):
self.manager.clear()
def open_experiment(self):
dialog = ResultsDialog(self.procedure_class.DATA_COLUMNS, self.x_axis, self.y_axis)
if dialog.exec_():
filenames = dialog.selectedFiles()
for filename in map(str,filenames):
if filename in self.manager.experiments:
QtGui.QMessageBox.warning(self, "Load Error",
"The file %s cannot be opened twice." % os.path.basename(filename))
elif filename == '':
return
else:
results = Results.load(filename)
experiment = self.new_experiment(results)
experiment.curve.update()
experiment.browser_item.progressbar.setValue(100.)
self.manager.load(experiment)
log.info('Opened data file %s' % filename)
def change_color(self, experiment):
color = QtGui.QColorDialog.getColor(
initial=experiment.curve.opts['pen'].color(), parent=self)
if color.isValid():
pixelmap = QtGui.QPixmap(24, 24)
pixelmap.fill(color)
experiment.browser_item.setIcon(0, QtGui.QIcon(pixelmap))
experiment.curve.setPen(pg.mkPen(color=color, width=2))
def open_file_externally(self, filename):
# TODO: Make this function OS-agnostic
import subprocess
proc = subprocess.Popen([self.EDITOR, filename])
def make_procedure(self):
if not isinstance(self.inputs, InputsWidget):
raise Exception("ManagedWindow can not make a Procedure"
" without a InputsWidget type")
return self.inputs.get_procedure()
def new_curve(self, results, color=None, **kwargs):
if color is None:
color = pg.intColor(self.browser.topLevelItemCount() % 8)
return self.plot_widget.new_curve(results, color=color, **kwargs)
def new_experiment(self, results, curve=None):
if curve is None:
curve = self.new_curve(results)
browser_item = BrowserItem(results, curve)
return Experiment(results, curve, browser_item)
def set_parameters(self, parameters):
""" This method should be overwritten by the child class. The
parameters argument is a dictionary of Parameter objects.
The Parameters should overwrite the GUI values so that a user
can click "Queue" to capture the same parameters.
"""
if not isinstance(self.inputs, InputsWidget):
raise Exception("ManagedWindow can not set parameters"
" without a InputsWidget")
self.inputs.set_parameters(parameters)
def queue(self):
""" This method should be overwritten by the child class. The
self.manager.queue method should be passed an Experiment object
which contains the Results and Procedure to be run.
"""
raise Exception("ManagedWindow child class does not implement queue method")
def abort(self):
self.abort_button.setEnabled(False)
self.abort_button.setText("Resume")
self.abort_button.clicked.disconnect()
self.abort_button.clicked.connect(self.resume)
try:
self.manager.abort()
except:
log.error('Failed to abort experiment', exc_info=True)
self.abort_button.setText("Abort")
self.abort_button.clicked.disconnect()
self.abort_button.clicked.connect(self.abort)
def resume(self):
self.abort_button.setText("Abort")
self.abort_button.clicked.disconnect()
self.abort_button.clicked.connect(self.abort)
if self.manager.experiments.has_next():
self.manager.resume()
else:
self.abort_button.setEnabled(False)
def queued(self, experiment):
self.abort_button.setEnabled(True)
self.browser_widget.show_button.setEnabled(True)
self.browser_widget.hide_button.setEnabled(True)
self.browser_widget.clear_button.setEnabled(True)
def running(self, experiment):
self.browser_widget.clear_button.setEnabled(False)
def abort_returned(self, experiment):
if self.manager.experiments.has_next():
self.abort_button.setText("Resume")
self.abort_button.setEnabled(True)
else:
self.browser_widget.clear_button.setEnabled(True)
def finished(self, experiment):
if not self.manager.experiments.has_next():
self.abort_button.setEnabled(False)
self.browser_widget.clear_button.setEnabled(True)
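# --- Usage sketch (illustrative, not part of this module) ---
# A child class only has to override queue(), as the ManagedWindow docstring
# requires.  ``MyProcedure`` is a hypothetical Procedure subclass with
# DATA_COLUMNS and the listed parameters; the temporary-file name is just one
# way to give Results somewhere to write.
#
#   import tempfile
#
#   class MyWindow(ManagedWindow):
#       def __init__(self):
#           super(MyWindow, self).__init__(
#               procedure_class=MyProcedure,
#               inputs=['iterations', 'delay'],
#               displays=['iterations', 'delay'],
#               x_axis='Iteration',
#               y_axis='Value',
#           )
#
#       def queue(self):
#           procedure = self.make_procedure()
#           results = Results(procedure, tempfile.mktemp(suffix='.csv'))
#           experiment = self.new_experiment(results)
#           self.manager.queue(experiment)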
|
|
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import optparse
import sys
import tempfile
import unittest
from webkitpy.common.system.executive import Executive, ScriptError
from webkitpy.common.system import executive_mock
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.system.path import abspath_to_uri
from webkitpy.tool.mocktool import MockOptions
from webkitpy.common.system.executive_mock import MockExecutive, MockExecutive2
from webkitpy.common.system.systemhost_mock import MockSystemHost
from webkitpy.layout_tests.port import Port, Driver, DriverOutput
from webkitpy.layout_tests.port.base import VirtualTestSuite
from webkitpy.layout_tests.port.test import add_unit_tests_to_mock_filesystem, TestPort
class PortTest(unittest.TestCase):
def make_port(self, executive=None, with_tests=False, port_name=None, **kwargs):
host = MockSystemHost()
if executive:
host.executive = executive
if with_tests:
add_unit_tests_to_mock_filesystem(host.filesystem)
return TestPort(host, **kwargs)
return Port(host, port_name or 'baseport', **kwargs)
def test_default_child_processes(self):
port = self.make_port()
self.assertIsNotNone(port.default_child_processes())
def test_format_wdiff_output_as_html(self):
output = "OUTPUT %s %s %s" % (Port._WDIFF_DEL, Port._WDIFF_ADD, Port._WDIFF_END)
html = self.make_port()._format_wdiff_output_as_html(output)
expected_html = "<head><style>.del { background: #faa; } .add { background: #afa; }</style></head><pre>OUTPUT <span class=del> <span class=add> </span></pre>"
self.assertEqual(html, expected_html)
def test_wdiff_command(self):
port = self.make_port()
port._path_to_wdiff = lambda: "/path/to/wdiff"
command = port._wdiff_command("/actual/path", "/expected/path")
expected_command = [
"/path/to/wdiff",
"--start-delete=##WDIFF_DEL##",
"--end-delete=##WDIFF_END##",
"--start-insert=##WDIFF_ADD##",
"--end-insert=##WDIFF_END##",
"/actual/path",
"/expected/path",
]
self.assertEqual(command, expected_command)
def _file_with_contents(self, contents, encoding="utf-8"):
new_file = tempfile.NamedTemporaryFile()
new_file.write(contents.encode(encoding))
new_file.flush()
return new_file
def test_pretty_patch_os_error(self):
port = self.make_port(executive=executive_mock.MockExecutive2(exception=OSError))
oc = OutputCapture()
oc.capture_output()
self.assertEqual(port.pretty_patch_text("patch.txt"),
port._pretty_patch_error_html)
# This tests repeated calls to make sure we cache the result.
self.assertEqual(port.pretty_patch_text("patch.txt"),
port._pretty_patch_error_html)
oc.restore_output()
def test_pretty_patch_script_error(self):
# FIXME: This is some ugly white-box test hacking ...
port = self.make_port(executive=executive_mock.MockExecutive2(exception=ScriptError))
port._pretty_patch_available = True
self.assertEqual(port.pretty_patch_text("patch.txt"),
port._pretty_patch_error_html)
# This tests repeated calls to make sure we cache the result.
self.assertEqual(port.pretty_patch_text("patch.txt"),
port._pretty_patch_error_html)
def test_wdiff_text(self):
port = self.make_port()
port.wdiff_available = lambda: True
port._run_wdiff = lambda a, b: 'PASS'
self.assertEqual('PASS', port.wdiff_text(None, None))
def test_diff_text(self):
port = self.make_port()
# Make sure that we don't run into decoding exceptions when the
# filenames are unicode, with regular or malformed input (expected or
# actual input is always raw bytes, not unicode).
port.diff_text('exp', 'act', 'exp.txt', 'act.txt')
port.diff_text('exp', 'act', u'exp.txt', 'act.txt')
port.diff_text('exp', 'act', u'a\xac\u1234\u20ac\U00008000', 'act.txt')
port.diff_text('exp' + chr(255), 'act', 'exp.txt', 'act.txt')
port.diff_text('exp' + chr(255), 'act', u'exp.txt', 'act.txt')
# Though expected and actual files should always be read in with no
# encoding (and be stored as str objects), test unicode inputs just to
# be safe.
port.diff_text(u'exp', 'act', 'exp.txt', 'act.txt')
port.diff_text(
u'a\xac\u1234\u20ac\U00008000', 'act', 'exp.txt', 'act.txt')
# And make sure we actually get diff output.
diff = port.diff_text('foo', 'bar', 'exp.txt', 'act.txt')
self.assertIn('foo', diff)
self.assertIn('bar', diff)
self.assertIn('exp.txt', diff)
self.assertIn('act.txt', diff)
self.assertNotIn('nosuchthing', diff)
# Test for missing newline at end of file diff output.
content_a = "Hello\n\nWorld"
content_b = "Hello\n\nWorld\n\n\n"
expected = "--- exp.txt\n+++ act.txt\n@@ -1,3 +1,5 @@\n Hello\n \n-World\n\ No newline at end of file\n+World\n+\n+\n"
self.assertEqual(expected, port.diff_text(content_a, content_b, 'exp.txt', 'act.txt'))
def test_setup_test_run(self):
port = self.make_port()
# This routine is a no-op. We just test it for coverage.
port.setup_test_run()
def test_test_dirs(self):
port = self.make_port()
port.host.filesystem.write_text_file(port.layout_tests_dir() + '/canvas/test', '')
port.host.filesystem.write_text_file(port.layout_tests_dir() + '/css2.1/test', '')
dirs = port.test_dirs()
self.assertIn('canvas', dirs)
self.assertIn('css2.1', dirs)
def test_skipped_perf_tests(self):
port = self.make_port()
def add_text_file(dirname, filename, content='some content'):
dirname = port.host.filesystem.join(port.perf_tests_dir(), dirname)
port.host.filesystem.maybe_make_directory(dirname)
port.host.filesystem.write_text_file(port.host.filesystem.join(dirname, filename), content)
add_text_file('inspector', 'test1.html')
add_text_file('inspector', 'unsupported_test1.html')
add_text_file('inspector', 'test2.html')
add_text_file('inspector/resources', 'resource_file.html')
add_text_file('unsupported', 'unsupported_test2.html')
add_text_file('', 'Skipped', '\n'.join(['Layout', '', 'SunSpider', 'Supported/some-test.html']))
self.assertEqual(port.skipped_perf_tests(), ['Layout', 'SunSpider', 'Supported/some-test.html'])
def test_get_option__set(self):
options, args = optparse.OptionParser().parse_args([])
options.foo = 'bar'
port = self.make_port(options=options)
self.assertEqual(port.get_option('foo'), 'bar')
def test_get_option__unset(self):
port = self.make_port()
self.assertIsNone(port.get_option('foo'))
def test_get_option__default(self):
port = self.make_port()
self.assertEqual(port.get_option('foo', 'bar'), 'bar')
def test_additional_platform_directory(self):
port = self.make_port(port_name='foo')
port.default_baseline_search_path = lambda: ['LayoutTests/platform/foo']
layout_test_dir = port.layout_tests_dir()
test_file = 'fast/test.html'
# No additional platform directory
self.assertEqual(
port.expected_baselines(test_file, '.txt'),
[(None, 'fast/test-expected.txt')])
self.assertEqual(port.baseline_path(), 'LayoutTests/platform/foo')
# Simple additional platform directory
port._options.additional_platform_directory = ['/tmp/local-baselines']
port._filesystem.write_text_file('/tmp/local-baselines/fast/test-expected.txt', 'foo')
self.assertEqual(
port.expected_baselines(test_file, '.txt'),
[('/tmp/local-baselines', 'fast/test-expected.txt')])
self.assertEqual(port.baseline_path(), '/tmp/local-baselines')
# Multiple additional platform directories
port._options.additional_platform_directory = ['/foo', '/tmp/local-baselines']
self.assertEqual(
port.expected_baselines(test_file, '.txt'),
[('/tmp/local-baselines', 'fast/test-expected.txt')])
self.assertEqual(port.baseline_path(), '/foo')
def test_nonexistant_expectations(self):
port = self.make_port(port_name='foo')
port.expectations_files = lambda: ['/mock-checkout/third_party/WebKit/LayoutTests/platform/exists/TestExpectations', '/mock-checkout/third_party/WebKit/LayoutTests/platform/nonexistant/TestExpectations']
port._filesystem.write_text_file('/mock-checkout/third_party/WebKit/LayoutTests/platform/exists/TestExpectations', '')
self.assertEqual('\n'.join(port.expectations_dict().keys()), '/mock-checkout/third_party/WebKit/LayoutTests/platform/exists/TestExpectations')
def test_additional_expectations(self):
port = self.make_port(port_name='foo')
port.port_name = 'foo'
port._filesystem.write_text_file('/mock-checkout/third_party/WebKit/LayoutTests/platform/foo/TestExpectations', '')
port._filesystem.write_text_file(
'/tmp/additional-expectations-1.txt', 'content1\n')
port._filesystem.write_text_file(
'/tmp/additional-expectations-2.txt', 'content2\n')
self.assertEqual('\n'.join(port.expectations_dict().values()), '')
port._options.additional_expectations = [
'/tmp/additional-expectations-1.txt']
self.assertEqual('\n'.join(port.expectations_dict().values()), 'content1\n')
port._options.additional_expectations = [
'/tmp/nonexistent-file', '/tmp/additional-expectations-1.txt']
self.assertEqual('\n'.join(port.expectations_dict().values()), 'content1\n')
port._options.additional_expectations = [
'/tmp/additional-expectations-1.txt', '/tmp/additional-expectations-2.txt']
self.assertEqual('\n'.join(port.expectations_dict().values()), 'content1\n\ncontent2\n')
def test_additional_env_var(self):
port = self.make_port(options=optparse.Values({'additional_env_var': ['FOO=BAR', 'BAR=FOO']}))
self.assertEqual(port.get_option('additional_env_var'), ['FOO=BAR', 'BAR=FOO'])
environment = port.setup_environ_for_server()
self.assertTrue(('FOO' in environment) & ('BAR' in environment))
self.assertEqual(environment['FOO'], 'BAR')
self.assertEqual(environment['BAR'], 'FOO')
def test_find_no_paths_specified(self):
port = self.make_port(with_tests=True)
layout_tests_dir = port.layout_tests_dir()
tests = port.tests([])
self.assertNotEqual(len(tests), 0)
def test_find_one_test(self):
port = self.make_port(with_tests=True)
tests = port.tests(['failures/expected/image.html'])
self.assertEqual(len(tests), 1)
def test_find_glob(self):
port = self.make_port(with_tests=True)
tests = port.tests(['failures/expected/im*'])
self.assertEqual(len(tests), 2)
def test_find_with_skipped_directories(self):
port = self.make_port(with_tests=True)
tests = port.tests(['userscripts'])
self.assertNotIn('userscripts/resources/iframe.html', tests)
def test_find_with_skipped_directories_2(self):
port = self.make_port(with_tests=True)
tests = port.tests(['userscripts/resources'])
self.assertEqual(tests, [])
def test_is_test_file(self):
filesystem = MockFileSystem()
self.assertTrue(Port.is_test_file(filesystem, '', 'foo.html'))
self.assertTrue(Port.is_test_file(filesystem, '', 'foo.svg'))
self.assertTrue(Port.is_test_file(filesystem, '', 'test-ref-test.html'))
self.assertFalse(Port.is_test_file(filesystem, '', 'foo.png'))
self.assertFalse(Port.is_test_file(filesystem, '', 'foo-expected.html'))
self.assertFalse(Port.is_test_file(filesystem, '', 'foo-expected.svg'))
self.assertFalse(Port.is_test_file(filesystem, '', 'foo-expected.xht'))
self.assertFalse(Port.is_test_file(filesystem, '', 'foo-expected-mismatch.html'))
self.assertFalse(Port.is_test_file(filesystem, '', 'foo-expected-mismatch.svg'))
self.assertFalse(Port.is_test_file(filesystem, '', 'foo-expected-mismatch.xhtml'))
self.assertFalse(Port.is_test_file(filesystem, '', 'foo-ref.html'))
self.assertFalse(Port.is_test_file(filesystem, '', 'foo-notref.html'))
self.assertFalse(Port.is_test_file(filesystem, '', 'foo-notref.xht'))
self.assertFalse(Port.is_test_file(filesystem, '', 'foo-ref.xhtml'))
self.assertFalse(Port.is_test_file(filesystem, '', 'ref-foo.html'))
self.assertFalse(Port.is_test_file(filesystem, '', 'notref-foo.xhr'))
def test_parse_reftest_list(self):
port = self.make_port(with_tests=True)
port.host.filesystem.files['bar/reftest.list'] = "\n".join(["== test.html test-ref.html",
"",
"# some comment",
"!= test-2.html test-notref.html # more comments",
"== test-3.html test-ref.html",
"== test-3.html test-ref2.html",
"!= test-3.html test-notref.html",
"fuzzy(80,500) == test-3 test-ref.html"])
# Note that we don't support the syntax in the last line; the code should ignore it, rather than crashing.
reftest_list = Port._parse_reftest_list(port.host.filesystem, 'bar')
self.assertEqual(reftest_list, {'bar/test.html': [('==', 'bar/test-ref.html')],
'bar/test-2.html': [('!=', 'bar/test-notref.html')],
'bar/test-3.html': [('==', 'bar/test-ref.html'), ('==', 'bar/test-ref2.html'), ('!=', 'bar/test-notref.html')]})
def test_reference_files(self):
port = self.make_port(with_tests=True)
self.assertEqual(port.reference_files('passes/svgreftest.svg'), [('==', port.layout_tests_dir() + '/passes/svgreftest-expected.svg')])
self.assertEqual(port.reference_files('passes/xhtreftest.svg'), [('==', port.layout_tests_dir() + '/passes/xhtreftest-expected.html')])
self.assertEqual(port.reference_files('passes/phpreftest.php'), [('!=', port.layout_tests_dir() + '/passes/phpreftest-expected-mismatch.svg')])
def test_operating_system(self):
self.assertEqual('mac', self.make_port().operating_system())
def test_http_server_supports_ipv6(self):
port = self.make_port()
self.assertTrue(port.http_server_supports_ipv6())
port.host.platform.os_name = 'cygwin'
self.assertFalse(port.http_server_supports_ipv6())
port.host.platform.os_name = 'win'
self.assertFalse(port.http_server_supports_ipv6())
def test_check_httpd_success(self):
port = self.make_port(executive=MockExecutive2())
port.path_to_apache = lambda: '/usr/sbin/httpd'
capture = OutputCapture()
capture.capture_output()
self.assertTrue(port.check_httpd())
_, _, logs = capture.restore_output()
self.assertEqual('', logs)
def test_httpd_returns_error_code(self):
port = self.make_port(executive=MockExecutive2(exit_code=1))
port.path_to_apache = lambda: '/usr/sbin/httpd'
capture = OutputCapture()
capture.capture_output()
self.assertFalse(port.check_httpd())
_, _, logs = capture.restore_output()
self.assertEqual('httpd seems broken. Cannot run http tests.\n', logs)
def test_test_exists(self):
port = self.make_port(with_tests=True)
self.assertTrue(port.test_exists('passes'))
self.assertTrue(port.test_exists('passes/text.html'))
self.assertFalse(port.test_exists('passes/does_not_exist.html'))
self.assertTrue(port.test_exists('virtual'))
self.assertFalse(port.test_exists('virtual/does_not_exist.html'))
self.assertTrue(port.test_exists('virtual/passes/text.html'))
def test_test_isfile(self):
port = self.make_port(with_tests=True)
self.assertFalse(port.test_isfile('passes'))
self.assertTrue(port.test_isfile('passes/text.html'))
self.assertFalse(port.test_isfile('passes/does_not_exist.html'))
self.assertFalse(port.test_isfile('virtual'))
self.assertTrue(port.test_isfile('virtual/passes/text.html'))
self.assertFalse(port.test_isfile('virtual/does_not_exist.html'))
def test_test_isdir(self):
port = self.make_port(with_tests=True)
self.assertTrue(port.test_isdir('passes'))
self.assertFalse(port.test_isdir('passes/text.html'))
self.assertFalse(port.test_isdir('passes/does_not_exist.html'))
self.assertFalse(port.test_isdir('passes/does_not_exist/'))
self.assertTrue(port.test_isdir('virtual'))
self.assertFalse(port.test_isdir('virtual/does_not_exist.html'))
self.assertFalse(port.test_isdir('virtual/does_not_exist/'))
self.assertFalse(port.test_isdir('virtual/passes/text.html'))
def test_tests(self):
port = self.make_port(with_tests=True)
tests = port.tests([])
self.assertIn('passes/text.html', tests)
self.assertIn('virtual/passes/text.html', tests)
tests = port.tests(['passes'])
self.assertIn('passes/text.html', tests)
self.assertIn('passes/passes/test-virtual-passes.html', tests)
self.assertNotIn('virtual/passes/text.html', tests)
tests = port.tests(['virtual/passes'])
self.assertNotIn('passes/text.html', tests)
self.assertIn('virtual/passes/test-virtual-passes.html', tests)
self.assertIn('virtual/passes/passes/test-virtual-passes.html', tests)
self.assertNotIn('virtual/passes/test-virtual-virtual/passes.html', tests)
self.assertNotIn('virtual/passes/virtual/passes/test-virtual-passes.html', tests)
def test_build_path(self):
port = self.make_port(options=optparse.Values({'build_directory': '/my-build-directory/'}))
self.assertEqual(port._build_path(), '/my-build-directory/Release')
def test_dont_require_http_server(self):
port = self.make_port()
self.assertEqual(port.requires_http_server(), False)
class NaturalCompareTest(unittest.TestCase):
def setUp(self):
self._port = TestPort(MockSystemHost())
def assert_cmp(self, x, y, result):
self.assertEqual(cmp(self._port._natural_sort_key(x), self._port._natural_sort_key(y)), result)
def test_natural_compare(self):
self.assert_cmp('a', 'a', 0)
self.assert_cmp('ab', 'a', 1)
self.assert_cmp('a', 'ab', -1)
self.assert_cmp('', '', 0)
self.assert_cmp('', 'ab', -1)
self.assert_cmp('1', '2', -1)
self.assert_cmp('2', '1', 1)
self.assert_cmp('1', '10', -1)
self.assert_cmp('2', '10', -1)
self.assert_cmp('foo_1.html', 'foo_2.html', -1)
self.assert_cmp('foo_1.1.html', 'foo_2.html', -1)
self.assert_cmp('foo_1.html', 'foo_10.html', -1)
self.assert_cmp('foo_2.html', 'foo_10.html', -1)
self.assert_cmp('foo_23.html', 'foo_10.html', 1)
self.assert_cmp('foo_23.html', 'foo_100.html', -1)
class KeyCompareTest(unittest.TestCase):
def setUp(self):
self._port = TestPort(MockSystemHost())
def assert_cmp(self, x, y, result):
self.assertEqual(cmp(self._port.test_key(x), self._port.test_key(y)), result)
def test_test_key(self):
self.assert_cmp('/a', '/a', 0)
self.assert_cmp('/a', '/b', -1)
self.assert_cmp('/a2', '/a10', -1)
self.assert_cmp('/a2/foo', '/a10/foo', -1)
self.assert_cmp('/a/foo11', '/a/foo2', 1)
self.assert_cmp('/ab', '/a/a/b', -1)
self.assert_cmp('/a/a/b', '/ab', 1)
self.assert_cmp('/foo-bar/baz', '/foo/baz', -1)
class VirtualTestSuiteTest(unittest.TestCase):
def test_basic(self):
suite = VirtualTestSuite('suite', 'base/foo', ['--args'])
self.assertEqual(suite.name, 'virtual/suite/base/foo')
self.assertEqual(suite.base, 'base/foo')
self.assertEqual(suite.args, ['--args'])
def test_no_slash(self):
suite = VirtualTestSuite('suite/bar', 'base/foo', ['--args'])
self.assertFalse(hasattr(suite, 'name'))
self.assertFalse(hasattr(suite, 'base'))
self.assertFalse(hasattr(suite, 'args'))
def test_legacy(self):
suite = VirtualTestSuite('suite/bar', 'base/foo', ['--args'], use_legacy_naming=True)
self.assertEqual(suite.name, 'virtual/suite/bar')
self.assertEqual(suite.base, 'base/foo')
self.assertEqual(suite.args, ['--args'])
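# Not part of the original file: a conventional entry point so these tests can
# also be run directly (webkitpy normally discovers them through its own test
# runner, so this is purely illustrative).
if __name__ == '__main__':
    unittest.main()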
|
|
# _ _ _____ _ _____ _ _ _
# | | | | | __ \ | | / ____| | | | | |
# | |__| | _____ _____ | |__) |__ __| | | | ___ _ __ | |_ _ __ ___ | | | ___ _ __
# | __ |/ _ \ \/ / _ \| ___/ _ \ / _` | | | / _ \| '_ \| __| '__/ _ \| | |/ _ \ '__|
# | | | | __/> < (_) | | | (_) | (_| | | |___| (_) | | | | |_| | | (_) | | | __/ |
# |_| |_|\___/_/\_\___/|_| \___/ \__,_| \_____\___/|_| |_|\__|_| \___/|_|_|\___|_|
# -----------------------------------------------------------------------------------------
# This program is designed to control a HexoPod built by the University of Abertay, Dundee
# This is the primary controller class for all the servos
# Source available at: https://github.com/megalan247/HexoPod/
# Written by Rory Shanks and Harry Swan
# University of Abertay, Dundee
# Licensed under CC BY-NC-SA 4.0 (HRS available at http://creativecommons.org/licenses/by-nc-sa/4.0/)
#-------------------------------------------------
# Imports and initialization
print "Please wait while the application is initialized..."
from utilities import LegClass as LegClass
from time import sleep
import threading
import sys
# import pygame
# import DanceCode
debug = False  # moveAllLegs() below checks this flag; set True for verbose logging (requires a pyFunctions helper)
# pygame.init()
# Init Joysticks
# pygame.joystick.init()
# joystick = pygame.joystick.Joystick(0)
# joystick.init()
#-------------------------------------------------
# LEG A SET-UP
Legs = []
LegLimits = []
LegLimits.append({'servo1_high': 490, 'servo1_mid': 295, 'servo1_low': 100,
'servo2_high': 475, 'servo2_mid': 280, 'servo2_low': 85,
'servo3_high': 540, 'servo3_mid': 335, 'servo3_low': 155})
Legs.append(LegClass.leg(10,9,8, 0x41, LegLimits[0]))
#-------------------------------------------------
# LEG B SET-UP
LegLimits.append({'servo1_high': 503, 'servo1_mid': 308, 'servo1_low': 113,
'servo2_high': 480, 'servo2_mid': 285, 'servo2_low': 90,
'servo3_high': 530, 'servo3_mid': 325, 'servo3_low': 145})
Legs.append(LegClass.leg(6,7,11, 0x41, LegLimits[1]))
#-------------------------------------------------
# LEG C SET-UP
LegLimits.append({'servo1_high': 477, 'servo1_mid': 282, 'servo1_low': 87,
'servo2_high': 500, 'servo2_mid': 305, 'servo2_low': 110,
'servo3_high': 530, 'servo3_mid': 325, 'servo3_low': 145})
Legs.append(LegClass.leg(0,1,2, 0x41, LegLimits[2]))
#-------------------------------------------------
# LEG D SET-UP
LegLimits.append({'servo1_high': 504, 'servo1_mid': 309, 'servo1_low': 114,
'servo2_high': 590, 'servo2_mid': 295, 'servo2_low': 100,
'servo3_high': 530, 'servo3_mid': 325, 'servo3_low': 145})
Legs.append(LegClass.leg(12,13,14, 0x41, LegLimits[3]))
#-------------------------------------------------
# LEG E SET-UP
LegLimits.append({'servo1_high': 470, 'servo1_mid': 275, 'servo1_low': 80,
'servo2_high': 453, 'servo2_mid': 258, 'servo2_low': 63,
'servo3_high': 501, 'servo3_mid': 296, 'servo3_low': 116})
Legs.append(LegClass.leg(3,4,5, 0x41, LegLimits[4]))
#-------------------------------------------------
# LEG F SET-UP
LegLimits.append({'servo1_high': 487, 'servo1_mid': 292, 'servo1_low': 97,
'servo2_high': 483, 'servo2_mid': 288, 'servo2_low': 93,
'servo3_high': 513, 'servo3_mid': 308, 'servo3_low': 128})
Legs.append(LegClass.leg(1,2,3, 0x42, LegLimits[5]))
#-------------------------------------------------
# Define classes here
class MoveLeg(threading.Thread):
def __init__ (self, legNum, serv1, serv2, serv3,speedList=[100,100,100],waitForCompletion=False):
threading.Thread.__init__(self)
self.legNum = legNum
self.serv1 = serv1
self.serv2 = serv2
self.serv3 = serv3
self.speedList = speedList
self.waitForCompletion = waitForCompletion
def run(self):
serv1Mid = LegLimits[self.legNum]['servo1_mid'] + self.serv1
serv2Mid = LegLimits[self.legNum]['servo2_mid'] + self.serv2
serv3Mid = LegLimits[self.legNum]['servo3_mid'] + self.serv3
LegsPosition = {'servo1': serv1Mid, 'servo2': serv2Mid, 'servo3': serv3Mid}
Legs[self.legNum].moveTo(LegsPosition,self.speedList,self.waitForCompletion)
class walkLoop(threading.Thread):
    def __init__(self, forwardPos=0, strafePos=0, rotatePos=0):
        threading.Thread.__init__(self)
        self.forwardPos = forwardPos
        self.strafePos = strafePos
        self.rotatePos = rotatePos
    def run(self):
        while(True):
            self.walk(self.forwardPos, self.strafePos, self.rotatePos)
    # TODO: Ensure that cross thread variable addressing works
    # TODO: If not, have a function to post data to the class so it can adjust strafepos and forwardpos
    # TODO: Ensure that the leg locations work as intended and stuff
    # TODO: Create class to update the global variable or post data to the walkloop class based on input from sixaxis.
    def walk(self, forwardPos, strafePos, rotatePos):
timing = 0.1
walkspeed1 = 100
walkspeed2 = 100
walkspeed3 = 100
rightMargin = 0
leftMargin = 0
forwardMargin = 0
        servo3Margin = strafePos
servo2Correction = servo3Margin
forwardAmount = int(-45 * forwardPos)
forwardAmountNeg = int(45 * forwardPos)
moveMatrx=([0, walkspeed1, 65 + servo2Correction, walkspeed2, -70 + servo3Margin, walkspeed3],
[0, walkspeed1, 180 - servo2Correction, walkspeed2, -90 - servo3Margin, walkspeed3],
[0, walkspeed1, 65 + servo2Correction, walkspeed2, -70 + servo3Margin, walkspeed3],
[0, walkspeed1, 180 - servo2Correction, walkspeed2, -90 - servo3Margin, walkspeed3],
[0, walkspeed1, 65 + servo2Correction, walkspeed2, -70 + servo3Margin, walkspeed3],
[0, walkspeed1, 180 - servo2Correction, walkspeed2, -90 - servo3Margin, walkspeed3])
moveAllLegs(moveMatrx)
sleep(timing)
# print "Movement 1"
moveMatrx=([forwardAmount, walkspeed1, 65 + servo2Correction, walkspeed2, -70 + servo3Margin, walkspeed3],
[forwardAmountNeg, walkspeed1, 180 - servo2Correction, walkspeed2, -90 - servo3Margin, walkspeed3],
[forwardAmount, walkspeed1, 65 + servo2Correction, walkspeed2, -70 + servo3Margin, walkspeed3],
[forwardAmountNeg, walkspeed1, 180 - servo2Correction, walkspeed2, -90 - servo3Margin, walkspeed3],
[forwardAmount, walkspeed1, 65 + servo2Correction, walkspeed2, -70 + servo3Margin, walkspeed3],
[forwardAmountNeg, walkspeed1, 180 - servo2Correction, walkspeed2, -90 - servo3Margin, walkspeed3])
moveAllLegs(moveMatrx)
sleep(timing)
# print "Movement 2"
moveMatrx=([forwardAmount, walkspeed1, 65 + servo2Correction, walkspeed2, -70 + servo3Margin, walkspeed3],
[forwardAmountNeg, walkspeed1, 65 - servo2Correction, walkspeed2, -70 - servo3Margin, walkspeed3],
[forwardAmount, walkspeed1, 65 + servo2Correction, walkspeed2, -70 + servo3Margin, walkspeed3],
[forwardAmountNeg, walkspeed1, 65 - servo2Correction, walkspeed2, -70 - servo3Margin, walkspeed3],
[forwardAmount, walkspeed1, 65 + servo2Correction, walkspeed2, -70 + servo3Margin, walkspeed3],
[forwardAmountNeg, walkspeed1, 65 - servo2Correction, walkspeed2, -70 - servo3Margin, walkspeed3])
moveAllLegs(moveMatrx)
sleep(timing)
servo2Correction = 0
servo3Margin = 0
# print "Movement 3"
moveMatrx=([forwardAmount, walkspeed1, 180 + servo2Correction, walkspeed2, -90 + servo3Margin, walkspeed3],
[forwardAmountNeg, walkspeed1, 65 - servo2Correction, walkspeed2, -70 - servo3Margin, walkspeed3],
[forwardAmount, walkspeed1, 180 + servo2Correction, walkspeed2, -90 + servo3Margin, walkspeed3],
[forwardAmountNeg, walkspeed1, 65 - servo2Correction, walkspeed2, -70 - servo3Margin, walkspeed3],
[forwardAmount, walkspeed1, 180 + servo2Correction, walkspeed2, -90 + servo3Margin, walkspeed3],
[forwardAmountNeg, walkspeed1, 65 - servo2Correction, walkspeed2, -70 - servo3Margin, walkspeed3])
moveAllLegs(moveMatrx)
sleep(timing)
# print "Movement 4"
moveMatrx=([forwardAmountNeg, walkspeed1, 180 + servo2Correction, walkspeed2, -90 + servo3Margin, walkspeed3],
[forwardAmount, walkspeed1, 65 - servo2Correction, walkspeed2, -70 - servo3Margin, walkspeed3],
[forwardAmountNeg, walkspeed1, 180 + servo2Correction, walkspeed2, -90 + servo3Margin, walkspeed3],
[forwardAmount, walkspeed1, 65 - servo2Correction, walkspeed2, -70 - servo3Margin, walkspeed3],
[forwardAmountNeg, walkspeed1, 180 + servo2Correction, walkspeed2, -90 + servo3Margin, walkspeed3],
[forwardAmount, walkspeed1, 65 - servo2Correction, walkspeed2, -70 - servo3Margin, walkspeed3])
moveAllLegs(moveMatrx)
sleep(timing)
# print "Movement 5"
moveMatrx=([forwardAmountNeg, walkspeed1, 65 + servo2Correction, walkspeed2, -70 + servo3Margin, walkspeed3],
[forwardAmount, walkspeed1, 65 - servo2Correction, walkspeed2, -70 - servo3Margin, walkspeed3],
[forwardAmountNeg, walkspeed1, 65 + servo2Correction, walkspeed2, -70 + servo3Margin, walkspeed3],
[forwardAmount, walkspeed1, 65 - servo2Correction, walkspeed2, -70 - servo3Margin, walkspeed3],
[forwardAmountNeg, walkspeed1, 65 + servo2Correction, walkspeed2, -70 + servo3Margin, walkspeed3],
[forwardAmount, walkspeed1, 65 - servo2Correction, walkspeed2, -70 - servo3Margin, walkspeed3])
moveAllLegs(moveMatrx)
sleep(timing)
# print "Movement 6"
moveMatrx=([forwardAmountNeg, walkspeed1, 65 + servo2Correction, walkspeed2, -70 + servo3Margin, walkspeed3],
[forwardAmount, walkspeed1, 180 - servo2Correction, walkspeed2, -70 - servo3Margin, walkspeed3],
[forwardAmountNeg, walkspeed1, 65 + servo2Correction, walkspeed2, -70 + servo3Margin, walkspeed3],
[forwardAmount, walkspeed1, 180 - servo2Correction, walkspeed2, -70 - servo3Margin, walkspeed3],
[forwardAmountNeg, walkspeed1, 65 + servo2Correction, walkspeed2, -70 + servo3Margin, walkspeed3],
[forwardAmount, walkspeed1, 180 - servo2Correction, walkspeed2, -70 - servo3Margin, walkspeed3])
moveAllLegs(moveMatrx)
sleep(timing)
#-------------------------------------------------
# Define functions and stuff here
def isButtonPressed(inputVal):
    return inputVal > 0
def moveAllLegs(legsPosition,waitForCompletion=False):
# |--------------------Leg---------------------|
# /---Servo1---\ /---Servo2---\ /---Servo3---\
# Position,Speed, Position,Speed, Position,Speed
for x in range(0,6):
speedListTemp=[legsPosition[x][1],legsPosition[x][3],legsPosition[x][5]]
servo1Position=legsPosition[x][0]
servo2Position=legsPosition[x][2]
servo3Position=legsPosition[x][4]
if(debug):
pyFunctions.printDebug('Leg {0} moved to location {1}, {2}, {3} at speed {4}. WFC = {5}'.format(x,servo1Position,servo2Position,servo3Position,speedListTemp,str(waitForCompletion)))
thread = MoveLeg(x,servo1Position,servo2Position,servo3Position,speedListTemp)
thread.start()
if(waitForCompletion):
if(debug):
pyFunctions.printDebug('Waiting for completion of leg movement')
for x in range(0,6):
serv1Mid = LegLimits[x]['servo1_mid'] + legsPosition[x][0]
serv2Mid = LegLimits[x]['servo2_mid'] + legsPosition[x][2]
serv3Mid = LegLimits[x]['servo3_mid'] + legsPosition[x][4]
locationArray= Legs[x].getCurrentPosition()
while(locationArray[0] != serv1Mid):
locationArray= Legs[x].getCurrentPosition()
while(locationArray[1] != serv2Mid):
locationArray= Legs[x].getCurrentPosition()
while(locationArray[2] != serv3Mid):
locationArray= Legs[x].getCurrentPosition()
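# legMove() is used by wave(), sit() and sitStandWave() below but is not defined
# anywhere in this file.  The helper below is a minimal sketch built on the same
# MoveLeg thread used by moveAllLegs(); the default speed of 100 is an assumption,
# not taken from the original source.
def legMove(legNum, serv1, serv2, serv3, speed=100, waitForCompletion=False):
    speedList = [speed, speed, speed]
    thread = MoveLeg(legNum, serv1, serv2, serv3, speedList, waitForCompletion)
    thread.start()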
def setToStartPos():
moveMatrx=([0, 100, 180, 100, -90, 100],
[0, 100, 180, 100, -90, 100],
[0, 100, 180, 100, -90, 100],
[0, 100, 180, 100, -90, 100],
[0, 100, 180, 100, -90, 100],
[0, 100, 180, 100, -90, 100])
moveAllLegs(moveMatrx)
def walk(forwardPos, strafePos, rotatePos):
timing = 0.1
walkspeed1 = 100
walkspeed2 = 100
walkspeed3 = 100
rightMargin = 0
leftMargin = 0
forwardMargin = 0
servo3Margin = strafePos
servo2Correction = servo3Margin
forwardAmount = int(-45 * forwardPos)
forwardAmountNeg = int(45 * forwardPos)
moveMatrx=([0, walkspeed1, 65 + servo2Correction, walkspeed2, -70 + servo3Margin, walkspeed3],
[0, walkspeed1, 180 - servo2Correction, walkspeed2, -90 - servo3Margin, walkspeed3],
[0, walkspeed1, 65 + servo2Correction, walkspeed2, -70 + servo3Margin, walkspeed3],
[0, walkspeed1, 180 - servo2Correction, walkspeed2, -90 - servo3Margin, walkspeed3],
[0, walkspeed1, 65 + servo2Correction, walkspeed2, -70 + servo3Margin, walkspeed3],
[0, walkspeed1, 180 - servo2Correction, walkspeed2, -90 - servo3Margin, walkspeed3])
moveAllLegs(moveMatrx)
sleep(timing)
# print "Movement 1"
moveMatrx=([forwardAmount, walkspeed1, 65 + servo2Correction, walkspeed2, -70 + servo3Margin, walkspeed3],
[forwardAmountNeg, walkspeed1, 180 - servo2Correction, walkspeed2, -90 - servo3Margin, walkspeed3],
[forwardAmount, walkspeed1, 65 + servo2Correction, walkspeed2, -70 + servo3Margin, walkspeed3],
[forwardAmountNeg, walkspeed1, 180 - servo2Correction, walkspeed2, -90 - servo3Margin, walkspeed3],
[forwardAmount, walkspeed1, 65 + servo2Correction, walkspeed2, -70 + servo3Margin, walkspeed3],
[forwardAmountNeg, walkspeed1, 180 - servo2Correction, walkspeed2, -90 - servo3Margin, walkspeed3])
moveAllLegs(moveMatrx)
sleep(timing)
# print "Movement 2"
moveMatrx=([forwardAmount, walkspeed1, 65 + servo2Correction, walkspeed2, -70 + servo3Margin, walkspeed3],
[forwardAmountNeg, walkspeed1, 65 - servo2Correction, walkspeed2, -70 - servo3Margin, walkspeed3],
[forwardAmount, walkspeed1, 65 + servo2Correction, walkspeed2, -70 + servo3Margin, walkspeed3],
[forwardAmountNeg, walkspeed1, 65 - servo2Correction, walkspeed2, -70 - servo3Margin, walkspeed3],
[forwardAmount, walkspeed1, 65 + servo2Correction, walkspeed2, -70 + servo3Margin, walkspeed3],
[forwardAmountNeg, walkspeed1, 65 - servo2Correction, walkspeed2, -70 - servo3Margin, walkspeed3])
moveAllLegs(moveMatrx)
sleep(timing)
servo2Correction = 0
servo3Margin = 0
# print "Movement 3"
moveMatrx=([forwardAmount, walkspeed1, 180 + servo2Correction, walkspeed2, -90 + servo3Margin, walkspeed3],
[forwardAmountNeg, walkspeed1, 65 - servo2Correction, walkspeed2, -70 - servo3Margin, walkspeed3],
[forwardAmount, walkspeed1, 180 + servo2Correction, walkspeed2, -90 + servo3Margin, walkspeed3],
[forwardAmountNeg, walkspeed1, 65 - servo2Correction, walkspeed2, -70 - servo3Margin, walkspeed3],
[forwardAmount, walkspeed1, 180 + servo2Correction, walkspeed2, -90 + servo3Margin, walkspeed3],
[forwardAmountNeg, walkspeed1, 65 - servo2Correction, walkspeed2, -70 - servo3Margin, walkspeed3])
moveAllLegs(moveMatrx)
sleep(timing)
# print "Movement 4"
moveMatrx=([forwardAmountNeg, walkspeed1, 180 + servo2Correction, walkspeed2, -90 + servo3Margin, walkspeed3],
[forwardAmount, walkspeed1, 65 - servo2Correction, walkspeed2, -70 - servo3Margin, walkspeed3],
[forwardAmountNeg, walkspeed1, 180 + servo2Correction, walkspeed2, -90 + servo3Margin, walkspeed3],
[forwardAmount, walkspeed1, 65 - servo2Correction, walkspeed2, -70 - servo3Margin, walkspeed3],
[forwardAmountNeg, walkspeed1, 180 + servo2Correction, walkspeed2, -90 + servo3Margin, walkspeed3],
[forwardAmount, walkspeed1, 65 - servo2Correction, walkspeed2, -70 - servo3Margin, walkspeed3])
moveAllLegs(moveMatrx)
sleep(timing)
# print "Movement 5"
moveMatrx=([forwardAmountNeg, walkspeed1, 65 + servo2Correction, walkspeed2, -70 + servo3Margin, walkspeed3],
[forwardAmount, walkspeed1, 65 - servo2Correction, walkspeed2, -70 - servo3Margin, walkspeed3],
[forwardAmountNeg, walkspeed1, 65 + servo2Correction, walkspeed2, -70 + servo3Margin, walkspeed3],
[forwardAmount, walkspeed1, 65 - servo2Correction, walkspeed2, -70 - servo3Margin, walkspeed3],
[forwardAmountNeg, walkspeed1, 65 + servo2Correction, walkspeed2, -70 + servo3Margin, walkspeed3],
[forwardAmount, walkspeed1, 65 - servo2Correction, walkspeed2, -70 - servo3Margin, walkspeed3])
moveAllLegs(moveMatrx)
sleep(timing)
# print "Movement 6"
moveMatrx=([forwardAmountNeg, walkspeed1, 65 + servo2Correction, walkspeed2, -70 + servo3Margin, walkspeed3],
[forwardAmount, walkspeed1, 180 - servo2Correction, walkspeed2, -70 - servo3Margin, walkspeed3],
[forwardAmountNeg, walkspeed1, 65 + servo2Correction, walkspeed2, -70 + servo3Margin, walkspeed3],
[forwardAmount, walkspeed1, 180 - servo2Correction, walkspeed2, -70 - servo3Margin, walkspeed3],
[forwardAmountNeg, walkspeed1, 65 + servo2Correction, walkspeed2, -70 + servo3Margin, walkspeed3],
[forwardAmount, walkspeed1, 180 - servo2Correction, walkspeed2, -70 - servo3Margin, walkspeed3])
moveAllLegs(moveMatrx)
sleep(timing)
def wave(x):
sleep(2)
legMove(x, 0, 180, 0)
sleep(0.5)
legMove(x, 0, 180, 100)
sleep(0.2)
legMove(x, 0, 180, 80)
sleep(0.2)
legMove(x, 0, 180, 100)
sleep(0.2)
legMove(x, 0, 180, 80)
sleep(0.2)
legMove(x, 0, 180, 100)
sleep(0.2)
legMove(x, 0, 180, 80)
sleep(0.2)
def stand():
moveMatrx=([0, 100, 65, 100, -70, 70],
[0, 100, 65, 100, -70, 70],
[0, 100, 65, 100, -70, 70],
[0, 100, 65, 100, -70, 70],
[0, 100, 65, 100, -70, 70],
[0, 100, 65, 100, -70, 70])
moveAllLegs(moveMatrx, True)
def sit():
for x in range(0, 6):
legMove(x, 0, 80, -80)
sleep(0.01)
for x in range(0, 6):
legMove(x, 0, 130, -95)
sleep(0.01)
setToStartPos()
def sitStandWave():
    # stand() and sit() act on all six legs at once and take no leg index,
    # so they are called once; only the wave is performed per leg.
    stand()
    sleep(2)
    for x in range(0, 6):
        wave(x)
        legMove(x, 0, 65, -70)
    sleep(2)
    sit()
def mainProgram():
done = False
print """
_ _ _____ _
| | | | | __ \ | |
| |__| | _____ _____ | |__) |__ __| |
| __ |/ _ \ \/ / _ \| ___/ _ \ / _` |
| | | | __/> < (_) | | | (_) | (_| |
|_| |_|\___/_/\_\___/|_| \___/ \__,_|
-----------------------------------------------------
This is the controller program for a HexoPod.
Written by Rory Shanks and Harry Swan for the University of Abertay, Dundee
With help from Gerry and folks"""
walk(1,0,0)
forwardAmount = 0
mainProgram()
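# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original controller). It assumes the
# leg objects, LegLimits and the servo driver are initialised earlier in this
# file; the cycle count below is purely illustrative.
#
#     setToStartPos()
#     stand()
#     for _ in range(10):   # walk forward for ten gait cycles
#         walk(1, 0, 0)
#     sit()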
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accesses the google.devtools.cloudtrace.v2 TraceService API."""
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.grpc_helpers
import google.api_core.path_template
import grpc
from google.cloud.trace_v2.gapic import enums
from google.cloud.trace_v2.gapic import trace_service_client_config
from google.cloud.trace_v2.gapic.transports import trace_service_grpc_transport
from google.cloud.trace_v2.proto import trace_pb2
from google.cloud.trace_v2.proto import tracing_pb2
from google.cloud.trace_v2.proto import tracing_pb2_grpc
from google.protobuf import empty_pb2
from google.protobuf import timestamp_pb2
from google.protobuf import wrappers_pb2
from google.rpc import status_pb2
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution(
'google-cloud-trace', ).version
class TraceServiceClient(object):
"""
This file describes an API for collecting and viewing traces and spans
within a trace. A Trace is a collection of spans corresponding to a single
operation or set of operations for an application. A span is an individual
timed event which forms a node of the trace tree. A single trace may
contain span(s) from multiple services.
"""
SERVICE_ADDRESS = 'cloudtrace.googleapis.com:443'
"""The default address of the service."""
# The name of the interface for this client. This is the key used to
# find the method configuration in the client_config dictionary.
_INTERFACE_NAME = 'google.devtools.cloudtrace.v2.TraceService'
@classmethod
def from_service_account_file(cls, filename, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
TraceServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename)
kwargs['credentials'] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@classmethod
def project_path(cls, project):
"""Return a fully-qualified project string."""
return google.api_core.path_template.expand(
'projects/{project}',
project=project,
)
@classmethod
def span_path(cls, project, trace, span):
"""Return a fully-qualified span string."""
return google.api_core.path_template.expand(
'projects/{project}/traces/{trace}/spans/{span}',
project=project,
trace=trace,
span=span,
)
def __init__(self,
transport=None,
channel=None,
credentials=None,
client_config=trace_service_client_config.config,
client_info=None):
"""Constructor.
Args:
transport (Union[~.TraceServiceGrpcTransport,
Callable[[~.Credentials, type], ~.TraceServiceGrpcTransport]): A transport
instance, responsible for actually making the API calls.
The default transport uses the gRPC protocol.
This argument may also be a callable which returns a
transport instance. Callables will be sent the credentials
as the first argument and the default transport class as
the second argument.
channel (grpc.Channel): DEPRECATED. A ``Channel`` instance
through which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is mutually exclusive with providing a
transport instance to ``transport``; doing so will raise
an exception.
client_config (dict): DEPRECATED. A dictionary of call options for
each method. If not specified, the default configuration is used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Raise deprecation warnings for things we want to go away.
if client_config:
warnings.warn('The `client_config` argument is deprecated.',
PendingDeprecationWarning)
if channel:
warnings.warn(
'The `channel` argument is deprecated; use '
'`transport` instead.', PendingDeprecationWarning)
# Instantiate the transport.
# The transport is responsible for handling serialization and
# deserialization and actually sending data to the service.
if transport:
if callable(transport):
self.transport = transport(
credentials=credentials,
default_class=trace_service_grpc_transport.
TraceServiceGrpcTransport,
)
else:
if credentials:
raise ValueError(
'Received both a transport instance and '
'credentials; these are mutually exclusive.')
self.transport = transport
else:
self.transport = trace_service_grpc_transport.TraceServiceGrpcTransport(
address=self.SERVICE_ADDRESS,
channel=channel,
credentials=credentials,
)
if client_info is None:
client_info = (
google.api_core.gapic_v1.client_info.DEFAULT_CLIENT_INFO)
client_info.gapic_version = _GAPIC_LIBRARY_VERSION
self._client_info = client_info
# Parse out the default settings for retry and timeout for each RPC
# from the client configuration.
# (Ordinarily, these are the defaults specified in the `*_config.py`
# file next to this one.)
self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(
client_config['interfaces'][self._INTERFACE_NAME], )
# Save a dictionary of cached API call functions.
# These are the actual callables which invoke the proper
# transport methods, wrapped with `wrap_method` to add retry,
# timeout, and the like.
self._inner_api_calls = {}
# Service calls
def batch_write_spans(self,
name,
spans,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Sends new spans to new or existing traces. You cannot update
existing spans.
Example:
>>> from google.cloud import trace_v2
>>>
>>> client = trace_v2.TraceServiceClient()
>>>
>>> name = client.project_path('[PROJECT]')
>>>
>>> # TODO: Initialize ``spans``:
>>> spans = []
>>>
>>> client.batch_write_spans(name, spans)
Args:
name (str): Required. The name of the project where the spans belong. The format is
``projects/[PROJECT_ID]``.
spans (list[Union[dict, ~google.cloud.trace_v2.types.Span]]): A list of new spans. The span names must not match existing
spans, or the results are undefined.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.trace_v2.types.Span`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'batch_write_spans' not in self._inner_api_calls:
self._inner_api_calls[
'batch_write_spans'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.batch_write_spans,
default_retry=self._method_configs['BatchWriteSpans'].
retry,
default_timeout=self._method_configs['BatchWriteSpans'].
timeout,
client_info=self._client_info,
)
request = tracing_pb2.BatchWriteSpansRequest(
name=name,
spans=spans,
)
self._inner_api_calls['batch_write_spans'](
request, retry=retry, timeout=timeout, metadata=metadata)
def create_span(self,
name,
span_id,
display_name,
start_time,
end_time,
parent_span_id=None,
attributes=None,
stack_trace=None,
time_events=None,
links=None,
status=None,
same_process_as_parent_span=None,
child_span_count=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Creates a new span.
Example:
>>> from google.cloud import trace_v2
>>>
>>> client = trace_v2.TraceServiceClient()
>>>
>>> name = client.span_path('[PROJECT]', '[TRACE]', '[SPAN]')
>>>
>>> # TODO: Initialize ``span_id``:
>>> span_id = ''
>>>
>>> # TODO: Initialize ``display_name``:
>>> display_name = {}
>>>
>>> # TODO: Initialize ``start_time``:
>>> start_time = {}
>>>
>>> # TODO: Initialize ``end_time``:
>>> end_time = {}
>>>
>>> response = client.create_span(name, span_id, display_name, start_time, end_time)
Args:
name (str): The resource name of the span in the following format:
::
projects/[PROJECT_ID]/traces/[TRACE_ID]/spans/[SPAN_ID]
[TRACE_ID] is a unique identifier for a trace within a project;
it is a 32-character hexadecimal encoding of a 16-byte array.
[SPAN_ID] is a unique identifier for a span within a trace; it
is a 16-character hexadecimal encoding of an 8-byte array.
span_id (str): The [SPAN_ID] portion of the span's resource name.
display_name (Union[dict, ~google.cloud.trace_v2.types.TruncatableString]): A description of the span's operation (up to 128 bytes).
                Stackdriver Trace displays the description in the
                Google Cloud Console.
For example, the display name can be a qualified method name or a file name
and a line number where the operation is called. A best practice is to use
the same display name within an application and at the same call point.
This makes it easier to correlate spans in different traces.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.trace_v2.types.TruncatableString`
start_time (Union[dict, ~google.cloud.trace_v2.types.Timestamp]): The start time of the span. On the client side, this is the time kept by
the local machine where the span execution starts. On the server side, this
is the time when the server's application handler starts running.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.trace_v2.types.Timestamp`
end_time (Union[dict, ~google.cloud.trace_v2.types.Timestamp]): The end time of the span. On the client side, this is the time kept by
the local machine where the span execution ends. On the server side, this
is the time when the server application handler stops running.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.trace_v2.types.Timestamp`
parent_span_id (str): The [SPAN_ID] of this span's parent span. If this is a root span,
then this field must be empty.
attributes (Union[dict, ~google.cloud.trace_v2.types.Attributes]): A set of attributes on the span. You can have up to 32 attributes per
span.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.trace_v2.types.Attributes`
stack_trace (Union[dict, ~google.cloud.trace_v2.types.StackTrace]): Stack trace captured at the start of the span.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.trace_v2.types.StackTrace`
time_events (Union[dict, ~google.cloud.trace_v2.types.TimeEvents]): A set of time events. You can have up to 32 annotations and 128 message
events per span.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.trace_v2.types.TimeEvents`
links (Union[dict, ~google.cloud.trace_v2.types.Links]): Links associated with the span. You can have up to 128 links per Span.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.trace_v2.types.Links`
status (Union[dict, ~google.cloud.trace_v2.types.Status]): An optional final status for this span.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.trace_v2.types.Status`
same_process_as_parent_span (Union[dict, ~google.cloud.trace_v2.types.BoolValue]): (Optional) Set this parameter to indicate whether this span is in
the same process as its parent. If you do not set this parameter,
Stackdriver Trace is unable to take advantage of this helpful
information.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.trace_v2.types.BoolValue`
child_span_count (Union[dict, ~google.cloud.trace_v2.types.Int32Value]): An optional number of child spans that were generated while this span
was active. If set, allows implementation to detect missing child spans.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.trace_v2.types.Int32Value`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.trace_v2.types.Span` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'create_span' not in self._inner_api_calls:
self._inner_api_calls[
'create_span'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.create_span,
default_retry=self._method_configs['CreateSpan'].retry,
default_timeout=self._method_configs['CreateSpan'].timeout,
client_info=self._client_info,
)
request = trace_pb2.Span(
name=name,
span_id=span_id,
display_name=display_name,
start_time=start_time,
end_time=end_time,
parent_span_id=parent_span_id,
attributes=attributes,
stack_trace=stack_trace,
time_events=time_events,
links=links,
status=status,
same_process_as_parent_span=same_process_as_parent_span,
child_span_count=child_span_count,
)
return self._inner_api_calls['create_span'](
request, retry=retry, timeout=timeout, metadata=metadata)
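    # -----------------------------------------------------------------------
    # Minimal end-to-end sketch (not part of the generated client). It assumes
    # application default credentials are configured; 'my-project' and the
    # trace/span ids below are placeholders.
    #
    #     from google.cloud import trace_v2
    #     from google.protobuf import timestamp_pb2
    #
    #     client = trace_v2.TraceServiceClient()
    #     trace_id, span_id = '0123456789abcdef' * 2, '0123456789abcdef'
    #     span_name = client.span_path('my-project', trace_id, span_id)
    #     start = timestamp_pb2.Timestamp()
    #     start.GetCurrentTime()
    #     end = timestamp_pb2.Timestamp()
    #     end.GetCurrentTime()
    #     span = client.create_span(span_name, span_id,
    #                               {'value': 'example-span'}, start, end)
    #     client.batch_write_spans(client.project_path('my-project'), [span])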
|
|
'''
Code taken from: https://github.com/eugenium/mmd
(modified slightly for efficiency/PEP by Stephanie Hyland)
Python implementation of MMD and Covariance estimates for Relative MMD
Some code is based on code from Vincent Van Asch
which is based on matlab code from Arthur Gretton
Eugene Belilovsky
[email protected]
'''
import numpy as np
import scipy as sp
import scipy.stats  # make sp.stats available for the p-value computation below
from numpy import sqrt
from sklearn.metrics.pairwise import rbf_kernel
from functools import partial
import pdb
def my_kernel(X, Y, sigma):
gamma = 1 / (2 * sigma**2)
if len(X.shape) == 2:
X_sqnorms = np.einsum('...i,...i', X, X)
Y_sqnorms = np.einsum('...i,...i', Y, Y)
XY = np.einsum('ia,ja', X, Y)
elif len(X.shape) == 3:
X_sqnorms = np.einsum('...ij,...ij', X, X)
Y_sqnorms = np.einsum('...ij,...ij', Y, Y)
XY = np.einsum('iab,jab', X, Y)
else:
pdb.set_trace()
Kxy = np.exp(-gamma*(X_sqnorms.reshape(-1, 1) - 2*XY + Y_sqnorms.reshape(1, -1)))
return Kxy
def MMD_3_Sample_Test(X, Y, Z, sigma=-1, SelectSigma=True, computeMMDs=False):
    '''Performs the relative MMD test, which returns a test statistic for
    whether Y is closer to X than Z is.
    See http://arxiv.org/pdf/1511.04581.pdf
    The bandwidth heuristic is based on the median heuristic (see Smola, Gretton).
    '''
    if sigma < 0:
        # Median-heuristic bandwidth selection
        if SelectSigma:
            siz = np.min((1000, X.shape[0]))
            sigma1 = kernelwidthPair(X[0:siz], Y[0:siz])
            sigma2 = kernelwidthPair(X[0:siz], Z[0:siz])
            sigma = (sigma1 + sigma2) / 2.
        else:
            # Floor division keeps the slice indices integral on Python 3
            siz = np.min((1000, X.shape[0] * 3))
            Zem = np.r_[X[0:siz // 3], Y[0:siz // 3], Z[0:siz // 3]]
            sigma = kernelwidth(Zem)
#kernel = partial(rbf_kernel, gamma=1.0/(sigma**2))
kernel = partial(my_kernel, sigma=sigma)
#kernel = partial(grbf, sigma=sigma)
Kyy = kernel(Y, Y)
Kzz = kernel(Z, Z)
Kxy = kernel(X, Y)
Kxz = kernel(X, Z)
Kyynd = Kyy-np.diag(np.diagonal(Kyy))
Kzznd = Kzz-np.diag(np.diagonal(Kzz))
m = Kxy.shape[0];
n = Kyy.shape[0];
r = Kzz.shape[0];
u_yy=np.sum(Kyynd)*( 1./(n*(n-1)) )
u_zz=np.sum(Kzznd)*( 1./(r*(r-1)) )
u_xy=np.sum(Kxy)/(m*n)
u_xz=np.sum(Kxz)/(m*r)
#Compute the test statistic
t=u_yy - 2.*u_xy - (u_zz-2.*u_xz)
Diff_Var, Diff_Var_z2, data=MMD_Diff_Var(Kyy, Kzz, Kxy, Kxz)
pvalue=sp.stats.norm.cdf(-t/np.sqrt((Diff_Var)))
# pvalue_z2=sp.stats.norm.cdf(-t/np.sqrt((Diff_Var_z2)))
tstat=t/sqrt(Diff_Var)
if(computeMMDs):
Kxx = kernel(X, X)
Kxxnd = Kxx-np.diag(np.diagonal(Kxx))
u_xx=np.sum(Kxxnd)*( 1./(m*(m-1)) )
MMDXY=u_xx+u_yy-2.*u_xy
MMDXZ=u_xx+u_zz-2.*u_xz
else:
MMDXY=None
MMDXZ=None
return pvalue, tstat, sigma, MMDXY, MMDXZ
def MMD_Diff_Var(Kyy, Kzz, Kxy, Kxz):
'''
Compute the variance of the difference statistic MMDXY-MMDXZ
See http://arxiv.org/pdf/1511.04581.pdf Appendix for derivations
'''
m = Kxy.shape[0];
n = Kyy.shape[0];
r = Kzz.shape[0];
Kyynd = Kyy-np.diag(np.diagonal(Kyy));
Kzznd = Kzz-np.diag(np.diagonal(Kzz));
u_yy=np.sum(Kyynd)*( 1./(n*(n-1)) );
u_zz=np.sum(Kzznd)*( 1./(r*(r-1)) );
u_xy=np.sum(Kxy)/(m*n);
u_xz=np.sum(Kxz)/(m*r);
#compute zeta1
t1=(1./n**3)*np.sum(Kyynd.T.dot(Kyynd))-u_yy**2;
t2=(1./(n**2*m))*np.sum(Kxy.T.dot(Kxy))-u_xy**2;
t3=(1./(n*m**2))*np.sum(Kxy.dot(Kxy.T))-u_xy**2;
t4=(1./r**3)*np.sum(Kzznd.T.dot(Kzznd))-u_zz**2;
t5=(1./(r*m**2))*np.sum(Kxz.dot(Kxz.T))-u_xz**2;
t6=(1./(r**2*m))*np.sum(Kxz.T.dot(Kxz))-u_xz**2;
t7=(1./(n**2*m))*np.sum(Kyynd.dot(Kxy.T))-u_yy*u_xy;
t8=(1./(n*m*r))*np.sum(Kxy.T.dot(Kxz))-u_xz*u_xy;
t9=(1./(r**2*m))*np.sum(Kzznd.dot(Kxz.T))-u_zz*u_xz;
zeta1=(t1+t2+t3+t4+t5+t6-2.*(t7+t8+t9));
zeta2=(1/m/(m-1))*np.sum((Kyynd-Kzznd-Kxy.T-Kxy+Kxz+Kxz.T)**2)-(u_yy - 2.*u_xy - (u_zz-2.*u_xz))**2;
data=dict({'t1':t1,
't2':t2,
't3':t3,
't4':t4,
't5':t5,
't6':t6,
't7':t7,
't8':t8,
't9':t9,
'zeta1':zeta1,
'zeta2':zeta2,
})
#TODO more precise version for zeta2
# xx=(1/m^2)*sum(sum(Kxxnd.*Kxxnd))-u_xx^2;
# yy=(1/n^2)*sum(sum(Kyynd.*Kyynd))-u_yy^2;
#xy=(1/(n*m))*sum(sum(Kxy.*Kxy))-u_xy^2;
#xxy=(1/(n*m^2))*sum(sum(Kxxnd*Kxy))-u_xx*u_xy;
#yyx=(1/(n^2*m))*sum(sum(Kyynd*Kxy'))-u_yy*u_xy;
#zeta2=(xx+yy+xy+xy-2*(xxy+xxy +yyx+yyx))
Var=(4.*(m-2)/(m*(m-1)))*zeta1;
Var_z2=Var+(2./(m*(m-1)))*zeta2;
return Var, Var_z2, data
def grbf(x1, x2, sigma):
'''Calculates the Gaussian radial base function kernel'''
n, nfeatures = x1.shape
m, mfeatures = x2.shape
k1 = np.sum((x1*x1), 1)
q = np.tile(k1, (m, 1)).transpose()
del k1
k2 = np.sum((x2*x2), 1)
r = np.tile(k2.T, (n, 1))
del k2
h = q + r
del q, r
# The norm
h = h - 2*np.dot(x1, x2.transpose())
h = np.array(h, dtype=float)
return np.exp(-1.*h/(2.*pow(sigma, 2)))
def kernelwidthPair(x1, x2):
    '''Implementation of the median heuristic. See Gretton 2012.
    Pick sigma such that the exponent of exp(-||x-y|| / (2*sigma**2)),
    i.e. ||x-y|| / (2*sigma**2), equals 1 for the median of all pairwise
    distances between points from the two data sets x1 and x2.
    '''
n, nfeatures = x1.shape
m, mfeatures = x2.shape
k1 = np.sum((x1*x1), 1)
q = np.tile(k1, (m, 1)).transpose()
del k1
k2 = np.sum((x2*x2), 1)
r = np.tile(k2, (n, 1))
del k2
h= q + r
del q, r
# The norm
h = h - 2*np.dot(x1, x2.transpose())
h = np.array(h, dtype=float)
mdist = np.median([i for i in h.flat if i])
sigma = sqrt(mdist/2.0)
if not sigma: sigma = 1
return sigma
def kernelwidth(Zmed):
    '''Alternative median heuristic for when the points cannot be partitioned
    into two sets.
    '''
m= Zmed.shape[0]
k1 = np.expand_dims(np.sum((Zmed*Zmed), axis=1), 1)
q = np.kron(np.ones((1, m)), k1)
r = np.kron(np.ones((m, 1)), k1.T)
del k1
h= q + r
del q, r
# The norm
h = h - 2.*Zmed.dot(Zmed.T)
h = np.array(h, dtype=float)
mdist = np.median([i for i in h.flat if i])
sigma = sqrt(mdist/2.0)
if not sigma: sigma = 1
return sigma
def MMD_unbiased(Kxx, Kyy, Kxy):
    '''Unbiased MMD^2 estimate for samples x and y, which may come from
    different distributions.'''
m = Kxx.shape[0]
n = Kyy.shape[0]
t1 = (1./(m*(m-1)))*np.sum(Kxx - np.diag(np.diagonal(Kxx)))
t2 = (2./(m*n)) * np.sum(Kxy)
t3 = (1./(n*(n-1)))* np.sum(Kyy - np.diag(np.diagonal(Kyy)))
MMDsquared = (t1-t2+t3)
return MMDsquared
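if __name__ == "__main__":
    # Minimal self-check (not part of the original module). X and Y are drawn
    # from the same Gaussian while Z is shifted, so MMDXY should come out
    # smaller than MMDXZ; the statistic t ~ MMD^2(X,Y) - MMD^2(X,Z) is then
    # negative and the one-sided p-value norm.cdf(-t/std) lands near 1.
    # Sample sizes, dimensionality and the shift are illustrative only.
    rng = np.random.RandomState(0)
    X = rng.randn(200, 5)
    Y = rng.randn(200, 5)
    Z = rng.randn(200, 5) + 1.0
    pvalue, tstat, sigma, mmd_xy, mmd_xz = MMD_3_Sample_Test(
        X, Y, Z, computeMMDs=True)
    print("p-value=%.3f t=%.3f sigma=%.3f MMDXY=%.4f MMDXZ=%.4f"
          % (pvalue, tstat, sigma, mmd_xy, mmd_xz))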
|
|
#!/usr/bin/env python2.7
"""Check CFC - Check Compile Flow Consistency
This is a compiler wrapper for testing that code generation is consistent with
different compilation processes. It checks that code is not unduly affected by
compiler options or other changes which should not have side effects.
To use:
-Ensure that the compiler under test (i.e. clang, clang++) is on the PATH
-On Linux copy this script to the name of the compiler
e.g. cp check_cfc.py clang && cp check_cfc.py clang++
-On Windows use setup.py to generate check_cfc.exe and copy that to clang.exe
and clang++.exe
-Enable the desired checks in check_cfc.cfg (in the same directory as the
wrapper)
e.g.
[Checks]
dash_g_no_change = true
dash_s_no_change = false
-The wrapper can be run using its absolute path or added to PATH before the
compiler under test
e.g. export PATH=<path to check_cfc>:$PATH
-Compile as normal. The wrapper intercepts normal -c compiles and will return
non-zero if the check fails.
e.g.
$ clang -c test.cpp
Code difference detected with -g
--- /tmp/tmp5nv893.o
+++ /tmp/tmp6Vwjnc.o
@@ -1 +1 @@
- 0: 48 8b 05 51 0b 20 00 mov 0x200b51(%rip),%rax
+ 0: 48 39 3d 51 0b 20 00 cmp %rdi,0x200b51(%rip)
-To run LNT with Check CFC specify the absolute path to the wrapper to the --cc
and --cxx options
e.g.
lnt runtest nt --cc <path to check_cfc>/clang \\
--cxx <path to check_cfc>/clang++ ...
To add a new check:
-Create a new subclass of WrapperCheck
-Implement the perform_check() method. This should perform the alternate compile
and do the comparison.
-Add the new check to check_cfc.cfg. The check has the same name as the
subclass.
"""
from __future__ import print_function
import imp
import os
import platform
import shutil
import subprocess
import sys
import tempfile
import ConfigParser
import io
import obj_diff
def is_windows():
"""Returns True if running on Windows."""
return platform.system() == 'Windows'
class WrapperStepException(Exception):
"""Exception type to be used when a step other than the original compile
fails."""
def __init__(self, msg, stdout, stderr):
self.msg = msg
self.stdout = stdout
self.stderr = stderr
class WrapperCheckException(Exception):
"""Exception type to be used when a comparison check fails."""
def __init__(self, msg):
self.msg = msg
def main_is_frozen():
"""Returns True when running as a py2exe executable."""
return (hasattr(sys, "frozen") or # new py2exe
hasattr(sys, "importers") or # old py2exe
imp.is_frozen("__main__")) # tools/freeze
def get_main_dir():
"""Get the directory that the script or executable is located in."""
if main_is_frozen():
return os.path.dirname(sys.executable)
return os.path.dirname(sys.argv[0])
def remove_dir_from_path(path_var, directory):
"""Remove the specified directory from path_var, a string representing
PATH"""
pathlist = path_var.split(os.pathsep)
norm_directory = os.path.normpath(os.path.normcase(directory))
pathlist = filter(lambda x: os.path.normpath(
os.path.normcase(x)) != norm_directory, pathlist)
return os.pathsep.join(pathlist)
def path_without_wrapper():
"""Returns the PATH variable modified to remove the path to this program."""
scriptdir = get_main_dir()
path = os.environ['PATH']
return remove_dir_from_path(path, scriptdir)
def flip_dash_g(args):
"""Search for -g in args. If it exists then return args without. If not then
add it."""
if '-g' in args:
# Return args without any -g
return [x for x in args if x != '-g']
else:
# No -g, add one
return args + ['-g']
def derive_output_file(args):
"""Derive output file from the input file (if just one) or None
otherwise."""
infile = get_input_file(args)
if infile is None:
return None
else:
return '{}.o'.format(os.path.splitext(infile)[0])
def get_output_file(args):
"""Return the output file specified by this command or None if not
specified."""
grabnext = False
for arg in args:
if grabnext:
return arg
if arg == '-o':
# Specified as a separate arg
grabnext = True
elif arg.startswith('-o'):
# Specified conjoined with -o
return arg[2:]
assert grabnext == False
return None
def is_output_specified(args):
"""Return true is output file is specified in args."""
return get_output_file(args) is not None
def replace_output_file(args, new_name):
"""Replaces the specified name of an output file with the specified name.
Assumes that the output file name is specified in the command line args."""
replaceidx = None
attached = False
for idx, val in enumerate(args):
if val == '-o':
replaceidx = idx + 1
attached = False
elif val.startswith('-o'):
replaceidx = idx
attached = True
if replaceidx is None:
raise Exception
replacement = new_name
if attached == True:
replacement = '-o' + new_name
args[replaceidx] = replacement
return args
def add_output_file(args, output_file):
"""Append an output file to args, presuming not already specified."""
return args + ['-o', output_file]
def set_output_file(args, output_file):
"""Set the output file within the arguments. Appends or replaces as
appropriate."""
if is_output_specified(args):
args = replace_output_file(args, output_file)
else:
args = add_output_file(args, output_file)
return args
gSrcFileSuffixes = ('.c', '.cpp', '.cxx', '.c++', '.cp', '.cc')
def get_input_file(args):
"""Return the input file string if it can be found (and there is only
one)."""
inputFiles = list()
for arg in args:
testarg = arg
quotes = ('"', "'")
while testarg.endswith(quotes):
testarg = testarg[:-1]
testarg = os.path.normcase(testarg)
# Test if it is a source file
if testarg.endswith(gSrcFileSuffixes):
inputFiles.append(arg)
if len(inputFiles) == 1:
return inputFiles[0]
else:
return None
def set_input_file(args, input_file):
"""Replaces the input file with that specified."""
infile = get_input_file(args)
if infile:
infile_idx = args.index(infile)
args[infile_idx] = input_file
return args
else:
# Could not find input file
assert False
def is_normal_compile(args):
"""Check if this is a normal compile which will output an object file rather
than a preprocess or link. args is a list of command line arguments."""
compile_step = '-c' in args
# Bitcode cannot be disassembled in the same way
bitcode = '-flto' in args or '-emit-llvm' in args
# Version and help are queries of the compiler and override -c if specified
query = '--version' in args or '--help' in args
# Options to output dependency files for make
dependency = '-M' in args or '-MM' in args
# Check if the input is recognised as a source file (this may be too
# strong a restriction)
input_is_valid = bool(get_input_file(args))
return compile_step and not bitcode and not query and not dependency and input_is_valid
def run_step(command, my_env, error_on_failure):
"""Runs a step of the compilation. Reports failure as exception."""
# Need to use shell=True on Windows as Popen won't use PATH otherwise.
p = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, env=my_env, shell=is_windows())
(stdout, stderr) = p.communicate()
if p.returncode != 0:
raise WrapperStepException(error_on_failure, stdout, stderr)
def get_temp_file_name(suffix):
"""Get a temporary file name with a particular suffix. Let the caller be
reponsible for deleting it."""
tf = tempfile.NamedTemporaryFile(suffix=suffix, delete=False)
tf.close()
return tf.name
class WrapperCheck(object):
"""Base class for a check. Subclass this to add a check."""
def __init__(self, output_file_a):
"""Record the base output file that will be compared against."""
self._output_file_a = output_file_a
def perform_check(self, arguments, my_env):
"""Override this to perform the modified compilation and required
checks."""
raise NotImplementedError("Please Implement this method")
class dash_g_no_change(WrapperCheck):
def perform_check(self, arguments, my_env):
"""Check if different code is generated with/without the -g flag."""
output_file_b = get_temp_file_name('.o')
alternate_command = list(arguments)
alternate_command = flip_dash_g(alternate_command)
alternate_command = set_output_file(alternate_command, output_file_b)
run_step(alternate_command, my_env, "Error compiling with -g")
# Compare disassembly (returns first diff if differs)
difference = obj_diff.compare_object_files(self._output_file_a,
output_file_b)
if difference:
raise WrapperCheckException(
"Code difference detected with -g\n{}".format(difference))
# Clean up temp file if comparison okay
os.remove(output_file_b)
class dash_s_no_change(WrapperCheck):
def perform_check(self, arguments, my_env):
"""Check if compiling to asm then assembling in separate steps results
in different code than compiling to object directly."""
output_file_b = get_temp_file_name('.o')
alternate_command = arguments + ['-via-file-asm']
alternate_command = set_output_file(alternate_command, output_file_b)
run_step(alternate_command, my_env,
"Error compiling with -via-file-asm")
# Compare if object files are exactly the same
exactly_equal = obj_diff.compare_exact(self._output_file_a, output_file_b)
if not exactly_equal:
# Compare disassembly (returns first diff if differs)
difference = obj_diff.compare_object_files(self._output_file_a,
output_file_b)
if difference:
raise WrapperCheckException(
"Code difference detected with -S\n{}".format(difference))
# Code is identical, compare debug info
dbgdifference = obj_diff.compare_debug_info(self._output_file_a,
output_file_b)
if dbgdifference:
raise WrapperCheckException(
"Debug info difference detected with -S\n{}".format(dbgdifference))
raise WrapperCheckException("Object files not identical with -S\n")
# Clean up temp file if comparison okay
os.remove(output_file_b)
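# Example of adding a new check, as described in the module docstring. This
# sketch is kept commented out since it is not one of the checks shipped with
# the wrapper: it recompiles with warnings suppressed (-w), which should not
# affect code generation, and compares the disassembly. Enabling it would also
# require a 'dash_w_no_change = true' entry in check_cfc.cfg.
#
# class dash_w_no_change(WrapperCheck):
#     def perform_check(self, arguments, my_env):
#         """Check that adding -w does not change the generated code."""
#         output_file_b = get_temp_file_name('.o')
#         alternate_command = arguments + ['-w']
#         alternate_command = set_output_file(alternate_command, output_file_b)
#         run_step(alternate_command, my_env, "Error compiling with -w")
#         # Compare disassembly (returns first diff if it differs)
#         difference = obj_diff.compare_object_files(self._output_file_a,
#                                                    output_file_b)
#         if difference:
#             raise WrapperCheckException(
#                 "Code difference detected with -w\n{}".format(difference))
#         # Clean up temp file if comparison okay
#         os.remove(output_file_b)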
if __name__ == '__main__':
# Create configuration defaults from list of checks
default_config = """
[Checks]
"""
# Find all subclasses of WrapperCheck
checks = [cls.__name__ for cls in vars()['WrapperCheck'].__subclasses__()]
for c in checks:
default_config += "{} = false\n".format(c)
config = ConfigParser.RawConfigParser()
config.readfp(io.BytesIO(default_config))
scriptdir = get_main_dir()
config_path = os.path.join(scriptdir, 'check_cfc.cfg')
try:
config.read(os.path.join(config_path))
except:
print("Could not read config from {}, "
"using defaults.".format(config_path))
my_env = os.environ.copy()
my_env['PATH'] = path_without_wrapper()
arguments_a = list(sys.argv)
# Prevent infinite loop if called with absolute path.
arguments_a[0] = os.path.basename(arguments_a[0])
# Sanity check
enabled_checks = [check_name
for check_name in checks
if config.getboolean('Checks', check_name)]
checks_comma_separated = ', '.join(enabled_checks)
print("Check CFC, checking: {}".format(checks_comma_separated))
# A - original compilation
output_file_orig = get_output_file(arguments_a)
if output_file_orig is None:
output_file_orig = derive_output_file(arguments_a)
p = subprocess.Popen(arguments_a, env=my_env, shell=is_windows())
p.communicate()
if p.returncode != 0:
sys.exit(p.returncode)
if not is_normal_compile(arguments_a) or output_file_orig is None:
# Bail out here if we can't apply checks in this case.
# Does not indicate an error.
# Maybe not straight compilation (e.g. -S or --version or -flto)
# or maybe > 1 input files.
sys.exit(0)
# Sometimes we generate files which have very long names which can't be
# read/disassembled. This will exit early if we can't find the file we
# expected to be output.
if not os.path.isfile(output_file_orig):
sys.exit(0)
# Copy output file to a temp file
temp_output_file_orig = get_temp_file_name('.o')
shutil.copyfile(output_file_orig, temp_output_file_orig)
# Run checks, if they are enabled in config and if they are appropriate for
# this command line.
current_module = sys.modules[__name__]
for check_name in checks:
if config.getboolean('Checks', check_name):
class_ = getattr(current_module, check_name)
checker = class_(temp_output_file_orig)
try:
checker.perform_check(arguments_a, my_env)
except WrapperCheckException as e:
# Check failure
print("{} {}".format(get_input_file(arguments_a), e.msg), file=sys.stderr)
# Remove file to comply with build system expectations (no
# output file if failed)
os.remove(output_file_orig)
sys.exit(1)
except WrapperStepException as e:
# Compile step failure
print(e.msg, file=sys.stderr)
print("*** stdout ***", file=sys.stderr)
print(e.stdout, file=sys.stderr)
print("*** stderr ***", file=sys.stderr)
print(e.stderr, file=sys.stderr)
# Remove file to comply with build system expectations (no
# output file if failed)
os.remove(output_file_orig)
sys.exit(1)
|
|
# core.py
#
# Copyright (C) 2016 Diamond Light Source, Karl Levik
#
# 2016-11-30
#
# Methods to store and retrieve data in the core tables
#
import copy
import ispyb.interface.core
from ispyb.strictordereddict import StrictOrderedDict
class Core(ispyb.interface.core.IF):
"""Core provides methods to store and retrieve data in the core tables."""
def __init__(self):
pass
_proposal_params = StrictOrderedDict(
[
("id", None),
("person_id", None),
("title", None),
("proposal_code", None),
("proposal_number", None),
("proposal_type", None),
("external_pk_uuid", None),
]
)
_session_for_proposal_code_number_params = StrictOrderedDict(
[
("id", None),
("proposal_code", None),
("proposal_number", None),
("visit_number", None),
("beamline_setup_id", None),
("start_date", None),
("end_date", None),
("beamline_name", None),
("title", None),
("beamline_operator", None),
("nb_shifts", None),
("scheduled", None),
("used_flag", None),
("comments", None),
("external_pk_id", None),
("external_pk_uuid", None),
]
)
_person_params = StrictOrderedDict(
[
("id", None),
("laboratory_id", None),
("family_name", None),
("given_name", None),
("title", None),
("email_address", None),
("phone_number", None),
("login", None),
("external_pk_id", None),
("external_pk_uuid", None),
]
)
_proposal_has_person_params = StrictOrderedDict(
[("id", None), ("proposal_id", None), ("person_id", None), ("role", None)]
)
_session_has_person_params = StrictOrderedDict(
[("session_id", None), ("person_id", None), ("role", None), ("remote", None)]
)
_sample_params = StrictOrderedDict(
[
("id", None),
("authLogin", None),
("crystalid", None),
("containerid", None),
("name", None),
("code", None),
("location", None),
("holder_length", None),
("loop_length", None),
("loop_type", None),
("wire_width", None),
("comments", None),
("status", None),
("is_in_sc", None),
]
)
@classmethod
def get_proposal_params(cls):
return copy.deepcopy(cls._proposal_params)
@classmethod
def get_session_for_proposal_code_number_params(cls):
return copy.deepcopy(cls._session_for_proposal_code_number_params)
@classmethod
def get_person_params(cls):
return copy.deepcopy(cls._person_params)
@classmethod
def get_proposal_has_person_params(cls):
return copy.deepcopy(cls._proposal_has_person_params)
@classmethod
def get_session_has_person_params(cls):
return copy.deepcopy(cls._session_has_person_params)
@classmethod
def get_sample_params(cls):
return copy.deepcopy(cls._sample_params)
def upsert_proposal(self, values):
"""Insert or update a proposal"""
return self.get_connection().call_sp_write(
procname="upsert_proposal", args=values
)
def upsert_session_for_proposal_code_number(self, values):
"""Insert or update a session for a certain proposal with given proposal code and number."""
return self.get_connection().call_sp_write(
procname="upsert_session_for_proposal_code_number", args=values
)
def upsert_person(self, values):
"""Insert or update a person"""
return self.get_connection().call_sp_write(
procname="upsert_person", args=values
)
def upsert_session_has_person(self, values):
"""Insert or update a session-person association"""
return self.get_connection().call_sp_write(
procname="upsert_session_has_person", args=values
)
def upsert_proposal_has_person(self, values):
"""Insert or update a proposal-person association"""
return self.get_connection().call_sp_write(
procname="upsert_proposal_has_person", args=values
)
def upsert_sample(self, values):
"""Insert or update sample."""
return self.get_connection().call_sp_write(
procname="upsert_sample", args=values
)
def retrieve_samples_not_loaded_for_container_reg_barcode(self, barcode):
"""Retrieve the not-yet loaded samples in the most recent container that corresponds with the given container registry barcode"""
return self.get_connection().call_sp_retrieve(
procname="retrieve_samples_not_loaded_for_container_reg_barcode",
args=(barcode,),
)
def retrieve_visit_id(self, visit):
"""Get the database ID for a visit on the form mx1234-5."""
return self.get_connection().call_sf_retrieve(
funcname="retrieve_visit_id", args=(visit,)
)
def retrieve_datacollection_id(self, img_filename, img_fileloc):
"""Get the database ID for the data collection corresponding to the given diffraction image file."""
return self.get_connection().call_sf_retrieve(
funcname="retrieve_datacollection_id", args=(img_filename, img_fileloc)
)
def retrieve_current_sessions(self, beamline, tolerance_mins=0):
"""Get a result-set with the currently active sessions on the given beamline."""
return self.get_connection().call_sp_retrieve(
procname="retrieve_current_sessions", args=(beamline, tolerance_mins)
)
def retrieve_sessions_for_beamline_and_run(self, beamline, run):
"""Get a result-set with the sessions associated with the given beamline/instrument and run."""
return self.get_connection().call_sp_retrieve(
procname="retrieve_sessions_for_beamline_and_run", args=(beamline, run)
)
def retrieve_sessions_for_person_login(self, login):
"""Get a result-set with the sessions associated with the given unique person login."""
return self.get_connection().call_sp_retrieve(
procname="retrieve_sessions_for_person_login", args=(login,)
)
def retrieve_current_sessions_for_person(self, beamline, fed_id, tolerance_mins=0):
"""Get a result-set with the currently active sessions on the given beamline."""
return self.get_connection().call_sp_retrieve(
procname="retrieve_current_sessions_for_person",
args=(beamline, fed_id, tolerance_mins),
)
def retrieve_most_recent_session(self, beamline, proposal_code):
"""Get a result-set with the most recent session on the given beamline for the given proposal code"""
return self.get_connection().call_sp_retrieve(
procname="retrieve_most_recent_session", args=(beamline, proposal_code)
)
def retrieve_expired_sessions_for_instrument_and_period(
self, instrument, start_date, end_date
):
"""Returns a multi-row result-set with the sessions that ended within the window defined by start_ate and end_date on instrument given by p_instrument (can contain database wildcards)"""
return self.get_connection().call_sp_retrieve(
procname="retrieve_expired_sessions_for_instrument_and_period",
args=(instrument, start_date, end_date),
)
def retrieve_persons_for_proposal(self, proposal_code, proposal_number):
"""Get a result-set with the persons associated with a given proposal specified by proposal code, proposal_number"""
return self.get_connection().call_sp_retrieve(
procname="retrieve_persons_for_proposal",
args=(proposal_code, proposal_number),
)
def retrieve_persons_for_session(
self, proposal_code, proposal_number, visit_number
):
"""Get a result-set with the persons associated with a given session specified by proposal code, proposal_number, visit_number"""
return self.get_connection().call_sp_retrieve(
procname="retrieve_persons_for_session",
args=(proposal_code, proposal_number, visit_number),
)
def retrieve_current_cm_sessions(self, beamline):
"""Get a result-set with the currently active commissioning (cm) sessions on the given beamline."""
return self.get_connection().call_sp_retrieve(
procname="retrieve_current_cm_sessions", args=(beamline,)
)
def retrieve_active_plates(self, beamline):
"""Get a result-set with the submitted plates not yet in local storage on a given beamline"""
return self.get_connection().call_sp_retrieve(
procname="retrieve_containers_submitted_non_ls", args=(beamline,)
)
def retrieve_proposal_title(self, proposal_code, proposal_number, auth_login=None):
"""Get the title of a given proposal"""
return self.get_connection().call_sp_retrieve(
procname="retrieve_proposal_title",
args=(proposal_code, proposal_number, auth_login),
)
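    # -----------------------------------------------------------------------
    # Minimal usage sketch (not part of this class). It assumes `core` is a
    # connected object exposing these methods, e.g. the ``.core`` attribute of
    # a connection returned by ``ispyb.open()`` with a valid configuration
    # file; the proposal values below are placeholders.
    #
    #     params = Core.get_proposal_params()
    #     params["proposal_code"] = "cm"
    #     params["proposal_number"] = "14451"
    #     params["title"] = "Commissioning"
    #     proposal_id = core.upsert_proposal(list(params.values()))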
|
|
import mock
import pytest
from django.conf import settings
from django.core.management import call_command
from django.core.management.base import CommandError
from olympia import amo
from olympia.addons.management.commands import approve_addons
from olympia.devhub.models import AddonLog
from olympia.editors.models import ReviewerScore
# Where to monkeypatch "lib.crypto.tasks.sign_addons" so it's correctly mocked.
SIGN_ADDONS = 'olympia.addons.management.commands.sign_addons.sign_addons'
# Test the "sign_addons" command.
def test_no_overridden_settings(monkeypatch):
assert not settings.SIGNING_SERVER
assert not settings.PRELIMINARY_SIGNING_SERVER
def no_endpoint(ids, **kwargs):
assert not settings.SIGNING_SERVER
assert not settings.PRELIMINARY_SIGNING_SERVER
monkeypatch.setattr(SIGN_ADDONS, no_endpoint)
call_command('sign_addons', 123)
def test_override_SIGNING_SERVER_setting(monkeypatch):
"""You can override the SIGNING_SERVER settings."""
assert not settings.SIGNING_SERVER
def signing_server(ids, **kwargs):
assert settings.SIGNING_SERVER == 'http://example.com'
monkeypatch.setattr(SIGN_ADDONS, signing_server)
call_command('sign_addons', 123, signing_server='http://example.com')
def test_override_PRELIMINARY_SIGNING_SERVER_setting(monkeypatch):
"""You can override the PRELIMINARY_SIGNING_SERVER settings."""
assert not settings.PRELIMINARY_SIGNING_SERVER
def preliminary_signing_server(ids, **kwargs):
assert settings.PRELIMINARY_SIGNING_SERVER == 'http://example.com'
monkeypatch.setattr(SIGN_ADDONS, preliminary_signing_server)
call_command('sign_addons', 123,
preliminary_signing_server='http://example.com')
def test_force_signing(monkeypatch):
"""You can force signing an addon even if it's already signed."""
def not_forced(ids, force, reason):
assert not force
monkeypatch.setattr(SIGN_ADDONS, not_forced)
call_command('sign_addons', 123)
def is_forced(ids, force, reason):
assert force
monkeypatch.setattr(SIGN_ADDONS, is_forced)
call_command('sign_addons', 123, force=True)
def test_reason(monkeypatch):
"""You can pass a reason."""
def has_reason(ids, force, reason):
assert reason == 'expiry'
monkeypatch.setattr(SIGN_ADDONS, has_reason)
call_command('sign_addons', 123, reason='expiry')
# Test the "approve_addons" command.
@pytest.mark.django_db
def test_approve_addons_get_files_incomplete():
"""An incomplete add-on can't be approved."""
addon = amo.tests.addon_factory(status=amo.STATUS_NULL)
assert approve_addons.get_files([addon.guid]) == []
@pytest.mark.django_db
def test_approve_addons_get_files_bad_guid():
"""An add-on with another guid doesn't get approved."""
addon1 = amo.tests.addon_factory(status=amo.STATUS_UNREVIEWED, guid='foo')
addon1_file = addon1.latest_version.files.get()
addon1_file.update(status=amo.STATUS_UNREVIEWED)
# Create another add-on that we won't get the files for.
addon2 = amo.tests.addon_factory(status=amo.STATUS_UNREVIEWED, guid='bar')
addon2_file = addon2.latest_version.files.get()
addon2_file.update(status=amo.STATUS_UNREVIEWED)
# There's only the addon1's file returned, no other.
assert approve_addons.get_files(['foo']) == [addon1_file]
def id_function(fixture_value):
"""Convert a param from the use_case fixture to a nicer name.
By default, the name (used in the test generated from the parameterized
fixture) will use the fixture name and a number.
Eg: test_foo[use_case0]
Providing explicit 'ids' (either as strings, or as a function) will use
those names instead. Here the name will be something like
test_foo[public-unreviewed-full], for the status values, and if the file is
unreviewed.
"""
addon_status, file_status, review_type = fixture_value
return '{0}-{1}-{2}'.format(amo.STATUS_CHOICES_API[addon_status],
amo.STATUS_CHOICES_API[file_status],
review_type)
@pytest.fixture(
params=[(amo.STATUS_UNREVIEWED, amo.STATUS_UNREVIEWED, 'prelim'),
(amo.STATUS_LITE, amo.STATUS_UNREVIEWED, 'prelim'),
(amo.STATUS_NOMINATED, amo.STATUS_UNREVIEWED, 'full'),
(amo.STATUS_PUBLIC, amo.STATUS_UNREVIEWED, 'full'),
(amo.STATUS_LITE_AND_NOMINATED, amo.STATUS_LITE, 'full')],
# ids are used to build better names for the tests using this fixture.
ids=id_function)
def use_case(request, db):
"""This fixture will return quadruples for different use cases.
Addon | File1 and 2 | Review type
==============================================================
waiting for prelim | unreviewed | prelim reviewed
prelim reviewed | unreviewed | prelim reviewed
waiting for full | unreviewed | fully reviewed
fully reviewed | unreviewed | fully reviewed
prelim waiting for full | prelim reviewed | fully reviewed
"""
addon_status, file_status, review_type = request.param
addon = amo.tests.addon_factory(status=addon_status, guid='foo')
version = addon.latest_version
file1 = version.files.get()
file1.update(status=file_status)
# A second file for good measure.
file2 = amo.tests.file_factory(version=version, status=file_status)
# If the addon is public, and we change its only file to something else
# than public, it'll change to unreviewed.
addon.update(status=addon_status)
assert addon.reload().status == addon_status
assert file1.reload().status == file_status
assert file2.reload().status == file_status
return (addon, file1, file2, review_type)
@pytest.fixture
def mozilla_user(db):
"""Create and return the "mozilla" user used to auto approve addons."""
return amo.tests.user_factory(id=settings.TASK_USER_ID)
def test_approve_addons_get_files(use_case):
"""Files that need to get approved are returned in the list.
Use cases are quadruples taken from the "use_case" fixture above.
"""
addon, file1, file2, review_type = use_case
assert approve_addons.get_files([addon.guid]) == [file1, file2]
@pytest.mark.django_db
def test_approve_addons_approve_files_no_review_type():
"""Files which don't need approval don't change status."""
# Create the "mozilla" user, needed for the log.
amo.tests.user_factory(id=settings.TASK_USER_ID)
addon = amo.tests.addon_factory(status=amo.STATUS_PUBLIC)
file_ = addon.versions.get().files.get()
file_.update(status=amo.STATUS_PUBLIC)
approve_addons.approve_files([(file_, None)])
# Nothing changed.
assert addon.reload().status == amo.STATUS_PUBLIC
assert file_.reload().status == amo.STATUS_PUBLIC
def test_approve_addons_approve_files(use_case, mozilla_user):
"""Files are approved using the correct review type.
Use cases are quadruples taken from the "use_case" fixture above.
"""
addon, file1, file2, review_type = use_case
approve_addons.approve_files([(file1, review_type),
(file2, review_type)])
assert file1.reload().status == (
amo.STATUS_LITE if review_type == 'prelim' else amo.STATUS_PUBLIC)
assert file2.reload().status == (
amo.STATUS_LITE if review_type == 'prelim' else amo.STATUS_PUBLIC)
logs = AddonLog.objects.filter(addon=addon)
assert len(logs) == 2 # One per file.
file1_log, file2_log = logs
# An AddonLog has been created for each approval.
assert file1_log.activity_log.details['comments'] == u'bulk approval'
assert file1_log.activity_log.user == mozilla_user
assert file2_log.activity_log.details['comments'] == u'bulk approval'
assert file2_log.activity_log.user == mozilla_user
# No ReviewerScore was granted, it's an automatic approval.
assert not ReviewerScore.objects.all()
@pytest.mark.django_db
def test_approve_addons_get_review_type_already_approved():
"""The review type for a file that doesn't need approval is None."""
addon = amo.tests.addon_factory(status=amo.STATUS_PUBLIC)
file_ = addon.versions.get().files.get()
file_.update(status=amo.STATUS_PUBLIC)
assert approve_addons.get_review_type(file_) is None
def test_approve_addons_get_review_type(use_case):
"""Review type depends on the file and addon status.
Use cases are quadruples taken from the "use_case" fixture above.
"""
addon, file1, _, review_type = use_case
assert approve_addons.get_review_type(file1) == review_type
# fix_let_scope_bustage.
def test_fix_let_scope_bustage_no_addon_id():
"""If no add-on id is provided, raise."""
with pytest.raises(CommandError) as exc_info:
call_command('fix_let_scope_bustage')
assert 'Please provide at least one add-on id to fix.' in exc_info.value
@mock.patch('olympia.addons.management.commands.fix_let_scope_bustage.'
'fix_let_scope_bustage_in_addons.delay')
def test_fix_let_scope_bustage(mock_fixer):
"""The command should call the task with the list of add-on id provided."""
call_command('fix_let_scope_bustage', 1, 2, 3)
mock_fixer.assert_called_once_with([1, 2, 3])
|
|
from __future__ import division
import os
import six
import numpy as np
from rdkit import Chem
from rdkit.Chem.rdPartialCharges import ComputeGasteigerCharges
from ._base import Descriptor
from ._util import atoms_to_numpy
halogen = {9, 17, 35, 53, 85, 117}
getter_list = []
if six.PY2:
from collections import OrderedDict
getters = OrderedDict()
else:
getters = {}
def getter(short, **attrs):
def proc(f):
f.short = short
for a, v in attrs.items():
setattr(f, a, v)
getters[short] = f
getter_list.append(f)
return f
if short in getters:
raise ValueError("duplicated short name of atomic property")
return proc
@getter(short="c", long="gasteiger charge", gasteiger_charges=True)
def get_gasteiger_charge(atom):
return (
atom.GetDoubleProp("_GasteigerCharge") + atom.GetDoubleProp("_GasteigerHCharge")
if atom.HasProp("_GasteigerHCharge")
else 0.0
)
class PeriodicTable(object):
__slots__ = ("data",)
_datadir = os.path.join(os.path.dirname(__file__), "data")
def __init__(self, data=None):
self.data = data
@classmethod
def load(cls, name, conv=float):
def read(v):
if "-" in v:
return np.nan
try:
return conv(v)
except ValueError:
return
self = cls()
with open(os.path.join(cls._datadir, name)) as file:
self.data = [
v for v in (read(l.split("#")[0]) for l in file) if v is not None
]
return self
def __getitem__(self, i):
if i < 1:
return np.nan
try:
return self.data[i - 1]
except IndexError:
return np.nan
def map(self, f):
new = self.__class__()
new.data = [f(d) for d in self.data]
return new
mass = PeriodicTable.load("mass.txt")
vdw_radii = PeriodicTable.load("van_der_waals_radii.txt")
vdw_volume = vdw_radii.map(lambda r: 4.0 / 3.0 * np.pi * r ** 3)
sanderson = PeriodicTable.load("sanderson_electron_negativity.txt")
pauling = PeriodicTable.load("pauling_electron_negativity.txt")
allred_rocow = PeriodicTable.load("allred_rocow_electron_negativity.txt")
polarizability94 = PeriodicTable.load("polarizalibity94.txt")
polarizability78 = PeriodicTable.load("polarizalibity78.txt")
ionization_potentials = PeriodicTable.load("ionization_potential.txt")
period = PeriodicTable(
([1] * 2)
+ ([2] * 8)
+ ([3] * 8)
+ ([4] * 18)
+ ([5] * 18)
+ ([6] * 32)
+ ([7] * 32)
)
mc_gowan_volume = PeriodicTable.load("mc_gowan_volume.txt")
_table = Chem.GetPeriodicTable()
def GetElementSymbol(i):
return _table.GetElementSymbol(i)
if six.PY2:
def GetAtomicNumber(symbol):
if isinstance(symbol, unicode): # noqa: F821
symbol = str(symbol)
return _table.GetAtomicNumber(symbol)
else:
def GetAtomicNumber(symbol):
return _table.GetAtomicNumber(symbol)
# http://dx.doi.org/10.1002%2Fjps.2600721016
@getter(short="dv", long="valence electrons", valence=True)
def get_valence_electrons(atom):
N = atom.GetAtomicNum()
if N == 1:
return 0
Zv = _table.GetNOuterElecs(N) - atom.GetFormalCharge()
Z = atom.GetAtomicNum() - atom.GetFormalCharge()
hi = atom.GetTotalNumHs()
he = sum(1 for a in atom.GetNeighbors() if a.GetAtomicNum() == 1)
h = hi + he
return (Zv - h) / (Z - Zv - 1)
@getter(short="d", long="sigma electrons", valence=True)
def get_sigma_electrons(atom):
return sum(1 for a in atom.GetNeighbors() if a.GetAtomicNum() != 1)
# http://www.edusoft-lc.com/molconn/manuals/400/chaptwo.html
# p. 283
@getter(short="s", long="intrinsic state", require_connected=True, valence=True)
def get_intrinsic_state(atom):
i = atom.GetAtomicNum()
d = get_sigma_electrons(atom)
dv = get_valence_electrons(atom)
if d == 0:
return np.nan
return ((2.0 / period[i]) ** 2 * dv + 1) / d
def get_core_count(atom):
Z = atom.GetAtomicNum()
if Z == 1:
return 0.0
Zv = _table.GetNOuterElecs(Z)
PN = period[Z]
return (Z - Zv) / (Zv * (PN - 1))
def get_eta_epsilon(atom):
Zv = _table.GetNOuterElecs(atom.GetAtomicNum())
return 0.3 * Zv - get_core_count(atom)
def get_eta_beta_sigma(atom):
e = get_eta_epsilon(atom)
return sum(
0.5 if abs(get_eta_epsilon(a) - e) <= 0.3 else 0.75
for a in atom.GetNeighbors()
if a.GetAtomicNum() != 1
)
def get_eta_nonsigma_contribute(bond):
if bond.GetBondType() is Chem.BondType.SINGLE:
return 0.0
f = 1.0
    if bond.GetBondType() is Chem.BondType.TRIPLE:
        f = 2.0
a = bond.GetBeginAtom()
b = bond.GetEndAtom()
dEps = abs(get_eta_epsilon(a) - get_eta_epsilon(b))
if bond.GetIsAromatic():
y = 2.0
elif dEps > 0.3:
y = 1.5
else:
y = 1.0
return y * f
def get_eta_beta_delta(atom):
if (
atom.GetIsAromatic()
or atom.IsInRing()
or _table.GetNOuterElecs(atom.GetAtomicNum()) - atom.GetTotalValence() <= 0
):
return 0.0
for b in atom.GetNeighbors():
if b.GetIsAromatic():
return 0.5
return 0.0
def get_other_atom(bond, atom):
begin = bond.GetBeginAtom()
if atom.GetIdx() != begin.GetIdx():
return begin
return bond.GetEndAtom()
def get_eta_beta_non_sigma(atom):
return sum(
get_eta_nonsigma_contribute(b)
for b in atom.GetBonds()
if get_other_atom(b, atom).GetAtomicNum() != 1
)
def get_eta_gamma(atom):
beta = (
get_eta_beta_sigma(atom)
+ get_eta_beta_non_sigma(atom)
+ get_eta_beta_delta(atom)
)
if beta == 0:
return np.nan
return get_core_count(atom) / beta
@getter(short="Z", long="atomic number")
def get_atomic_number(a):
return a.GetAtomicNum()
@getter(short="m", long="mass")
def get_mass(a):
return mass[a.GetAtomicNum()]
@getter(short="v", long="vdw volume")
def get_vdw_volume(a):
return vdw_volume[a.GetAtomicNum()]
@getter(short="se", long="sanderson EN")
def get_sanderson_en(a):
return sanderson[a.GetAtomicNum()]
@getter(short="pe", long="pauling EN")
def get_pauling_en(a):
return pauling[a.GetAtomicNum()]
@getter(short="are", long="allred-rocow EN")
def get_allred_rocow_en(a):
return allred_rocow[a.GetAtomicNum()]
@getter(short="p", long="polarizability")
def get_polarizability(a):
return polarizability94[a.GetAtomicNum()]
@getter(short="i", long="ionization potential")
def get_ionization_potential(a):
return ionization_potentials[a.GetAtomicNum()]
def get_mc_gowan_volume(a):
return mc_gowan_volume[a.GetAtomicNum()]
def get_properties(charge=False, valence=False):
for f in getters.values():
if not charge and getattr(f, "gasteiger_charges", False):
continue
if not valence and getattr(f, "valence", False):
continue
yield f.short
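# Illustrative sketch (not part of the original module): list the short names of
# the registered atomic properties, excluding Gasteiger-charge and valence-state
# getters (the defaults of get_properties). The exact contents depend on the
# registration order of the getters above.
def _demo_list_property_shorts():
    return list(get_properties())   # e.g. ["Z", "m", "v", ...]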
class AtomicProperty(Descriptor):
__slots__ = "explicit_hydrogens", "prop", "_initialized"
def __str__(self):
return "Prop{}".format(self.as_argument)
def get_long(self):
return getattr(self.prop, "long", self.prop.__name__)
@property
def as_argument(self):
return getattr(self.prop, "short", self.prop.__name__)
def parameters(self):
return self.explicit_hydrogens, self.prop
def __new__(cls, explicit_hydrogens, prop):
if isinstance(prop, cls):
prop._initialized = True
return prop
return super(AtomicProperty, cls).__new__(cls)
def __init__(self, explicit_hydrogens, prop):
if getattr(self, "_initialized", False):
return
self.explicit_hydrogens = explicit_hydrogens
self.prop = getters.get(prop)
if self.prop is not None:
return
if callable(prop):
self.prop = prop
return
raise TypeError("atomic property is not callable: {!r}".format(prop))
def calculate(self):
if getattr(self.prop, "gasteiger_charges", False):
ComputeGasteigerCharges(self.mol)
r = atoms_to_numpy(self.prop, self.mol)
nans = np.isnan(r)
if np.any(nans):
atms = set(np.array([a.GetSymbol() for a in self.mol.GetAtoms()])[nans])
self.fail(
ValueError("missing {} for {}".format(self.get_long(), list(atms)))
)
return r
_carbon = Chem.Atom(6)
@property
def carbon(self):
return self.prop(self._carbon)
|
|
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License
# Version 1.1 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
# License for the specific language governing rights and limitations
# under the License.
#
# The Original Code is Komodo code.
#
# The Initial Developer of the Original Code is ActiveState Software Inc.
# Portions created by ActiveState Software Inc are Copyright (C) 2000-2007
# ActiveState Software Inc. All Rights Reserved.
#
# Contributor(s):
# ActiveState Software Inc
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
#
# Contributors:
# Eric Promislow ([email protected])
"""Tcl parsing support for codeintel/tclcile.py"""
import string
import sys
import re
import textwrap
import logging
log = logging.getLogger("tcl_parser")
from SilverCity import ScintillaConstants
from codeintel2 import tcl_lexer
from codeintel2 import shared_lexer
from codeintel2 import shared_parser
from codeintel2.parser_data import Name_LineNum, VarInfo, Node, ClassNode, \
FileNode, ArgNode, MethodNode, ModuleNode, VariableNode, BlockNode, \
update_collection
from codeintel2.parser_data import VAR_KIND_GLOBAL, VAR_KIND_LOCAL
class TclClassifier(shared_parser.CommonClassifier):
_quote_patterns = {ScintillaConstants.SCE_TCL_STRING: re.compile(
'^\"(.*)\"$'),
ScintillaConstants.SCE_TCL_DEFAULT: re.compile('^.(.*).$'),
}
def get_builtin_type(self, tok, callback):
if self.is_number(tok):
numval = tok['text']
if numval.find(".") >= 0:
return "Float"
else:
return "Fixnum"
elif self.is_string(tok):
return "String"
return None
def is_any_operator(self, tok):
return tok['style'] == ScintillaConstants.SCE_TCL_OPERATOR
def is_comment(self, tok):
return tok['style'] == ScintillaConstants.SCE_TCL_COMMENT
def is_comment_structured(self, tok, callback):
return False
def is_identifier(self, tok, allow_keywords=False):
return (tok['style'] == ScintillaConstants.SCE_TCL_IDENTIFIER or
(allow_keywords and
tok['style'] == ScintillaConstants.SCE_TCL_WORD))
def is_interpolating_string(self, tok, callback):
return tok['style'] == ScintillaConstants.SCE_TCL_STRING
def is_keyword(self, tok, target):
return tok['style'] == ScintillaConstants.SCE_TCL_WORD and tok['text'] == target
def is_number(self, tok):
return tok['style'] == ScintillaConstants.SCE_TCL_NUMBER
def is_operator(self, tok, target):
return tok['style'] == ScintillaConstants.SCE_TCL_OPERATOR and tok['text'] == target
def is_string(self, tok):
return tok['style'] in [ScintillaConstants.SCE_TCL_STRING,
ScintillaConstants.SCE_TCL_CHARACTER,
ScintillaConstants.SCE_TCL_LITERAL
]
def is_symbol(self, tok):
return False
def quote_patterns_cb(self, tok):
tval = tok['text']
if tval[0] == '"':
return self._quote_patterns[ScintillaConstants.SCE_TCL_STRING]
elif tval[0] == '\'':
return self._quote_patterns[ScintillaConstants.SCE_TCL_CHARACTER]
else:
return self._quote_patterns[ScintillaConstants.SCE_TCL_DEFAULT] # Fallback
# Accessors for where we'd rather work with a style than call a predicate
# fn
@property
def style_identifier(self):
return ScintillaConstants.SCE_TCL_IDENTIFIER
@property
def style_operator(self):
return ScintillaConstants.SCE_TCL_OPERATOR
@property
def style_word(self):
return ScintillaConstants.SCE_TCL_WORD
lang_specific_classes = {"Tcl": TclClassifier,
"AOL": shared_parser.UDLClassifier}
leading_hash_re = re.compile(r'^\s*\#+\s*')
mostly_dashes = re.compile(r'\s*-{10}')
spaces_and_braces_re = re.compile(r'\s*\}\s*$')
def remove_hashes(lines):
len1 = len(lines)
if len1 == 0:
return []
set1 = [leading_hash_re.sub("", s) for s in lines]
if len1 > 0 and mostly_dashes.match(set1[0]):
del set1[0]
if len1 > 1 and mostly_dashes.match(set1[-1]):
del set1[-1]
return set1
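# Illustrative sketch (not part of the original module): remove_hashes strips
# leading "#" runs from each comment line and drops dashed banner lines at
# either end of the block.
def _demo_remove_hashes():
    lines = ["# ----------",
             "# Compute the answer",
             "## details",
             "# ----------"]
    return remove_hashes(lines)   # ['Compute the answer', 'details']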
# Parse Tcl code
class Parser:
def __init__(self, tokenizer, lang):
self.tokenizer = tokenizer
self.block_stack = []
self.tree = FileNode()
self.curr_node = self.tree
self.classifier = lang_specific_classes[lang]()
self.containers = {VAR_KIND_GLOBAL: [self.tree.global_vars],
VAR_KIND_LOCAL: [self.tree.local_vars]} # locals
def _get_fully_qualified_braced_name(self, start_line, start_column):
brace_count = 1
name_parts = []
while 1:
tok = self.tokenizer.get_next_token(skip_ws=0)
if tok['style'] == shared_lexer.EOF_STYLE:
break
elif self.classifier.is_any_operator(tok):
if tok['text'] == "{":
brace_count += 1
elif tok['text'] == "}":
brace_count -= 1
if brace_count == 0:
break
if tok['start_line'] > start_line or tok['start_column'] > start_column:
name_parts.append(" ")
start_column = tok['end_column'] + 1
start_line = tok['start_line']
name_parts.append(tok['text']) # XXX backslashes?
return "".join(name_parts)
def get_fully_qualified_name(self):
tok = self.tokenizer.get_next_token()
if tok['style'] == shared_lexer.EOF_STYLE:
return (None, None)
line_start = tok['start_line']
if self.classifier.is_operator(tok, "{"):
return (self._get_fully_qualified_braced_name(line_start, tok['end_column'] + 1), line_start)
name_start = tok['text']
# Watch out if it starts with a "::"
if name_start == "::":
col = tok['end_column'] + 1
tok = self.tokenizer.get_next_token()
if tok['start_column'] != col or not self.classifier.is_identifier(tok):
self.tokenizer.put_back(tok)
return (name_start, line_start)
name_start += tok['text']
col = tok['end_column'] + 1
while 1:
# Collect operator-type methods
tok = self.tokenizer.get_next_token()
if tok['start_column'] == col and self.classifier.is_operator(tok, "::"):
name_start += tok['text']
col += 2
else:
self.tokenizer.put_back(tok)
break
tok = self.tokenizer.get_next_token()
if tok['start_column'] == col and self.classifier.is_identifier(tok, True):
name_start += tok['text']
col = tok['end_column'] + 1
else:
self.tokenizer.put_back(tok)
break
return (name_start, line_start)
def parse(self):
while self.parse_aux(self.tree):
pass
return self.tree
def get_parsing_objects(self, kwd):
return {
"namespace": [ModuleNode, self.parse_aux],
"proc": [MethodNode, self.parse_method]
}.get(kwd, [None, None])
def _parse_name_list(self):
vars = []
while True:
tok = self.tokenizer.get_next_token()
if tok['style'] == shared_lexer.EOF_STYLE or \
self.classifier.is_operator(tok, "}"):
break
if self.classifier.is_identifier(tok):
vars.append(tok['text'])
return vars
def parse_method(self, curr_node):
# Syntax: proc name { args } { body }
tok = self.tokenizer.get_next_token()
if self.classifier.is_operator(tok, "{"):
# Standard, keep going
do_regular_args = True
elif self.classifier.is_identifier(tok):
# Assume it's the one arg
if tok['text'] == "args":
curr_node.add_arg(tok['text'], None, "varargs")
else:
curr_node.add_arg(tok['text'])
curr_node.signature = "%s {%s}" % (curr_node.name, tok['text'])
do_regular_args = False
else:
self.tokenizer.put_back(tok)
return
if do_regular_args:
braceCount = 1
init_indentation = curr_node.indentation
tok_count = 0
tok_lim = 1000
self.tokenizer.start_sig()
argStart = True
while 1:
tok = self.tokenizer.get_next_token()
if self.classifier.is_any_operator(tok):
argStart = False
tval = tok['text']
if tval == "{":
braceCount += 1
if braceCount == 2:
argStart = True
elif tval == "}":
braceCount -= 1
if braceCount <= 0:
break
elif braceCount == 1:
argStart = True
elif argStart:
if braceCount == 2: # Wait for a } to get next arg.
argStart = False
if self.classifier.is_identifier(tok, True):
if tok['text'] == "args" and braceCount == 1:
# We need to peek at the next token
tok2 = self.tokenizer.get_next_token()
if self.classifier.is_operator(tok2, "}"):
curr_node.add_arg(tok['text'], None, "varargs")
break
else:
self.tokenizer.put_back(tok2)
curr_node.add_arg(tok['text'])
tok_count += 1
if tok_count > tok_lim and tok['start_column'] < init_indentation:
break
self.tokenizer.stop_sig()
# XXX Check white-space in the sig
# We don't know we've hit the end of the sig until we hit
# that final "}", so we need to pull it out.
curr_node.signature = "%s {%s}" % (curr_node.name,
spaces_and_braces_re.sub('', self.tokenizer.get_sig()))
# Now get the body
tok = self.tokenizer.get_next_token()
if not self.classifier.is_operator(tok, "{"):
# Give up
self.tokenizer.put_back(tok)
return
braceCount = 1
self.parse_aux(curr_node, 1) # Count the brace we just saw.
# end parse_method
def parse_assignment(self, tok_text, start_line, isLocal=True):
# Don't bother trying to type it yet.
# Figure out whether we're in a proc or not
if isLocal:
collectionA = self.containers[VAR_KIND_LOCAL]
else:
collectionA = self.containers[VAR_KIND_GLOBAL]
if len(collectionA) == 0 or collectionA[-1] is None:
return
possibleType = self._finishVarAssignment(
collectionA, tok_text, start_line)
update_collection(collectionA[-1], tok_text, start_line, possibleType)
def _finishVarAssignment(self, collectionA, var_name, start_line):
# XXX Add type info
return None
def parse_aux(self, curr_node, braceCount=0):
init_indentation = curr_node.indentation
tok_count = 0
tok_lim = 1000
cmdStart = True
curr_globals = {}
while 1:
tok = self.tokenizer.get_next_token()
if tok['style'] == shared_lexer.EOF_STYLE:
break
# style, text, start_column, start_line, end_column, end_line = tok
style, text = tok['style'], tok['text']
if style == self.classifier.style_word and \
(cmdStart or tok['start_column'] == self.tokenizer.get_curr_indentation()):
cmdStart = False
if text in ["namespace", "proc"]:
curr_indent = self.tokenizer.get_curr_indentation()
if text == "namespace":
tok1 = self.tokenizer.get_next_token()
if not (self.classifier.is_identifier(tok1, True) and tok1['text'] == "eval"):
continue
node_class, node_parser = self.get_parsing_objects(text)
if node_class is None:
sys.stderr.write(
"Couldn't get parsing objects for type %s\n" % text)
break
# Get the comments before further parsing.
comment_lines = remove_hashes(
self.tokenizer.curr_comment())
nm_token = self.get_fully_qualified_name()
fqname = nm_token[0]
if not fqname:
break
# Handle only local names for now
if fqname.startswith("::") and text == "namespace":
fqname = fqname[2:]
new_node = node_class(fqname, tok['start_line'])
new_node.doc_lines = comment_lines
new_node.indentation = curr_indent
self.block_stack.append(new_node)
curr_node.append_node(new_node)
# Push new containers on the symbol table
self.containers[VAR_KIND_LOCAL].append(new_node.local_vars)
node_parser(new_node) # Has self bound to it
self.block_stack.pop()
self.containers[VAR_KIND_LOCAL].pop()
# Clear any comment that's hanging around
self.tokenizer.clear_comments()
elif text == "package":
tok1 = self.tokenizer.get_next_token()
if self.classifier.is_identifier(tok1, True):
if tok1['text'] == "require":
tok2 = self.tokenizer.get_next_token()
if self.classifier.is_identifier(tok2, True) and tok2['text'] != "Tcl":
curr_node.imports.append(Name_LineNum(
tok2['text'], tok['start_line']))
elif text == "global":
# XXX: all tokens following 'global' should be declared
# vars
tok = self.tokenizer.get_next_token()
if self.classifier.is_identifier(tok, True):
curr_globals[tok['text']] = None
elif text == "set":
# XXX: Needs to handle lappend, append, incr, variable
# XXX: possibly dict set, array set, upvar, lassign,
# XXX: foreach, regsub (non-inline)
tok = self.tokenizer.get_next_token()
if self.classifier.is_identifier(tok, True):
if tok['text'] in curr_globals:
pass
else:
self.parse_assignment(tok['text'], tok[
'start_line'], isinstance(curr_node, MethodNode))
elif text == "lassign":
tok = self.tokenizer.get_next_token()
if self.classifier.is_operator(tok, "{"):
start_line = tok['start_line']
isLocal = isinstance(curr_node, MethodNode)
if isLocal:
collectionA = self.containers[VAR_KIND_LOCAL]
else:
collectionA = self.containers[VAR_KIND_GLOBAL]
vars = self._parse_name_list()
for v in vars:
update_collection(collectionA[-1], v, start_line)
elif self.classifier.is_any_operator(tok):
cmdStart = False
if text == "{":
braceCount += 1
elif text == "}":
braceCount -= 1
if braceCount <= 0:
break
elif text in (";", "["):
cmdStart = True
elif text == "\\":
# Skip the next token, whatever it is - bug 74850
tok = self.tokenizer.get_next_token()
else:
cmdStart = False
# Sanity check to make sure we haven't gone too far.
tok_count += 1
if tok_count > tok_lim and tok['start_column'] < init_indentation:
break
# end while
curr_node.set_line_end_num(self.tokenizer.curr_line_no())
return tok['style'] != shared_lexer.EOF_STYLE
# end parse_aux()
# end class Parser
if __name__ == "__main__":
if len(sys.argv) == 1:
sample_code = tcl_lexer.provide_sample_code()
fs = None
elif sys.argv[1] == "-":
fs = sys.stdin
closefs = False
else:
fs = open(sys.argv[1], "r")
closefs = True
if fs is not None:
sample_code = shared_lexer.read_and_detab(fs, closefs)
# fs comes back closed
tokenizer = tcl_lexer.TclLexer(sample_code)
parser = Parser(tokenizer, "Tcl")
tree = parser.parse()
print("Analyze the parse tree")
tree.dump()
|
|
"""Gaussian Mixture Model."""
# Author: Wei Xue <[email protected]>
# Modified by Thierry Guillemot <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import linalg
from .base import BaseMixture, _check_shape
from ..externals.six.moves import zip
from ..utils import check_array
from ..utils.validation import check_is_fitted
from ..utils.extmath import row_norms
###############################################################################
# Gaussian mixture shape checkers used by the GaussianMixture class
def _check_weights(weights, n_components):
"""Check the user provided 'weights'.
Parameters
----------
weights : array-like, shape (n_components,)
The proportions of components of each mixture.
n_components : int
Number of components.
Returns
-------
weights : array, shape (n_components,)
"""
weights = check_array(weights, dtype=[np.float64, np.float32],
ensure_2d=False)
_check_shape(weights, (n_components,), 'weights')
# check range
if (any(np.less(weights, 0.)) or
any(np.greater(weights, 1.))):
raise ValueError("The parameter 'weights' should be in the range "
"[0, 1], but got max value %.5f, min value %.5f"
% (np.min(weights), np.max(weights)))
# check normalization
if not np.allclose(np.abs(1. - np.sum(weights)), 0.):
raise ValueError("The parameter 'weights' should be normalized, "
"but got sum(weights) = %.5f" % np.sum(weights))
return weights
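# Illustrative sketch (not part of the original module; relies on the check_array
# and _check_shape helpers imported above): valid, normalized weights pass
# through unchanged, while unnormalized weights raise ValueError.
def _demo_check_weights():
    ok = _check_weights(np.array([0.3, 0.7]), n_components=2)
    try:
        _check_weights(np.array([0.3, 0.3]), n_components=2)
    except ValueError:
        pass   # sum(weights) != 1, rejected as expected
    return ok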
def _check_means(means, n_components, n_features):
"""Validate the provided 'means'.
Parameters
----------
means : array-like, shape (n_components, n_features)
The centers of the current components.
n_components : int
Number of components.
n_features : int
Number of features.
Returns
-------
means : array, (n_components, n_features)
"""
means = check_array(means, dtype=[np.float64, np.float32], ensure_2d=False)
_check_shape(means, (n_components, n_features), 'means')
return means
def _check_precision_positivity(precision, covariance_type):
"""Check a precision vector is positive-definite."""
if np.any(np.less_equal(precision, 0.0)):
raise ValueError("'%s precision' should be "
"positive" % covariance_type)
def _check_precision_matrix(precision, covariance_type):
"""Check a precision matrix is symmetric and positive-definite."""
if not (np.allclose(precision, precision.T) and
np.all(linalg.eigvalsh(precision) > 0.)):
raise ValueError("'%s precision' should be symmetric, "
"positive-definite" % covariance_type)
def _check_precisions_full(precisions, covariance_type):
"""Check the precision matrices are symmetric and positive-definite."""
for prec in precisions:
_check_precision_matrix(prec, covariance_type)
def _check_precisions(precisions, covariance_type, n_components, n_features):
"""Validate user provided precisions.
Parameters
----------
precisions : array-like,
'full' : shape of (n_components, n_features, n_features)
'tied' : shape of (n_features, n_features)
'diag' : shape of (n_components, n_features)
'spherical' : shape of (n_components,)
covariance_type : string
n_components : int
Number of components.
n_features : int
Number of features.
Returns
-------
precisions : array
"""
precisions = check_array(precisions, dtype=[np.float64, np.float32],
ensure_2d=False,
allow_nd=covariance_type == 'full')
precisions_shape = {'full': (n_components, n_features, n_features),
'tied': (n_features, n_features),
'diag': (n_components, n_features),
'spherical': (n_components,)}
_check_shape(precisions, precisions_shape[covariance_type],
'%s precision' % covariance_type)
    check_functions = {'full': _check_precisions_full,
                       'tied': _check_precision_matrix,
                       'diag': _check_precision_positivity,
                       'spherical': _check_precision_positivity}
    check_functions[covariance_type](precisions, covariance_type)
return precisions
###############################################################################
# Gaussian mixture parameters estimators (used by the M-Step)
def _estimate_gaussian_covariances_full(resp, X, nk, means, reg_covar):
"""Estimate the full covariance matrices.
Parameters
----------
resp : array-like, shape (n_samples, n_components)
X : array-like, shape (n_samples, n_features)
nk : array-like, shape (n_components,)
means : array-like, shape (n_components, n_features)
reg_covar : float
Returns
-------
covariances : array, shape (n_components, n_features, n_features)
The covariance matrix of the current components.
"""
n_components, n_features = means.shape
covariances = np.empty((n_components, n_features, n_features))
for k in range(n_components):
diff = X - means[k]
covariances[k] = np.dot(resp[:, k] * diff.T, diff) / nk[k]
covariances[k].flat[::n_features + 1] += reg_covar
return covariances
def _estimate_gaussian_covariances_tied(resp, X, nk, means, reg_covar):
"""Estimate the tied covariance matrix.
Parameters
----------
resp : array-like, shape (n_samples, n_components)
X : array-like, shape (n_samples, n_features)
nk : array-like, shape (n_components,)
means : array-like, shape (n_components, n_features)
reg_covar : float
Returns
-------
covariance : array, shape (n_features, n_features)
The tied covariance matrix of the components.
"""
avg_X2 = np.dot(X.T, X)
avg_means2 = np.dot(nk * means.T, means)
covariance = avg_X2 - avg_means2
covariance /= nk.sum()
covariance.flat[::len(covariance) + 1] += reg_covar
return covariance
def _estimate_gaussian_covariances_diag(resp, X, nk, means, reg_covar):
"""Estimate the diagonal covariance vectors.
Parameters
----------
    resp : array-like, shape (n_samples, n_components)
X : array-like, shape (n_samples, n_features)
nk : array-like, shape (n_components,)
means : array-like, shape (n_components, n_features)
reg_covar : float
Returns
-------
covariances : array, shape (n_components, n_features)
The covariance vector of the current components.
"""
avg_X2 = np.dot(resp.T, X * X) / nk[:, np.newaxis]
avg_means2 = means ** 2
avg_X_means = means * np.dot(resp.T, X) / nk[:, np.newaxis]
return avg_X2 - 2 * avg_X_means + avg_means2 + reg_covar
def _estimate_gaussian_covariances_spherical(resp, X, nk, means, reg_covar):
"""Estimate the spherical variance values.
Parameters
----------
    resp : array-like, shape (n_samples, n_components)
X : array-like, shape (n_samples, n_features)
nk : array-like, shape (n_components,)
means : array-like, shape (n_components, n_features)
reg_covar : float
Returns
-------
variances : array, shape (n_components,)
        The variance values of each component.
"""
return _estimate_gaussian_covariances_diag(resp, X, nk,
means, reg_covar).mean(1)
def _estimate_gaussian_parameters(X, resp, reg_covar, covariance_type):
"""Estimate the Gaussian distribution parameters.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The input data array.
resp : array-like, shape (n_samples, n_components)
The responsibilities for each data sample in X.
reg_covar : float
The regularization added to the diagonal of the covariance matrices.
covariance_type : {'full', 'tied', 'diag', 'spherical'}
The type of precision matrices.
Returns
-------
nk : array-like, shape (n_components,)
The numbers of data samples in the current components.
means : array-like, shape (n_components, n_features)
The centers of the current components.
covariances : array-like
The covariance matrix of the current components.
        The shape depends on the covariance_type.
"""
nk = resp.sum(axis=0) + 10 * np.finfo(resp.dtype).eps
means = np.dot(resp.T, X) / nk[:, np.newaxis]
covariances = {"full": _estimate_gaussian_covariances_full,
"tied": _estimate_gaussian_covariances_tied,
"diag": _estimate_gaussian_covariances_diag,
"spherical": _estimate_gaussian_covariances_spherical
}[covariance_type](resp, X, nk, means, reg_covar)
return nk, means, covariances
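# Illustrative sketch (not part of the original module): with hard, one-hot
# responsibilities the estimator reduces to per-cluster sample means and
# (regularized) sample covariances.
def _demo_estimate_gaussian_parameters():
    rng = np.random.RandomState(0)
    X = rng.randn(6, 2)
    resp = np.zeros((6, 2))
    resp[:3, 0] = 1.0   # first three samples assigned to component 0
    resp[3:, 1] = 1.0   # last three samples assigned to component 1
    nk, means, covariances = _estimate_gaussian_parameters(
        X, resp, reg_covar=1e-6, covariance_type='full')
    # nk is approximately [3, 3]; covariances has shape (2, 2, 2).
    return nk, means, covariances.shape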
def _compute_precision_cholesky(covariances, covariance_type):
"""Compute the Cholesky decomposition of the precisions.
Parameters
----------
covariances : array-like
The covariance matrix of the current components.
        The shape depends on the covariance_type.
covariance_type : {'full', 'tied', 'diag', 'spherical'}
The type of precision matrices.
Returns
-------
precisions_cholesky : array-like
The cholesky decomposition of sample precisions of the current
        components. The shape depends on the covariance_type.
"""
estimate_precision_error_message = (
"Fitting the mixture model failed because some components have "
"ill-defined empirical covariance (for instance caused by singleton "
"or collapsed samples). Try to decrease the number of components, "
"or increase reg_covar.")
    if covariance_type == 'full':
n_components, n_features, _ = covariances.shape
precisions_chol = np.empty((n_components, n_features, n_features))
for k, covariance in enumerate(covariances):
try:
cov_chol = linalg.cholesky(covariance, lower=True)
except linalg.LinAlgError:
raise ValueError(estimate_precision_error_message)
precisions_chol[k] = linalg.solve_triangular(cov_chol,
np.eye(n_features),
lower=True).T
elif covariance_type == 'tied':
_, n_features = covariances.shape
try:
cov_chol = linalg.cholesky(covariances, lower=True)
except linalg.LinAlgError:
raise ValueError(estimate_precision_error_message)
precisions_chol = linalg.solve_triangular(cov_chol, np.eye(n_features),
lower=True).T
else:
if np.any(np.less_equal(covariances, 0.0)):
raise ValueError(estimate_precision_error_message)
precisions_chol = 1. / np.sqrt(covariances)
return precisions_chol
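# Illustrative sketch (not part of the original module): for a diagonal model the
# precision Cholesky factors are simply 1 / sqrt(variance); the 'full' branch
# instead inverts each component's covariance Cholesky factor.
def _demo_precision_cholesky_diag():
    covariances = np.array([[4.0, 1.0],
                            [0.25, 9.0]])   # (n_components, n_features)
    prec_chol = _compute_precision_cholesky(covariances, 'diag')
    return prec_chol   # approximately [[0.5, 1.0], [2.0, 0.333]]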
###############################################################################
# Gaussian mixture probability estimators
def _compute_log_det_cholesky(matrix_chol, covariance_type, n_features):
"""Compute the log-det of the cholesky decomposition of matrices.
Parameters
----------
matrix_chol : array-like,
Cholesky decompositions of the matrices.
'full' : shape of (n_components, n_features, n_features)
'tied' : shape of (n_features, n_features)
'diag' : shape of (n_components, n_features)
'spherical' : shape of (n_components,)
covariance_type : {'full', 'tied', 'diag', 'spherical'}
n_features : int
Number of features.
Returns
-------
log_det_precision_chol : array-like, shape (n_components,)
        The log-determinant of the Cholesky factor of the precision matrix
        for each component (half the log-determinant of the precision).
"""
if covariance_type == 'full':
n_components, _, _ = matrix_chol.shape
log_det_chol = (np.sum(np.log(
matrix_chol.reshape(
n_components, -1)[:, ::n_features + 1]), 1))
elif covariance_type == 'tied':
log_det_chol = (np.sum(np.log(np.diag(matrix_chol))))
elif covariance_type == 'diag':
log_det_chol = (np.sum(np.log(matrix_chol), axis=1))
else:
log_det_chol = n_features * (np.log(matrix_chol))
return log_det_chol
def _estimate_log_gaussian_prob(X, means, precisions_chol, covariance_type):
"""Estimate the log Gaussian probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
means : array-like, shape (n_components, n_features)
precisions_chol : array-like,
Cholesky decompositions of the precision matrices.
'full' : shape of (n_components, n_features, n_features)
'tied' : shape of (n_features, n_features)
'diag' : shape of (n_components, n_features)
'spherical' : shape of (n_components,)
covariance_type : {'full', 'tied', 'diag', 'spherical'}
Returns
-------
log_prob : array, shape (n_samples, n_components)
"""
n_samples, n_features = X.shape
n_components, _ = means.shape
    # log(det(precision_chol)) is half of log(det(precision))
log_det = _compute_log_det_cholesky(
precisions_chol, covariance_type, n_features)
if covariance_type == 'full':
log_prob = np.empty((n_samples, n_components))
for k, (mu, prec_chol) in enumerate(zip(means, precisions_chol)):
y = np.dot(X, prec_chol) - np.dot(mu, prec_chol)
log_prob[:, k] = np.sum(np.square(y), axis=1)
elif covariance_type == 'tied':
log_prob = np.empty((n_samples, n_components))
for k, mu in enumerate(means):
y = np.dot(X, precisions_chol) - np.dot(mu, precisions_chol)
log_prob[:, k] = np.sum(np.square(y), axis=1)
elif covariance_type == 'diag':
precisions = precisions_chol ** 2
log_prob = (np.sum((means ** 2 * precisions), 1) -
2. * np.dot(X, (means * precisions).T) +
np.dot(X ** 2, precisions.T))
elif covariance_type == 'spherical':
precisions = precisions_chol ** 2
log_prob = (np.sum(means ** 2, 1) * precisions -
2 * np.dot(X, means.T * precisions) +
np.outer(row_norms(X, squared=True), precisions))
return -.5 * (n_features * np.log(2 * np.pi) + log_prob) + log_det
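# Illustrative sketch (not part of the original module; assumes scipy.stats is
# available): for the 'full' case the formula above is the per-component Gaussian
# log-density, so it should agree with scipy's multivariate_normal.logpdf.
def _demo_log_gaussian_prob_matches_scipy():
    from scipy.stats import multivariate_normal
    rng = np.random.RandomState(0)
    X = rng.randn(5, 3)
    means = rng.randn(2, 3)
    A = rng.randn(2, 3, 3)
    covariances = np.array([np.dot(a, a.T) + 3 * np.eye(3) for a in A])
    prec_chol = _compute_precision_cholesky(covariances, 'full')
    log_prob = _estimate_log_gaussian_prob(X, means, prec_chol, 'full')
    ref = np.column_stack([
        multivariate_normal(means[k], covariances[k]).logpdf(X)
        for k in range(2)])
    return np.allclose(log_prob, ref)   # expected to be True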
class GaussianMixture(BaseMixture):
"""Gaussian Mixture.
Representation of a Gaussian mixture model probability distribution.
    This class estimates the parameters of a Gaussian mixture
    distribution via the EM algorithm.
Read more in the :ref:`User Guide <gmm>`.
.. versionadded:: 0.18
Parameters
----------
n_components : int, defaults to 1.
The number of mixture components.
covariance_type : {'full', 'tied', 'diag', 'spherical'},
defaults to 'full'.
String describing the type of covariance parameters to use.
Must be one of::
'full' (each component has its own general covariance matrix),
'tied' (all components share the same general covariance matrix),
'diag' (each component has its own diagonal covariance matrix),
'spherical' (each component has its own single variance).
tol : float, defaults to 1e-3.
The convergence threshold. EM iterations will stop when the
lower bound average gain is below this threshold.
reg_covar : float, defaults to 1e-6.
Non-negative regularization added to the diagonal of covariance.
        Ensures that the covariance matrices are all positive.
max_iter : int, defaults to 100.
The number of EM iterations to perform.
n_init : int, defaults to 1.
The number of initializations to perform. The best results are kept.
init_params : {'kmeans', 'random'}, defaults to 'kmeans'.
The method used to initialize the weights, the means and the
precisions.
Must be one of::
'kmeans' : responsibilities are initialized using kmeans.
'random' : responsibilities are initialized randomly.
weights_init : array-like, shape (n_components, ), optional
The user-provided initial weights, defaults to None.
        If it is None, weights are initialized using the `init_params` method.
means_init : array-like, shape (n_components, n_features), optional
        The user-provided initial means, defaults to None.
        If it is None, means are initialized using the `init_params` method.
precisions_init : array-like, optional.
The user-provided initial precisions (inverse of the covariance
matrices), defaults to None.
        If it is None, precisions are initialized using the 'init_params' method.
The shape depends on 'covariance_type'::
(n_components,) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
warm_start : bool, default to False.
If 'warm_start' is True, the solution of the last fitting is used as
initialization for the next call of fit(). This can speed up
        convergence when fit is called several times on similar problems.
verbose : int, default to 0.
Enable verbose output. If 1 then it prints the current
initialization and each iteration step. If greater than 1 then
        it also prints the log probability and the time needed
for each step.
verbose_interval : int, default to 10.
        Number of iterations done before the next print.
Attributes
----------
weights_ : array-like, shape (n_components,)
        The weights of each mixture component.
means_ : array-like, shape (n_components, n_features)
The mean of each mixture component.
covariances_ : array-like
The covariance of each mixture component.
The shape depends on `covariance_type`::
(n_components,) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
precisions_ : array-like
The precision matrices for each component in the mixture. A precision
matrix is the inverse of a covariance matrix. A covariance matrix is
        symmetric positive definite, so the mixture of Gaussians can be
equivalently parameterized by the precision matrices. Storing the
precision matrices instead of the covariance matrices makes it more
efficient to compute the log-likelihood of new samples at test time.
The shape depends on `covariance_type`::
(n_components,) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
precisions_cholesky_ : array-like
The cholesky decomposition of the precision matrices of each mixture
component. A precision matrix is the inverse of a covariance matrix.
        A covariance matrix is symmetric positive definite, so the mixture of
        Gaussians can be equivalently parameterized by the precision matrices.
Storing the precision matrices instead of the covariance matrices makes
it more efficient to compute the log-likelihood of new samples at test
time. The shape depends on `covariance_type`::
(n_components,) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
converged_ : bool
True when convergence was reached in fit(), False otherwise.
n_iter_ : int
        Number of steps used by the best fit of EM to reach convergence.
lower_bound_ : float
Log-likelihood of the best fit of EM.
See Also
--------
    BayesianGaussianMixture : Gaussian mixture model fit with variational
        inference.
"""
def __init__(self, n_components=1, covariance_type='full', tol=1e-3,
reg_covar=1e-6, max_iter=100, n_init=1, init_params='kmeans',
weights_init=None, means_init=None, precisions_init=None,
random_state=None, warm_start=False,
verbose=0, verbose_interval=10):
super(GaussianMixture, self).__init__(
n_components=n_components, tol=tol, reg_covar=reg_covar,
max_iter=max_iter, n_init=n_init, init_params=init_params,
random_state=random_state, warm_start=warm_start,
verbose=verbose, verbose_interval=verbose_interval)
self.covariance_type = covariance_type
self.weights_init = weights_init
self.means_init = means_init
self.precisions_init = precisions_init
def _check_parameters(self, X):
"""Check the Gaussian mixture parameters are well defined."""
_, n_features = X.shape
if self.covariance_type not in ['spherical', 'tied', 'diag', 'full']:
raise ValueError("Invalid value for 'covariance_type': %s "
"'covariance_type' should be in "
"['spherical', 'tied', 'diag', 'full']"
% self.covariance_type)
if self.weights_init is not None:
self.weights_init = _check_weights(self.weights_init,
self.n_components)
if self.means_init is not None:
self.means_init = _check_means(self.means_init,
self.n_components, n_features)
if self.precisions_init is not None:
self.precisions_init = _check_precisions(self.precisions_init,
self.covariance_type,
self.n_components,
n_features)
def _initialize(self, X, resp):
"""Initialization of the Gaussian mixture parameters.
Parameters
----------
X : array-like, shape (n_samples, n_features)
resp : array-like, shape (n_samples, n_components)
"""
n_samples, _ = X.shape
weights, means, covariances = _estimate_gaussian_parameters(
X, resp, self.reg_covar, self.covariance_type)
weights /= n_samples
self.weights_ = (weights if self.weights_init is None
else self.weights_init)
self.means_ = means if self.means_init is None else self.means_init
if self.precisions_init is None:
self.covariances_ = covariances
self.precisions_cholesky_ = _compute_precision_cholesky(
covariances, self.covariance_type)
elif self.covariance_type == 'full':
self.precisions_cholesky_ = np.array(
[linalg.cholesky(prec_init, lower=True)
for prec_init in self.precisions_init])
elif self.covariance_type == 'tied':
self.precisions_cholesky_ = linalg.cholesky(self.precisions_init,
lower=True)
else:
self.precisions_cholesky_ = self.precisions_init
def _m_step(self, X, log_resp):
"""M step.
Parameters
----------
X : array-like, shape (n_samples, n_features)
log_resp : array-like, shape (n_samples, n_components)
            Logarithm of the posterior probabilities (or responsibilities) of
            each sample in X.
"""
n_samples, _ = X.shape
self.weights_, self.means_, self.covariances_ = (
_estimate_gaussian_parameters(X, np.exp(log_resp), self.reg_covar,
self.covariance_type))
self.weights_ /= n_samples
self.precisions_cholesky_ = _compute_precision_cholesky(
self.covariances_, self.covariance_type)
def _estimate_log_prob(self, X):
return _estimate_log_gaussian_prob(
X, self.means_, self.precisions_cholesky_, self.covariance_type)
def _estimate_log_weights(self):
return np.log(self.weights_)
def _compute_lower_bound(self, _, log_prob_norm):
return log_prob_norm
def _check_is_fitted(self):
check_is_fitted(self, ['weights_', 'means_', 'precisions_cholesky_'])
def _get_parameters(self):
return (self.weights_, self.means_, self.covariances_,
self.precisions_cholesky_)
def _set_parameters(self, params):
(self.weights_, self.means_, self.covariances_,
self.precisions_cholesky_) = params
# Attributes computation
_, n_features = self.means_.shape
if self.covariance_type == 'full':
self.precisions_ = np.empty(self.precisions_cholesky_.shape)
for k, prec_chol in enumerate(self.precisions_cholesky_):
self.precisions_[k] = np.dot(prec_chol, prec_chol.T)
elif self.covariance_type == 'tied':
self.precisions_ = np.dot(self.precisions_cholesky_,
self.precisions_cholesky_.T)
else:
self.precisions_ = self.precisions_cholesky_ ** 2
def _n_parameters(self):
"""Return the number of free parameters in the model."""
_, n_features = self.means_.shape
if self.covariance_type == 'full':
cov_params = self.n_components * n_features * (n_features + 1) / 2.
elif self.covariance_type == 'diag':
cov_params = self.n_components * n_features
elif self.covariance_type == 'tied':
cov_params = n_features * (n_features + 1) / 2.
elif self.covariance_type == 'spherical':
cov_params = self.n_components
mean_params = n_features * self.n_components
return int(cov_params + mean_params + self.n_components - 1)
def bic(self, X):
"""Bayesian information criterion for the current model on the input X.
Parameters
----------
X : array of shape (n_samples, n_dimensions)
Returns
-------
bic : float
The lower the better.
"""
return (-2 * self.score(X) * X.shape[0] +
self._n_parameters() * np.log(X.shape[0]))
def aic(self, X):
"""Akaike information criterion for the current model on the input X.
Parameters
----------
X : array of shape (n_samples, n_dimensions)
Returns
-------
aic : float
The lower the better.
"""
return -2 * self.score(X) * X.shape[0] + 2 * self._n_parameters()
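# Illustrative usage sketch (not part of the original module; assumes the
# fit/predict API inherited from BaseMixture): fit a two-component model on toy
# data, then read off the means, labels and BIC.
def _demo_gaussian_mixture_usage():
    rng = np.random.RandomState(0)
    X = np.vstack([rng.randn(100, 2), rng.randn(100, 2) + 5.0])
    gmm = GaussianMixture(n_components=2, covariance_type='full',
                          random_state=0).fit(X)
    labels = gmm.predict(X)
    return gmm.means_, gmm.bic(X), labels[:5]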
|
|
#!/usr/bin/env python
"""A slippy map GUI.
Implements a tiled slippy map using Tk canvas. Displays map tiles using
whatever projection the tiles are in and only knows about tile coordinates,
(as opposed to geospatial coordinates.) This assumes that the tile-space is
organized as a power-of-two pyramid, with the origin in the upper left corner.
This currently has several spots that are hard-coded for 256x256 tiles, even
though MapOverlay tries to track this.
Supports mouse-based pan and zoom as well as tile upsampling while waiting
for new tiles to load. The map to display is specified by a MapOverlay, and
added to the GUI on creation or manually using addOverlay()
gui = MapClient(MakeOverlay(mapid))
Tiles are referenced using a key of (level, x, y) throughout.
Several of the functions are named to match the Google Maps Javascript API,
and therefore violate style guidelines.
"""
# TODO(user):
# 1) Add a zoom bar.
# 2) When the move() is happening inside the Drag function, it'd be
# a good idea to use a semaphore to keep new tiles from being added
# and subsequently moved.
from __future__ import print_function
import collections
import functools
import math
import sys
import threading
import six
from six.moves import queue
from six.moves import urllib
from six.moves import xrange
# check if the Python imaging libraries used by the mapclient module are
# installed
try:
# Python3
from PIL import ImageTk # pylint: disable=g-import-not-at-top
from PIL import Image # pylint: disable=g-import-not-at-top
except ImportError:
try:
# Python2
import ImageTk # pylint: disable=g-import-not-at-top
import Image # pylint: disable=g-import-not-at-top
except ImportError:
print("""
ERROR: A Python library (PIL) used by the Earth Engine API mapclient module
was not found. Information on PIL can be found at:
http://pypi.python.org/pypi/PIL
""")
raise
try:
from six.moves import tkinter as Tkinter # pylint: disable=g-import-not-at-top
except ImportError:
print("""
ERROR: A Python library (Tkinter) used by the Earth Engine API mapclient
module was not found. Instructions for installing Tkinter can be found at:
http://tkinter.unpythonic.net/wiki/How_to_install_Tkinter
""")
raise
# The default URL to fetch tiles from. We could pull this from the EE library,
# however this doesn't have any other dependencies on that yet, so let's not.
BASE_URL = 'https://earthengine.googleapis.com'
# This is a URL pattern for creating an overlay from the google maps base map.
# The z, x and y arguments at the end correspond to level, x, y here.
DEFAULT_MAP_URL_PATTERN = ('http://mt1.google.com/vt/lyrs=m@176000000&hl=en&'
'src=app&z=%d&x=%d&y=%d')
class MapClient(threading.Thread):
"""A simple discrete zoom level map viewer."""
def __init__(self, opt_overlay=None, opt_width=1024, opt_height=768):
"""Initialize the MapClient UI.
Args:
opt_overlay: A mapoverlay to display. If not specified, the default
Google Maps basemap is used.
opt_width: The default width of the frame to construct.
opt_height: The default height of the frame to construct.
"""
threading.Thread.__init__(self)
self.ready = False # All initialization is done.
self.tiles = {} # The cached stack of images at each grid cell.
self.tktiles = {} # The cached PhotoImage at each grid cell.
self.level = 2 # Starting zoom level
self.origin_x = None # The map origin x offset at the current level.
self.origin_y = None # The map origin y offset at the current level.
self.parent = None # A handle to the top level Tk widget.
self.frame = None # A handle to the Tk frame.
self.canvas = None # A handle to the Tk canvas
self.width = opt_width
self.height = opt_height
self.anchor_x = None # Drag anchor.
self.anchor_y = None # Drag anchor.
# Map origin offsets; start at the center of the map.
self.origin_x = (-(2 ** self.level) * 128) + self.width / 2
self.origin_y = (-(2 ** self.level) * 128) + self.height / 2
if not opt_overlay:
# Default to a google maps basemap
opt_overlay = MapOverlay(DEFAULT_MAP_URL_PATTERN)
    # The list of overlays; the last overlay in the list is drawn on top.
self.overlays = [opt_overlay]
self.start()
def run(self):
"""Set up the user interface."""
width = self.width
height = self.height
# Build the UI
self.parent = Tkinter.Tk()
self.frame = frame = Tkinter.Frame(self.parent, width=width, height=height)
frame.pack(fill=Tkinter.BOTH, expand=Tkinter.YES)
self.canvas = canvas = Tkinter.Canvas(frame,
width=self.GetFrameSize()[0],
height=self.GetFrameSize()[1])
canvas.pack(side=Tkinter.TOP, fill=Tkinter.BOTH, expand=Tkinter.YES)
canvas.create_rectangle(0, 0, self.GetMapSize()[0], self.GetMapSize()[1],
fill='#888888')
canvas.bind('<Button-1>', self.ClickHandler)
canvas.bind('<ButtonRelease-1>', self.ReleaseHandler)
# Button-4 and Button-5 are scroll wheel up/down events.
canvas.bind('<Button-4>', functools.partial(self.Zoom, direction=1))
canvas.bind('<Button-5>', functools.partial(self.Zoom, direction=-1))
canvas.bind('<Double-Button-1>', functools.partial(self.Zoom, direction=1))
frame.bind('<Configure>', self.ResizeHandler)
frame.bind_all('<Key>', self.KeypressHandler)
def SetReady():
self.ready = True
self.parent.after_idle(SetReady)
self.parent.mainloop()
def addOverlay(self, overlay): # pylint: disable=g-bad-name
"""Add an overlay to the map."""
self.overlays.append(overlay)
self.LoadTiles()
def GetFrameSize(self):
if self.frame:
return (int(self.frame.cget('width')), int(self.frame.cget('height')))
else:
return (self.width, self.height)
def GetMapSize(self):
if self.frame:
return (int(self.canvas.cget('width')), int(self.canvas.cget('height')))
else:
return (self.width, self.height)
def GetViewport(self):
"""Return the visible portion of the map as [xlo, ylo, xhi, yhi]."""
width, height = self.GetMapSize()
# pylint: disable=invalid-unary-operand-type
return [-self.origin_x, -self.origin_y,
-self.origin_x + width, -self.origin_y + height]
def LoadTiles(self):
"""Refresh the entire map."""
# Start with the overlay on top.
if not self.ready:
return
for i, overlay in reversed(list(enumerate(self.overlays))):
tile_list = overlay.CalcTiles(self.level, self.GetViewport())
for key in tile_list:
overlay.getTile(key, functools.partial(
self.AddTile, key=key, overlay=overlay, layer=i))
def Flush(self):
"""Empty out all the image fetching queues."""
for overlay in self.overlays:
overlay.Flush()
def CompositeTiles(self, key):
"""Composite together all the tiles in this cell into a single image."""
composite = None
for layer in sorted(self.tiles[key]):
image = self.tiles[key][layer]
if not composite:
composite = image.copy()
else:
composite.paste(image, (0, 0), image)
return composite
def AddTile(self, image, key, overlay, layer):
"""Add a tile to the map.
This keeps track of the tiles for each overlay in each grid cell.
As new tiles come in, all the tiles in a grid cell are composited together
into a new tile and any old tile for that spot is replaced.
Args:
image: The image tile to display.
key: A tuple containing the key of the image (level, x, y)
overlay: The overlay this tile belongs to.
layer: The layer number this overlay corresponds to. Only used
for caching purposes.
"""
# TODO(user): This function is called from multiple threads, and
# could use some synchronization, but it seems to work.
if self.level == key[0]: # Don't add late tiles from another level.
self.tiles[key] = self.tiles.get(key, {})
self.tiles[key][layer] = image
newtile = self.CompositeTiles(key)
if key not in self.tktiles:
newtile = ImageTk.PhotoImage(newtile)
xpos = key[1] * overlay.TILE_WIDTH + self.origin_x
ypos = key[2] * overlay.TILE_HEIGHT + self.origin_y
self.canvas.create_image(
xpos, ypos, anchor=Tkinter.NW, image=newtile, tags=['tile', key])
self.tktiles[key] = newtile # Hang on to the new tile.
else:
self.tktiles[key].paste(newtile)
def Zoom(self, event, direction):
"""Zoom the map.
Args:
event: The event that caused this zoom request.
direction: The direction to zoom. +1 for higher zoom, -1 for lower.
"""
if self.level + direction >= 0:
# Discard everything cached in the MapClient, and flush the fetch queues.
self.Flush()
self.canvas.delete(Tkinter.ALL)
self.tiles = {}
self.tktiles = {}
if direction > 0:
self.origin_x = self.origin_x * 2 - event.x
self.origin_y = self.origin_y * 2 - event.y
else:
self.origin_x = (self.origin_x + event.x) / 2
self.origin_y = (self.origin_y + event.y) / 2
self.level += direction
self.LoadTiles()
def ClickHandler(self, event):
"""Records the anchor location and sets drag handler."""
self.anchor_x = event.x
self.anchor_y = event.y
self.canvas.bind('<Motion>', self.DragHandler)
def DragHandler(self, event):
"""Updates the map position and anchor position."""
dx = event.x - self.anchor_x
dy = event.y - self.anchor_y
if dx or dy:
self.canvas.move(Tkinter.ALL, dx, dy)
self.origin_x += dx
self.origin_y += dy
self.anchor_x = event.x
self.anchor_y = event.y
def ReleaseHandler(self, unused_event):
"""Unbind drag handler and redraw."""
self.canvas.unbind('<Motion>')
self.LoadTiles()
def ResizeHandler(self, event):
"""Handle resize events."""
# There's a 2 pixel border.
self.canvas.config(width=event.width - 2, height=event.height - 2)
self.LoadTiles()
def CenterMap(self, lon, lat, opt_zoom=None):
"""Center the map at the given lon, lat and zoom level."""
if self.canvas:
self.Flush()
self.canvas.delete(Tkinter.ALL)
self.tiles = {}
self.tktiles = {}
width, height = self.GetMapSize()
if opt_zoom is not None:
self.level = opt_zoom
# From maps/api/javascript/geometry/mercator_projection.js
mercator_range = 256.0
scale = 2 ** self.level
origin_x = (mercator_range / 2.0) * scale
origin_y = (mercator_range / 2.0) * scale
pixels_per_lon_degree = (mercator_range / 360.0) * scale
pixels_per_lon_radian = (mercator_range / (2 * math.pi)) * scale
x = origin_x + (lon * pixels_per_lon_degree)
siny = math.sin(lat * math.pi / 180.0)
# Prevent sin() overflow.
e = 1 - 1e-15
if siny > e:
siny = e
elif siny < -e:
siny = -e
y = origin_y + (0.5 * math.log((1 + siny) / (1 - siny)) *
-pixels_per_lon_radian)
self.origin_x = -x + width / 2
self.origin_y = -y + height / 2
self.LoadTiles()
def KeypressHandler(self, event):
"""Handle keypress events."""
if event.char == 'q' or event.char == 'Q':
self.parent.destroy()
class MapOverlay(object):
"""A class representing a map overlay."""
TILE_WIDTH = 256
TILE_HEIGHT = 256
MAX_CACHE = 1000 # The maximum number of tiles to cache.
_images = {} # The tile cache, keyed by (url, level, x, y).
_lru_keys = [] # Keys to the cached tiles, for cache ejection.
def __init__(self, url, tile_fetcher=None):
"""Initialize the MapOverlay."""
self.url = url
self.tile_fetcher = tile_fetcher
# Make 10 workers.
self.queue = queue.Queue()
self.fetchers = [MapOverlay.TileFetcher(self) for unused_x in range(10)]
self.constant = None
def getTile(self, key, callback): # pylint: disable=g-bad-name
"""Get the requested tile.
If the requested tile is already cached, it's returned (sent to the
callback) directly. If it's not cached, a check is made to see if
a lower-res version is cached, and if so that's interpolated up, before
a request for the actual tile is made.
Args:
key: The key of the tile to fetch.
callback: The callback to call when the tile is available. The callback
may be called more than once if a low-res version is available.
"""
result = self.GetCachedTile(key)
if result:
callback(result)
else:
# Interpolate what we have and put the key on the fetch queue.
self.queue.put((key, callback))
self.Interpolate(key, callback)
def Flush(self):
"""Empty the tile queue."""
while not self.queue.empty():
self.queue.get_nowait()
def CalcTiles(self, level, bbox):
"""Calculate which tiles to load based on the visible viewport.
Args:
level: The level at which to calculate the required tiles.
      bbox: The viewport coordinates as a tuple (xlo, ylo, xhi, yhi).
Returns:
The list of tile keys to fill the given viewport.
"""
tile_list = []
for y in xrange(int(bbox[1] / MapOverlay.TILE_HEIGHT),
int(bbox[3] / MapOverlay.TILE_HEIGHT + 1)):
for x in xrange(int(bbox[0] / MapOverlay.TILE_WIDTH),
int(bbox[2] / MapOverlay.TILE_WIDTH + 1)):
tile_list.append((level, x, y))
return tile_list
def Interpolate(self, key, callback):
"""Upsample a lower res tile if one is available.
Args:
key: The tile key to upsample.
callback: The callback to call when the tile is ready.
"""
level, x, y = key
delta = 1
result = None
while level - delta > 0 and result is None:
      prevkey = (level - delta, x // 2, y // 2)  # Integer parent-tile coords.
result = self.GetCachedTile(prevkey)
if not result:
(_, x, y) = prevkey
delta += 1
if result:
px = (key[1] % 2 ** delta) * MapOverlay.TILE_WIDTH / 2 ** delta
py = (key[2] % 2 ** delta) * MapOverlay.TILE_HEIGHT / 2 ** delta
image = (result.crop([px, py,
px + MapOverlay.TILE_WIDTH / 2 ** delta,
py + MapOverlay.TILE_HEIGHT / 2 ** delta])
.resize((MapOverlay.TILE_WIDTH, MapOverlay.TILE_HEIGHT)))
callback(image)
def PutCacheTile(self, key, image):
"""Insert a new tile in the cache and eject old ones if it's too big."""
cache_key = (self.url,) + key
MapOverlay._images[cache_key] = image
MapOverlay._lru_keys.append(cache_key)
while len(MapOverlay._lru_keys) > MapOverlay.MAX_CACHE:
remove_key = MapOverlay._lru_keys.pop(0)
try:
MapOverlay._images.pop(remove_key)
except KeyError:
# Just in case someone removed this before we did.
pass
def GetCachedTile(self, key):
"""Returns the specified tile if it's in the cache."""
cache_key = (self.url,) + key
return MapOverlay._images.get(cache_key, None)
class TileFetcher(threading.Thread):
"""A threaded URL fetcher."""
def __init__(self, overlay):
threading.Thread.__init__(self)
self.overlay = overlay
self.setDaemon(True)
self.start()
def run(self):
"""Pull URLs off the ovelay's queue and call the callback when done."""
while True:
(key, callback) = self.overlay.queue.get()
# Check one more time that we don't have this yet.
if not self.overlay.GetCachedTile(key):
(level, x, y) = key
if x >= 0 and y >= 0 and x <= 2 ** level-1 and y <= 2 ** level-1:
try:
if self.overlay.tile_fetcher is not None:
data = self.overlay.tile_fetcher.fetch_tile(x=x, y=y, z=level)
else:
url = self.overlay.url % key
data = urllib.request.urlopen(url).read()
except Exception as e: # pylint: disable=broad-except
print(e, file=sys.stderr)
else:
# PhotoImage can't handle alpha on LA images.
image = Image.open(six.BytesIO(data)).convert('RGBA')
callback(image)
self.overlay.PutCacheTile(key, image)
def MakeOverlay(mapid, baseurl=BASE_URL):
"""Create an overlay from a mapid."""
url = (baseurl + '/map/' + mapid['mapid'] + '/%d/%d/%d?token=' +
mapid['token'])
return MapOverlay(url, tile_fetcher=mapid['tile_fetcher'])
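# Illustrative sketch (not part of the original module): CalcTiles only does
# tile-space arithmetic, so we can ask which 256x256 tile keys cover a 512x512
# viewport whose upper-left corner sits at pixel (300, 300) of zoom level 3.
# Note that constructing a MapOverlay starts its (idle) daemon fetcher threads.
def _demo_calc_tiles():
  overlay = MapOverlay(DEFAULT_MAP_URL_PATTERN)
  bbox = [300, 300, 812, 812]        # [xlo, ylo, xhi, yhi] in pixels.
  return overlay.CalcTiles(3, bbox)  # 9 keys: (3, x, y) for x, y in 1..3.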
#
# A global MapClient instance for addToMap convenience.
#
map_instance = None
# pylint: disable=g-bad-name
def addToMap(eeobject, vis_params=None, *unused_args):
"""Adds a layer to the default map instance.
Args:
eeobject: the object to add to the map.
vis_params: a dictionary of visualization parameters. See
ee.data.getMapId().
*unused_args: unused arguments, left for compatibility with the JS API.
This call exists to be an equivalent to the playground addToMap() call.
It uses a global MapInstance to hang on to "the map". If the MapInstance
isn't initialized, this creates a new one.
"""
# Flatten any lists to comma separated strings.
if vis_params:
vis_params = dict(vis_params)
for key in vis_params:
item = vis_params.get(key)
if (isinstance(item, collections.Iterable) and
not isinstance(item, six.string_types)):
vis_params[key] = ','.join([str(x) for x in item])
overlay = MakeOverlay(eeobject.getMapId(vis_params))
global map_instance
if not map_instance:
map_instance = MapClient()
map_instance.addOverlay(overlay)
def centerMap(lng, lat, zoom): # pylint: disable=g-bad-name
"""Center the default map instance at the given lat, lon and zoom values."""
global map_instance
if not map_instance:
map_instance = MapClient()
map_instance.CenterMap(lng, lat, zoom)
|
|
import numpy as np
from ..utils import check_random_state
# Maze state is represented as a 2-element NumPy array: (Y, X). Increasing Y is South.
# Possible actions, expressed as (delta-y, delta-x).
maze_actions = {
'N': np.array([-1, 0]),
'S': np.array([1, 0]),
'E': np.array([0, 1]),
'W': np.array([0, -1]),
}
def parse_topology(topology):
return np.array([list(row) for row in topology])
class Maze(object):
"""
Simple wrapper around a NumPy 2D array to handle flattened indexing and staying in bounds.
"""
def __init__(self, topology, true_obs_prob=.8, easy_obs_model=True):
self.topology = parse_topology(topology)
self.flat_topology = self.topology.ravel()
self.shape = self.topology.shape
self.true_obs_prob = true_obs_prob
self.easy_obs_model = easy_obs_model
        # If the observation model is easy, the agent observes which of its four
        # neighboring cells are walls (a 4-bit code, 16 possible observations).
        # Otherwise it only observes how many of its four neighbors are walls (0-4).
self.num_observations = 16 if easy_obs_model else 5
def in_bounds_flat(self, position):
        return 0 <= position < np.prod(self.shape)
def in_bounds_unflat(self, position):
return 0 <= position[0] < self.shape[0] and 0 <= position[1] < self.shape[1]
def get_flat(self, position):
if not self.in_bounds_flat(position):
raise IndexError("Position out of bounds: {}".format(position))
return self.flat_topology[position]
def get_unflat(self, position):
if not self.in_bounds_unflat(position):
raise IndexError("Position out of bounds: {}".format(position))
return self.topology[tuple(position)]
def flatten_index(self, index_tuple):
return np.ravel_multi_index(index_tuple, self.shape)
def unflatten_index(self, flattened_index):
return np.unravel_index(flattened_index, self.shape)
def flat_positions_containing(self, x):
return list(np.nonzero(self.flat_topology == x)[0])
def flat_positions_not_containing(self, x):
return list(np.nonzero(self.flat_topology != x)[0])
def get_inbound_index(self, index_tuple):
x = min(max(index_tuple[0],0),self.shape[0]-1)
y = min(max(index_tuple[1],0),self.shape[1]-1)
return x, y
def true_observation(self, index_tuple):
it = index_tuple
if type(it) == np.int64:
it = self.unflatten_index(it)
        neighbors = [(it[0]+1, it[1]),
                     (it[0]-1, it[1]),
                     (it[0], it[1]+1),
                     (it[0], it[1]-1)]
        # Out-of-bounds neighbors count as walls (the world edge is an implicit wall).
        if_wall = [(not self.in_bounds_unflat(n)) or self.get_unflat(n) == '#'
                   for n in neighbors]
        if self.easy_obs_model:
            # Easy model: observe which directions have walls (4-bit code, 0..15).
            obs = sum(np.array([8, 4, 2, 1]) * if_wall)
        else:
            # Hard model: observe only how many neighbors are walls (0..4).
            obs = sum(if_wall)
        return obs
def obs_distribution(self, index_tuple):
if type(index_tuple) == int:
index_tuple = self.unflatten_index(index_tuple)
other_obs_prob = (1-self.true_obs_prob)/(self.num_observations-1)
obs_distribution = [other_obs_prob] * self.num_observations
true_obs = self.true_observation(index_tuple)
obs_distribution[true_obs] = self.true_obs_prob
return obs_distribution
def get_all_obs_distribution(self):
return [self.obs_distribution((x,y)) for x in range(self.shape[0]) for y in range(self.shape[1])]
def observation(self, index_tuple):
if type(index_tuple) == int:
index_tuple = self.unflatten_index(index_tuple)
obs_distribution = self.obs_distribution(index_tuple)
obs = np.random.multinomial(1, obs_distribution)
return obs.tolist().index(1)
def __str__(self):
return '\n'.join(''.join(row) for row in self.topology.tolist())
def __repr__(self):
return 'Maze({})'.format(repr(self.topology.tolist()))
def move_avoiding_walls(maze, position, action):
"""
Return the new position after moving, and the event that happened ('hit-wall' or 'moved').
Works with the position and action as a (row, column) array.
"""
# Compute new position
new_position = position + action
# Compute collisions with walls, including implicit walls at the ends of the world.
if not maze.in_bounds_unflat(new_position) or maze.get_unflat(new_position) == '#':
return position, 'hit-wall'
return new_position, 'moved'
class GridWorld(object):
"""
A simple task in a maze: get to the goal.
Parameters
----------
maze : list of strings or lists
maze topology (see below)
rewards: dict of string to number. default: {'*': 10}.
Rewards obtained by being in a maze grid with the specified contents,
or experiencing the specified event (either 'hit-wall' or 'moved'). The
contributions of content reward and event reward are summed. For
example, you might specify a cost for moving by passing
rewards={'*': 10, 'moved': -1}.
terminal_markers: sequence of chars, default '*'
A grid cell containing any of these markers will be considered a
"terminal" state.
action_error_prob: float
With this probability, the requested action is ignored and a random
action is chosen instead.
random_state: None, int, or RandomState object
For repeatable experiments, you can pass a random state here. See
http://scikit-learn.org/stable/modules/generated/sklearn.utils.check_random_state.html
Notes
-----
Maze topology is expressed textually. Key:
'#': wall
'.': open (really, anything that's not '#')
'*': goal
'o': origin
"""
def __init__(self, maze, rewards={'*': 10}, terminal_markers='*',
action_error_prob=0, random_state=None, directions="NSEW", pomdp=False):
self.maze = Maze(maze) if not isinstance(maze, Maze) else maze
self.rewards = rewards
self.terminal_markers = terminal_markers
self.action_error_prob = action_error_prob
self.random_state = check_random_state(random_state)
self.actions = [maze_actions[direction] for direction in directions]
self.num_actions = len(self.actions)
self.state = None
self.reset()
self.num_states = self.maze.shape[0] * self.maze.shape[1]
self.pomdp = pomdp
def __repr__(self):
return 'GridWorld(maze={maze!r}, rewards={rewards}, terminal_markers={terminal_markers}, action_error_prob={action_error_prob})'.format(**self.__dict__)
def reset(self):
"""
Reset the position to a starting position (an 'o'), chosen at random.
"""
options = self.maze.flat_positions_containing('o')
self.state = options[self.random_state.choice(len(options))]
def is_terminal(self, state):
"""Check if the given state is a terminal state."""
return self.maze.get_flat(state) in self.terminal_markers
def observe(self):
"""
Return the current state as an integer.
The state is the index into the flattened maze.
"""
o = self.maze.observation(self.state) if self.pomdp else self.state
return o
def perform_action(self, action_idx):
"""Perform an action (specified by index), yielding a new state and reward."""
# In the absorbing end state, nothing does anything.
if self.is_terminal(self.state):
return self.observe(), 0
if self.action_error_prob and self.random_state.rand() < self.action_error_prob:
action_idx = self.random_state.choice(self.num_actions)
action = self.actions[action_idx]
new_state_tuple, result = move_avoiding_walls(self.maze, self.maze.unflatten_index(self.state), action)
self.state = self.maze.flatten_index(new_state_tuple)
reward = self.rewards.get(self.maze.get_flat(self.state), 0) + self.rewards.get(result, 0)
return self.observe(), reward
def as_mdp(self):
transition_probabilities = np.zeros((self.num_states, self.num_actions, self.num_states))
rewards = np.zeros((self.num_states, self.num_actions, self.num_states))
action_rewards = np.zeros((self.num_states, self.num_actions))
destination_rewards = np.zeros(self.num_states)
for state in range(self.num_states):
destination_rewards[state] = self.rewards.get(self.maze.get_flat(state), 0)
is_terminal_state = np.zeros(self.num_states, dtype=np.bool)
for state in range(self.num_states):
if self.is_terminal(state):
is_terminal_state[state] = True
transition_probabilities[state, :, state] = 1.
else:
for action in range(self.num_actions):
new_state_tuple, result = move_avoiding_walls(self.maze, self.maze.unflatten_index(state), self.actions[action])
new_state = self.maze.flatten_index(new_state_tuple)
transition_probabilities[state, action, new_state] = 1.
action_rewards[state, action] = self.rewards.get(result, 0)
# Now account for action noise.
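        # With probability action_error_prob the chosen action is replaced by a
        # uniformly random one, so the effective transitions (and action rewards)
        # are a convex combination of the deterministic values and their mean
        # over actions.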
transitions_given_random_action = transition_probabilities.mean(axis=1, keepdims=True)
transition_probabilities *= (1 - self.action_error_prob)
transition_probabilities += self.action_error_prob * transitions_given_random_action
rewards_given_random_action = action_rewards.mean(axis=1, keepdims=True)
action_rewards = (1 - self.action_error_prob) * action_rewards + self.action_error_prob * rewards_given_random_action
rewards = action_rewards[:, :, None] + destination_rewards[None, None, :]
rewards[is_terminal_state] = 0
return transition_probabilities, rewards
def get_max_reward(self):
transition_probabilities, rewards = self.as_mdp()
return rewards.max()
### Old API, where terminal states were None.
def observe_old(self):
return None if self.is_terminal(self.state) else self.state
def perform_action_old(self, action_idx):
new_state, reward = self.perform_action(action_idx)
if self.is_terminal(new_state):
return None, reward
else:
return new_state, reward
samples = {
'trivial': [
'###',
'#o#',
'#.#',
'#*#',
'###'],
'larger': [
'#########',
'#..#....#',
'#..#..#.#',
'#..#..#.#',
'#..#.##.#',
'#....*#.#',
'#######.#',
'#o......#',
'#########']
}
def construct_cliff_task(width, height, goal_reward=50, move_reward=-1, cliff_reward=-100, **kw):
"""
Construct a 'cliff' task, a GridWorld with a "cliff" between the start and
goal. Falling off the cliff gives a large negative reward and ends the
episode.
Any other parameters, like action_error_prob, are passed on to the
GridWorld constructor.
"""
maze = ['.' * width] * (height - 1) # middle empty region
maze.append('o' + 'X' * (width - 2) + '*') # bottom goal row
rewards = {
'*': goal_reward,
'moved': move_reward,
'hit-wall': move_reward,
'X': cliff_reward
}
return GridWorld(maze, rewards=rewards, terminal_markers='*X', **kw)
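# Illustrative usage sketch (added for exposition; not part of the original
# module). It drives the GridWorld API above on the bundled 'larger' sample
# maze; the step count and error probability are arbitrary example values.
def _demo_random_walk(n_steps=20):
    """Run a short random-action episode and return the accumulated reward."""
    task = GridWorld(samples['larger'], action_error_prob=0.1, random_state=0)
    total_reward = 0
    for _ in range(n_steps):
        action_idx = task.random_state.choice(task.num_actions)
        state, reward = task.perform_action(action_idx)
        total_reward += reward
        if task.is_terminal(state):
            break
    return total_reward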
|
|
# -*- coding: utf-8 -*-
""" Sahana Eden Transport Model
@copyright: 2012-13 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ["S3TransportModel",
]
from gluon import *
from gluon.storage import Storage
from ..s3 import *
# =============================================================================
class S3TransportModel(S3Model):
"""
http://eden.sahanafoundation.org/wiki/BluePrint/Transport
"""
names = ["transport_airport",
"transport_heliport",
"transport_seaport",
]
def model(self):
T = current.T
messages = current.messages
UNKNOWN_OPT = messages.UNKNOWN_OPT
configure = self.configure
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
location_id = self.gis_location_id
organisation_id = self.org_organisation_id
super_link = self.super_link
# ---------------------------------------------------------------------
# Airports
#
storage_types = {
1: T("covered"),
2: T("uncovered"),
}
transport_airport_capacity_opts = {
1: "",
2: T("number of planes"),
3: T("m3")
}
tablename = "transport_airport"
define_table(tablename,
super_link("site_id", "org_site"),
Field("name", notnull=True,
length=64, # Mayon Compatibility
label=T("Name")),
Field("code",
length=10,
                           # Deployments that don't want office codes can hide them
#readable=False,
#writable=False,
# Mayon compatibility
# @ToDo: Deployment Setting to add validator to make these unique
#notnull=True,
#unique=True,
label=T("Code")),
organisation_id(),
location_id(),
Field("restrictions", "text",
label=T("Restrictions")),
Field("ils", "boolean",
represent=lambda bool: \
(bool and [T("Yes")] or [T("No")])[0],
label=T("Instrument Landing System")),
Field("lighting", "boolean",
represent=lambda bool: \
(bool and [T("Yes")] or [T("No")])[0],
label=T("Lighting")),
Field("immigration_customs_capabilities", "text",
label=T("Immigration and Customs Capabilities")),
Field("aircraft_max_size", "text",
label=T("Aircraft Maximum Size")),
Field("security_desc", "text",
label=T("Security Description"),
comment=DIV(_class="tooltip",
_title="%s|%s" % (T("Security Description"),
T("Description of perimeter fencing, security guards, security lighting.")))),
# @ToDo: put storage type inline
Field("storage_capacity", "double",
label=T("Storage Capacity (m3)")),
Field("storage_type", "integer",
requires = IS_EMPTY_OR(IS_IN_SET(storage_types)),
label=T("Storage Type")),
# @ToDo: put units inline
Field("parking_tarmac_space", "double",
label=T("Parking/Tarmac Space Capacity")),
Field("capacity", "integer",
label = T("Parking/Tarmac Space Units"),
requires = IS_IN_SET(transport_airport_capacity_opts, zero=None),
default = 1,
represent = lambda opt: \
transport_airport_capacity_opts.get(opt, UNKNOWN_OPT)),
Field("helipad_info", "text",
label=T("Helipad Information")),
self.pr_person_id(label=T("Information Source")),
Field("obsolete", "boolean",
label=T("Obsolete"),
represent=lambda bool: \
(bool and [T("Obsolete")] or [current.messages["NONE"]])[0],
default=False,
readable=False,
writable=False),
s3_comments(),
*s3_meta_fields())
# CRUD strings
ADD_AIRPORT = T("Create Airport")
crud_strings[tablename] = Storage(
label_create=T("Create Airport"),
title_display=T("Airport Details"),
title_list=T("Airports"),
title_update=T("Edit Airport"),
title_upload=T("Import Airports"),
label_list_button=T("List Airports"),
label_delete_button=T("Delete Airport"),
msg_record_created=T("Airport added"),
msg_record_modified=T("Airport updated"),
msg_record_deleted=T("Airport deleted"),
msg_list_empty=T("No Airports currently registered"))
configure(tablename,
onaccept = self.transport_airport_onaccept,
super_entity = "org_site",
)
# ---------------------------------------------------------------------
# Heliports
#
tablename = "transport_heliport"
define_table(tablename,
super_link("site_id", "org_site"),
Field("name", notnull=True,
length=64, # Mayon Compatibility
label=T("Name")),
Field("code",
length=10,
                           # Deployments that don't want office codes can hide them
#readable=False,
#writable=False,
# Mayon compatibility
# @ToDo: Deployment Setting to add validator to make these unique
#notnull=True,
#unique=True,
label=T("Code")),
organisation_id(),
location_id(),
Field("obsolete", "boolean",
label=T("Obsolete"),
represent=lambda opt: \
(opt and [T("Obsolete")] or [current.messages["NONE"]])[0],
default=False,
readable=False,
writable=False),
s3_comments(),
*s3_meta_fields())
# CRUD strings
ADD_HELIPORT = T("Create Heliport")
crud_strings[tablename] = Storage(
label_create=T("Create Heliport"),
title_display=T("Heliport Details"),
title_list=T("Heliports"),
title_update=T("Edit Heliport"),
title_upload=T("Import Heliports"),
label_list_button=T("List Heliports"),
label_delete_button=T("Delete Heliport"),
msg_record_created=T("Heliport added"),
msg_record_modified=T("Heliport updated"),
msg_record_deleted=T("Heliport deleted"),
msg_list_empty=T("No Heliports currently registered"))
configure(tablename,
onaccept = self.transport_heliport_onaccept,
super_entity = "org_site",
)
# ---------------------------------------------------------------------
# Seaports
#
ownership_opts = {
1: T("Public"),
2: T("Private")
}
unit_opts = {
1: T("ft"),
2: T("m")
}
tablename = "transport_seaport"
define_table(tablename,
super_link("site_id", "org_site"),
Field("name", notnull=True,
length=64, # Mayon Compatibility
label=T("Name")),
Field("code",
length=10,
                           # Deployments that don't want office codes can hide them
#readable=False,
#writable=False,
# Mayon compatibility
# @ToDo: Deployment Setting to add validator to make these unique
#notnull=True,
#unique=True,
label=T("Code")),
Field("ownership_type", "integer",
requires = IS_IN_SET(ownership_opts, zero=None),
default = 1,
label = T("Ownership"),
represent = lambda opt: \
ownership_opts.get(opt, UNKNOWN_OPT)),
Field("max_height", "double",
label=T("Max Height")),
Field("max_height_units", "integer",
requires = IS_IN_SET(unit_opts, zero=None),
default = 1,
label = T("Units"),
represent = lambda opt: \
unit_opts.get(opt, UNKNOWN_OPT)),
Field("roll_on_off", "boolean",
default=False,
represent=lambda opt: \
(opt and [T("Yes")] or [T("No")])[0],
label=T("Roll On Roll Off Berth")),
Field("cargo_pier_depth", "double",
label=T("Cargo Pier Depth")),
Field("cargo_pier_depth_units", "integer",
requires = IS_IN_SET(unit_opts, zero=None),
default = 1,
label = T("Units"),
represent = lambda opt: \
unit_opts.get(opt, UNKNOWN_OPT)),
Field("oil_terminal_depth", "double",
label=T("Oil Terminal Depth")),
Field("oil_terminal_depth_units", "integer",
requires = IS_IN_SET(unit_opts, zero=None),
default = 1,
label = T("Units"),
represent = lambda opt: \
unit_opts.get(opt, UNKNOWN_OPT)),
Field("dry_dock", "boolean",
default=False,
represent=lambda opt: \
(opt and [T("Yes")] or [T("No")])[0],
label=T("Dry Dock")),
Field("vessel_max_length", "double",
label=T("Vessel Max Length")),
Field("vessel_max_length_units", "integer",
requires = IS_IN_SET(unit_opts, zero=None),
default = 1,
label = T("Units"),
represent = lambda opt: \
unit_opts.get(opt, UNKNOWN_OPT)),
Field("repairs", "text",
label=T("Repairs")),
Field ("shelter", "text",
label=T("Shelter")),
Field("warehouse_capacity", "double",
label=T("Warehousing Storage Capacity")),
Field("secure_storage_capacity", "double",
label=T("Secure Storage Capacity")),
Field("customs_warehouse_capacity", "double",
label=T("Customs Warehousing Storage Capacity")),
Field("tugs", "integer",
label=T("Number of Tugboats")),
Field("tug_capacity", "double",
label=T("Tugboat Capacity")),
Field("barges", "integer",
label=T("Number of Barges")),
Field("barge_capacity", "double",
label=T("Barge Capacity")),
Field("loading_equipment", "text",
label=T("Loading Equipment")),
Field("customs_capacity", "text",
label=T("Customs Capacity")),
Field("security", "text",
label=T("Security")),
Field("high_tide_depth", "double",
label=T("High Tide Depth")),
Field("high_tide_depth_units", "integer",
requires = IS_IN_SET(unit_opts, zero=None),
default = 1,
label = T("Units"),
represent = lambda opt: \
unit_opts.get(opt, UNKNOWN_OPT)),
Field("low_tide_depth", "double",
label=T("Low Tide Depth")),
Field("low_tide_depth_units", "integer",
requires = IS_IN_SET(unit_opts, zero=None),
default = 1,
label = T("Units"),
represent = lambda opt: \
unit_opts.get(opt, UNKNOWN_OPT)),
Field("flood_depth", "double",
label=T("Flood Depth")),
Field("flood_depth_units", "integer",
requires = IS_IN_SET(unit_opts, zero=None),
default = 1,
label = T("Units"),
represent = lambda opt: \
unit_opts.get(opt, UNKNOWN_OPT)),
organisation_id(),
location_id(),
Field("obsolete", "boolean",
label=T("Obsolete"),
represent=lambda opt: \
(opt and [T("Closed")] or [T("Operational")])[0],
default=False,
),
s3_comments(),
*s3_meta_fields())
# CRUD strings
ADD_SEAPORT = T("Create Seaport")
crud_strings[tablename] = Storage(
label_create=T("Create Seaport"),
title_display=T("Seaport Details"),
title_list=T("Seaports"),
title_update=T("Edit Seaport"),
title_upload=T("Import Seaports"),
label_list_button=T("List Seaports"),
label_delete_button=T("Delete Seaport"),
msg_record_created=T("Seaport added"),
msg_record_modified=T("Seaport updated"),
msg_record_deleted=T("Seaport deleted"),
msg_list_empty=T("No Seaports currently registered"))
configure(tablename,
onaccept = self.transport_seaport_onaccept,
super_entity = "org_site",
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
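        # This model exposes no names to other models, hence the empty dict.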
return dict()
# -------------------------------------------------------------------------
@staticmethod
def transport_airport_onaccept(form):
"""
Update Affiliation, record ownership and component ownership
"""
current.s3db.org_update_affiliations("transport_airport", form.vars)
# -------------------------------------------------------------------------
@staticmethod
def transport_heliport_onaccept(form):
"""
Update Affiliation, record ownership and component ownership
"""
current.s3db.org_update_affiliations("transport_heliport", form.vars)
# -------------------------------------------------------------------------
@staticmethod
def transport_seaport_onaccept(form):
"""
Update Affiliation, record ownership and component ownership
"""
current.s3db.org_update_affiliations("transport_seaport", form.vars)
# END =========================================================================
|
|
"""Test to verify that Home Assistant core works."""
# pylint: disable=protected-access
import asyncio
import unittest
from unittest.mock import patch, MagicMock
from datetime import datetime, timedelta
import pytz
import homeassistant.core as ha
from homeassistant.exceptions import InvalidEntityFormatError
from homeassistant.util.async import (
run_callback_threadsafe, run_coroutine_threadsafe)
import homeassistant.util.dt as dt_util
from homeassistant.util.unit_system import (METRIC_SYSTEM)
from homeassistant.const import (
__version__, EVENT_STATE_CHANGED, ATTR_FRIENDLY_NAME, CONF_UNIT_SYSTEM)
from tests.common import get_test_home_assistant
PST = pytz.timezone('America/Los_Angeles')
def test_split_entity_id():
"""Test split_entity_id."""
assert ha.split_entity_id('domain.object_id') == ['domain', 'object_id']
def test_async_add_job_schedule_callback():
"""Test that we schedule coroutines and add jobs to the job pool."""
hass = MagicMock()
job = MagicMock()
ha.HomeAssistant.async_add_job(hass, ha.callback(job))
assert len(hass.loop.call_soon.mock_calls) == 1
assert len(hass.loop.create_task.mock_calls) == 0
assert len(hass.add_job.mock_calls) == 0
@patch('asyncio.iscoroutinefunction', return_value=True)
def test_async_add_job_schedule_coroutinefunction(mock_iscoro):
"""Test that we schedule coroutines and add jobs to the job pool."""
hass = MagicMock()
job = MagicMock()
ha.HomeAssistant.async_add_job(hass, job)
assert len(hass.loop.call_soon.mock_calls) == 0
assert len(hass.loop.create_task.mock_calls) == 1
assert len(hass.add_job.mock_calls) == 0
@patch('asyncio.iscoroutinefunction', return_value=False)
def test_async_add_job_add_threaded_job_to_pool(mock_iscoro):
"""Test that we schedule coroutines and add jobs to the job pool."""
hass = MagicMock()
job = MagicMock()
ha.HomeAssistant.async_add_job(hass, job)
assert len(hass.loop.call_soon.mock_calls) == 0
assert len(hass.loop.create_task.mock_calls) == 0
assert len(hass.loop.run_in_executor.mock_calls) == 1
def test_async_run_job_calls_callback():
"""Test that the callback annotation is respected."""
hass = MagicMock()
calls = []
def job():
calls.append(1)
ha.HomeAssistant.async_run_job(hass, ha.callback(job))
assert len(calls) == 1
assert len(hass.async_add_job.mock_calls) == 0
def test_async_run_job_delegates_non_async():
"""Test that the callback annotation is respected."""
hass = MagicMock()
calls = []
def job():
calls.append(1)
ha.HomeAssistant.async_run_job(hass, job)
assert len(calls) == 0
assert len(hass.async_add_job.mock_calls) == 1
class TestHomeAssistant(unittest.TestCase):
"""Test the Home Assistant core classes."""
# pylint: disable=invalid-name
def setUp(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
# pylint: disable=invalid-name
def tearDown(self):
"""Stop everything that was started."""
self.hass.stop()
# This test hangs on `loop.add_signal_handler`
# def test_start_and_sigterm(self):
# """Start the test."""
# calls = []
# self.hass.bus.listen_once(EVENT_HOMEASSISTANT_START,
# lambda event: calls.append(1))
# self.hass.start()
# self.assertEqual(1, len(calls))
# self.hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP,
# lambda event: calls.append(1))
# os.kill(os.getpid(), signal.SIGTERM)
# self.hass.block_till_done()
# self.assertEqual(1, len(calls))
    def test_pending_scheduler(self):
        """Add coros to pending tasks and check that cleanup clears them."""
call_count = []
@asyncio.coroutine
def test_coro():
"""Test Coro."""
call_count.append('call')
for i in range(50):
self.hass.add_job(test_coro())
run_coroutine_threadsafe(
asyncio.wait(self.hass._pending_tasks, loop=self.hass.loop),
loop=self.hass.loop
).result()
with patch.object(self.hass.loop, 'call_later') as mock_later:
run_callback_threadsafe(
self.hass.loop, self.hass._async_tasks_cleanup).result()
assert mock_later.called
assert len(self.hass._pending_tasks) == 0
assert len(call_count) == 50
def test_async_add_job_pending_tasks_coro(self):
"""Add a coro to pending tasks."""
call_count = []
@asyncio.coroutine
def test_coro():
"""Test Coro."""
call_count.append('call')
for i in range(2):
self.hass.add_job(test_coro())
@asyncio.coroutine
def wait_finish_callback():
"""Wait until all stuff is scheduled."""
yield from asyncio.sleep(0, loop=self.hass.loop)
yield from asyncio.sleep(0, loop=self.hass.loop)
run_coroutine_threadsafe(
wait_finish_callback(), self.hass.loop).result()
assert len(self.hass._pending_tasks) == 2
self.hass.block_till_done()
assert len(call_count) == 2
def test_async_add_job_pending_tasks_executor(self):
"""Run a executor in pending tasks."""
call_count = []
def test_executor():
"""Test executor."""
call_count.append('call')
@asyncio.coroutine
def wait_finish_callback():
"""Wait until all stuff is scheduled."""
yield from asyncio.sleep(0, loop=self.hass.loop)
yield from asyncio.sleep(0, loop=self.hass.loop)
for i in range(2):
self.hass.add_job(test_executor)
run_coroutine_threadsafe(
wait_finish_callback(), self.hass.loop).result()
assert len(self.hass._pending_tasks) == 2
self.hass.block_till_done()
assert len(call_count) == 2
def test_async_add_job_pending_tasks_callback(self):
"""Run a callback in pending tasks."""
call_count = []
@ha.callback
def test_callback():
"""Test callback."""
call_count.append('call')
@asyncio.coroutine
def wait_finish_callback():
"""Wait until all stuff is scheduled."""
yield from asyncio.sleep(0, loop=self.hass.loop)
yield from asyncio.sleep(0, loop=self.hass.loop)
for i in range(2):
self.hass.add_job(test_callback)
run_coroutine_threadsafe(
wait_finish_callback(), self.hass.loop).result()
self.hass.block_till_done()
assert len(self.hass._pending_tasks) == 0
assert len(call_count) == 2
class TestEvent(unittest.TestCase):
"""A Test Event class."""
def test_eq(self):
"""Test events."""
now = dt_util.utcnow()
data = {'some': 'attr'}
event1, event2 = [
ha.Event('some_type', data, time_fired=now)
for _ in range(2)
]
self.assertEqual(event1, event2)
def test_repr(self):
"""Test that repr method works."""
self.assertEqual(
"<Event TestEvent[L]>",
str(ha.Event("TestEvent")))
self.assertEqual(
"<Event TestEvent[R]: beer=nice>",
str(ha.Event("TestEvent",
{"beer": "nice"},
ha.EventOrigin.remote)))
def test_as_dict(self):
"""Test as dictionary."""
event_type = 'some_type'
now = dt_util.utcnow()
data = {'some': 'attr'}
event = ha.Event(event_type, data, ha.EventOrigin.local, now)
expected = {
'event_type': event_type,
'data': data,
'origin': 'LOCAL',
'time_fired': now,
}
self.assertEqual(expected, event.as_dict())
class TestEventBus(unittest.TestCase):
"""Test EventBus methods."""
# pylint: disable=invalid-name
def setUp(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.bus = self.hass.bus
# pylint: disable=invalid-name
def tearDown(self):
"""Stop down stuff we started."""
self.hass.stop()
def test_add_remove_listener(self):
"""Test remove_listener method."""
self.hass.allow_pool = False
old_count = len(self.bus.listeners)
def listener(_): pass
unsub = self.bus.listen('test', listener)
self.assertEqual(old_count + 1, len(self.bus.listeners))
# Remove listener
unsub()
self.assertEqual(old_count, len(self.bus.listeners))
# Should do nothing now
unsub()
def test_unsubscribe_listener(self):
"""Test unsubscribe listener from returned function."""
calls = []
@ha.callback
def listener(event):
"""Mock listener."""
calls.append(event)
unsub = self.bus.listen('test', listener)
self.bus.fire('test')
self.hass.block_till_done()
assert len(calls) == 1
unsub()
self.bus.fire('event')
self.hass.block_till_done()
assert len(calls) == 1
def test_listen_once_event_with_callback(self):
"""Test listen_once_event method."""
runs = []
@ha.callback
def event_handler(event):
runs.append(event)
self.bus.listen_once('test_event', event_handler)
self.bus.fire('test_event')
# Second time it should not increase runs
self.bus.fire('test_event')
self.hass.block_till_done()
self.assertEqual(1, len(runs))
def test_listen_once_event_with_coroutine(self):
"""Test listen_once_event method."""
runs = []
@asyncio.coroutine
def event_handler(event):
runs.append(event)
self.bus.listen_once('test_event', event_handler)
self.bus.fire('test_event')
# Second time it should not increase runs
self.bus.fire('test_event')
self.hass.block_till_done()
self.assertEqual(1, len(runs))
def test_listen_once_event_with_thread(self):
"""Test listen_once_event method."""
runs = []
def event_handler(event):
runs.append(event)
self.bus.listen_once('test_event', event_handler)
self.bus.fire('test_event')
# Second time it should not increase runs
self.bus.fire('test_event')
self.hass.block_till_done()
self.assertEqual(1, len(runs))
def test_thread_event_listener(self):
"""Test a event listener listeners."""
thread_calls = []
def thread_listener(event):
thread_calls.append(event)
self.bus.listen('test_thread', thread_listener)
self.bus.fire('test_thread')
self.hass.block_till_done()
assert len(thread_calls) == 1
def test_callback_event_listener(self):
"""Test a event listener listeners."""
callback_calls = []
@ha.callback
def callback_listener(event):
callback_calls.append(event)
self.bus.listen('test_callback', callback_listener)
self.bus.fire('test_callback')
self.hass.block_till_done()
assert len(callback_calls) == 1
def test_coroutine_event_listener(self):
"""Test a event listener listeners."""
coroutine_calls = []
@asyncio.coroutine
def coroutine_listener(event):
coroutine_calls.append(event)
self.bus.listen('test_coroutine', coroutine_listener)
self.bus.fire('test_coroutine')
self.hass.block_till_done()
assert len(coroutine_calls) == 1
class TestState(unittest.TestCase):
"""Test State methods."""
def test_init(self):
"""Test state.init."""
self.assertRaises(
InvalidEntityFormatError, ha.State,
'invalid_entity_format', 'test_state')
def test_domain(self):
"""Test domain."""
state = ha.State('some_domain.hello', 'world')
self.assertEqual('some_domain', state.domain)
def test_object_id(self):
"""Test object ID."""
state = ha.State('domain.hello', 'world')
self.assertEqual('hello', state.object_id)
def test_name_if_no_friendly_name_attr(self):
"""Test if there is no friendly name."""
state = ha.State('domain.hello_world', 'world')
self.assertEqual('hello world', state.name)
def test_name_if_friendly_name_attr(self):
"""Test if there is a friendly name."""
name = 'Some Unique Name'
state = ha.State('domain.hello_world', 'world',
{ATTR_FRIENDLY_NAME: name})
self.assertEqual(name, state.name)
def test_dict_conversion(self):
"""Test conversion of dict."""
state = ha.State('domain.hello', 'world', {'some': 'attr'})
self.assertEqual(state, ha.State.from_dict(state.as_dict()))
def test_dict_conversion_with_wrong_data(self):
"""Test conversion with wrong data."""
self.assertIsNone(ha.State.from_dict(None))
self.assertIsNone(ha.State.from_dict({'state': 'yes'}))
self.assertIsNone(ha.State.from_dict({'entity_id': 'yes'}))
def test_repr(self):
"""Test state.repr."""
self.assertEqual("<state happy.happy=on @ 1984-12-08T12:00:00+00:00>",
str(ha.State(
"happy.happy", "on",
last_changed=datetime(1984, 12, 8, 12, 0, 0))))
self.assertEqual(
"<state happy.happy=on; brightness=144 @ "
"1984-12-08T12:00:00+00:00>",
str(ha.State("happy.happy", "on", {"brightness": 144},
datetime(1984, 12, 8, 12, 0, 0))))
class TestStateMachine(unittest.TestCase):
"""Test State machine methods."""
# pylint: disable=invalid-name
def setUp(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.states = self.hass.states
self.states.set("light.Bowl", "on")
self.states.set("switch.AC", "off")
# pylint: disable=invalid-name
def tearDown(self):
"""Stop down stuff we started."""
self.hass.stop()
def test_is_state(self):
"""Test is_state method."""
self.assertTrue(self.states.is_state('light.Bowl', 'on'))
self.assertFalse(self.states.is_state('light.Bowl', 'off'))
self.assertFalse(self.states.is_state('light.Non_existing', 'on'))
def test_is_state_attr(self):
"""Test is_state_attr method."""
self.states.set("light.Bowl", "on", {"brightness": 100})
self.assertTrue(
self.states.is_state_attr('light.Bowl', 'brightness', 100))
self.assertFalse(
self.states.is_state_attr('light.Bowl', 'friendly_name', 200))
self.assertFalse(
self.states.is_state_attr('light.Bowl', 'friendly_name', 'Bowl'))
self.assertFalse(
self.states.is_state_attr('light.Non_existing', 'brightness', 100))
def test_entity_ids(self):
"""Test get_entity_ids method."""
ent_ids = self.states.entity_ids()
self.assertEqual(2, len(ent_ids))
self.assertTrue('light.bowl' in ent_ids)
self.assertTrue('switch.ac' in ent_ids)
ent_ids = self.states.entity_ids('light')
self.assertEqual(1, len(ent_ids))
self.assertTrue('light.bowl' in ent_ids)
def test_all(self):
"""Test everything."""
states = sorted(state.entity_id for state in self.states.all())
self.assertEqual(['light.bowl', 'switch.ac'], states)
def test_remove(self):
"""Test remove method."""
events = []
@ha.callback
def callback(event):
events.append(event)
self.hass.bus.listen(EVENT_STATE_CHANGED, callback)
self.assertIn('light.bowl', self.states.entity_ids())
self.assertTrue(self.states.remove('light.bowl'))
self.hass.block_till_done()
self.assertNotIn('light.bowl', self.states.entity_ids())
self.assertEqual(1, len(events))
self.assertEqual('light.bowl', events[0].data.get('entity_id'))
self.assertIsNotNone(events[0].data.get('old_state'))
self.assertEqual('light.bowl', events[0].data['old_state'].entity_id)
self.assertIsNone(events[0].data.get('new_state'))
# If it does not exist, we should get False
self.assertFalse(self.states.remove('light.Bowl'))
self.hass.block_till_done()
self.assertEqual(1, len(events))
    def test_case_insensitivity(self):
        """Test that entity_id matching is case insensitive."""
runs = []
@ha.callback
def callback(event):
runs.append(event)
self.hass.bus.listen(EVENT_STATE_CHANGED, callback)
self.states.set('light.BOWL', 'off')
self.hass.block_till_done()
self.assertTrue(self.states.is_state('light.bowl', 'off'))
self.assertEqual(1, len(runs))
def test_last_changed_not_updated_on_same_state(self):
"""Test to not update the existing, same state."""
state = self.states.get('light.Bowl')
future = dt_util.utcnow() + timedelta(hours=10)
with patch('homeassistant.util.dt.utcnow', return_value=future):
self.states.set("light.Bowl", "on", {'attr': 'triggers_change'})
self.hass.block_till_done()
state2 = self.states.get('light.Bowl')
assert state2 is not None
assert state.last_changed == state2.last_changed
def test_force_update(self):
"""Test force update option."""
events = []
@ha.callback
def callback(event):
events.append(event)
self.hass.bus.listen(EVENT_STATE_CHANGED, callback)
self.states.set('light.bowl', 'on')
self.hass.block_till_done()
self.assertEqual(0, len(events))
self.states.set('light.bowl', 'on', None, True)
self.hass.block_till_done()
self.assertEqual(1, len(events))
class TestServiceCall(unittest.TestCase):
"""Test ServiceCall class."""
def test_repr(self):
"""Test repr method."""
self.assertEqual(
"<ServiceCall homeassistant.start>",
str(ha.ServiceCall('homeassistant', 'start')))
self.assertEqual(
"<ServiceCall homeassistant.start: fast=yes>",
str(ha.ServiceCall('homeassistant', 'start', {"fast": "yes"})))
class TestServiceRegistry(unittest.TestCase):
"""Test ServicerRegistry methods."""
# pylint: disable=invalid-name
def setUp(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.services = self.hass.services
@ha.callback
def mock_service(call):
pass
self.services.register("Test_Domain", "TEST_SERVICE", mock_service)
# pylint: disable=invalid-name
def tearDown(self):
"""Stop down stuff we started."""
self.hass.stop()
def test_has_service(self):
"""Test has_service method."""
self.assertTrue(
self.services.has_service("tesT_domaiN", "tesT_servicE"))
self.assertFalse(
self.services.has_service("test_domain", "non_existing"))
self.assertFalse(
self.services.has_service("non_existing", "test_service"))
def test_services(self):
"""Test services."""
expected = {
'test_domain': {'test_service': {'description': '', 'fields': {}}}
}
self.assertEqual(expected, self.services.services)
def test_call_with_blocking_done_in_time(self):
"""Test call with blocking."""
calls = []
@ha.callback
def service_handler(call):
"""Service handler."""
calls.append(call)
self.services.register("test_domain", "register_calls",
service_handler)
self.assertTrue(
self.services.call('test_domain', 'REGISTER_CALLS', blocking=True))
self.assertEqual(1, len(calls))
def test_call_non_existing_with_blocking(self):
"""Test non-existing with blocking."""
prior = ha.SERVICE_CALL_LIMIT
try:
ha.SERVICE_CALL_LIMIT = 0.01
assert not self.services.call('test_domain', 'i_do_not_exist',
blocking=True)
finally:
ha.SERVICE_CALL_LIMIT = prior
def test_async_service(self):
"""Test registering and calling an async service."""
calls = []
@asyncio.coroutine
def service_handler(call):
"""Service handler coroutine."""
calls.append(call)
self.services.register('test_domain', 'register_calls',
service_handler)
self.assertTrue(
self.services.call('test_domain', 'REGISTER_CALLS', blocking=True))
self.hass.block_till_done()
self.assertEqual(1, len(calls))
def test_callback_service(self):
"""Test registering and calling an async service."""
calls = []
@ha.callback
def service_handler(call):
"""Service handler coroutine."""
calls.append(call)
self.services.register('test_domain', 'register_calls',
service_handler)
self.assertTrue(
self.services.call('test_domain', 'REGISTER_CALLS', blocking=True))
self.hass.block_till_done()
self.assertEqual(1, len(calls))
class TestConfig(unittest.TestCase):
"""Test configuration methods."""
# pylint: disable=invalid-name
def setUp(self):
"""Setup things to be run when tests are started."""
self.config = ha.Config()
self.assertIsNone(self.config.config_dir)
def test_path_with_file(self):
"""Test get_config_path method."""
self.config.config_dir = '/tmp/ha-config'
self.assertEqual("/tmp/ha-config/test.conf",
self.config.path("test.conf"))
def test_path_with_dir_and_file(self):
"""Test get_config_path method."""
self.config.config_dir = '/tmp/ha-config'
self.assertEqual("/tmp/ha-config/dir/test.conf",
self.config.path("dir", "test.conf"))
def test_as_dict(self):
"""Test as dict."""
self.config.config_dir = '/tmp/ha-config'
expected = {
'latitude': None,
'longitude': None,
CONF_UNIT_SYSTEM: METRIC_SYSTEM.as_dict(),
'location_name': None,
'time_zone': 'UTC',
'components': [],
'config_dir': '/tmp/ha-config',
'version': __version__,
}
self.assertEqual(expected, self.config.as_dict())
class TestAsyncCreateTimer(object):
"""Test create timer."""
@patch('homeassistant.core.asyncio.Event')
@patch('homeassistant.core.dt_util.utcnow')
def test_create_timer(self, mock_utcnow, mock_event, event_loop):
"""Test create timer fires correctly."""
hass = MagicMock()
now = mock_utcnow()
event = mock_event()
now.second = 1
mock_utcnow.reset_mock()
ha._async_create_timer(hass)
assert len(hass.bus.async_listen_once.mock_calls) == 2
start_timer = hass.bus.async_listen_once.mock_calls[1][1][1]
event_loop.run_until_complete(start_timer(None))
assert hass.loop.create_task.called
timer = hass.loop.create_task.mock_calls[0][1][0]
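        # The mocked stop event controls the timer loop: it keeps firing while
        # is_set() returns False and exits on the final True (the exact number
        # of polls per iteration is an assumption about the core timer internals).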
event.is_set.side_effect = False, False, True
event_loop.run_until_complete(timer)
assert len(mock_utcnow.mock_calls) == 1
assert hass.loop.call_soon.called
event_type, event_data = hass.loop.call_soon.mock_calls[0][1][1:]
assert ha.EVENT_TIME_CHANGED == event_type
assert {ha.ATTR_NOW: now} == event_data
stop_timer = hass.bus.async_listen_once.mock_calls[0][1][1]
stop_timer(None)
assert event.set.called
|
|
import numpy as nm
from sfepy.base.base import assert_
from sfepy.linalg import dot_sequences
from sfepy.terms.terms import Term, terms
from sfepy.terms.terms_th import THTerm, ETHTerm
class DotProductVolumeTerm(Term):
r"""
Volume :math:`L^2(\Omega)` weighted dot product for both scalar and vector
fields. Can be evaluated. Can use derivatives.
:Definition:
.. math::
\int_\Omega q p \mbox{ , } \int_\Omega \ul{v} \cdot \ul{u}
\mbox{ , }
\int_\Omega p r \mbox{ , } \int_\Omega \ul{u} \cdot \ul{w} \\
\int_\Omega c q p \mbox{ , } \int_\Omega c \ul{v} \cdot \ul{u}
\mbox{ , }
\int_\Omega c p r \mbox{ , } \int_\Omega c \ul{u} \cdot \ul{w} \\
\int_\Omega \ul{v} \cdot (\ull{M} \ul{u})
\mbox{ , }
\int_\Omega \ul{u} \cdot (\ull{M} \ul{w})
:Arguments 1:
- material : :math:`c` or :math:`\ull{M}` (optional)
- virtual : :math:`q` or :math:`\ul{v}`
- state : :math:`p` or :math:`\ul{u}`
:Arguments 2:
- material : :math:`c` or :math:`\ull{M}` (optional)
- parameter_1 : :math:`p` or :math:`\ul{u}`
- parameter_2 : :math:`r` or :math:`\ul{w}`
"""
name = 'dw_volume_dot'
arg_types = (('opt_material', 'virtual', 'state'),
('opt_material', 'parameter_1', 'parameter_2'))
arg_shapes = [{'opt_material' : '1, 1', 'virtual' : (1, 'state'),
'state' : 1, 'parameter_1' : 1, 'parameter_2' : 1},
{'opt_material' : None},
{'opt_material' : '1, 1', 'virtual' : ('D', 'state'),
'state' : 'D', 'parameter_1' : 'D', 'parameter_2' : 'D'},
{'opt_material' : 'D, D'},
{'opt_material' : None}]
modes = ('weak', 'eval')
@staticmethod
def dw_dot(out, mat, val_qp, vgeo, sgeo, fun, fmode):
status = fun(out, mat, val_qp, vgeo, sgeo, fmode)
return status
@staticmethod
def d_dot(out, mat, val1_qp, val2_qp, geo):
if mat is None:
if val1_qp.shape[2] > 1:
if val2_qp.shape[2] == 1:
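                    # Mixed vector/scalar case: integrate (u . n) p using the
                    # facet normals provided by the surface geometry object.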
aux = dot_sequences(val1_qp, geo.normal, mode='ATB')
vec = dot_sequences(aux, val2_qp, mode='AB')
else:
vec = dot_sequences(val1_qp, val2_qp, mode='ATB')
else:
vec = val1_qp * val2_qp
elif mat.shape[-1] == 1:
if val1_qp.shape[2] > 1:
vec = mat * dot_sequences(val1_qp, val2_qp, mode='ATB')
else:
vec = mat * val1_qp * val2_qp
else:
aux = dot_sequences(mat, val2_qp, mode='AB')
vec = dot_sequences(val1_qp, aux, mode='ATB')
status = geo.integrate(out, vec)
return status
def get_fargs(self, mat, virtual, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
vgeo, _ = self.get_mapping(virtual)
if mode == 'weak':
if mat is None:
n_cell, n_qp, dim, n_n, n_c = self.get_data_shape(state)
mat = nm.ones((n_cell, n_qp, 1, 1), dtype=nm.float64)
sgeo, _ = self.get_mapping(state)
if diff_var is None:
val_qp = self.get(state, 'val')
fmode = 0
else:
val_qp = nm.array([0], ndmin=4, dtype=nm.float64)
fmode = 1
if state.n_components > 1:
if ((self.integration == 'volume')
or (virtual.n_components > 1)):
fun = terms.dw_volume_dot_vector
else:
fun = terms.dw_surface_s_v_dot_n
else:
if ((self.integration == 'volume')
or (virtual.n_components == 1)):
fun = terms.dw_volume_dot_scalar
else:
fun = terms.dw_surface_v_dot_n_s
return mat, val_qp, vgeo, sgeo, fun, fmode
elif mode == 'eval':
val1_qp = self.get(virtual, 'val')
val2_qp = self.get(state, 'val')
return mat, val1_qp, val2_qp, vgeo
else:
raise ValueError('unsupported evaluation mode in %s! (%s)'
% (self.name, mode))
def get_eval_shape(self, mat, virtual, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_cell, n_qp, dim, n_n, n_c = self.get_data_shape(state)
return (n_cell, 1, 1, 1), state.dtype
def set_arg_types(self):
if self.mode == 'weak':
self.function = self.dw_dot
else:
self.function = self.d_dot
class DotProductSurfaceTerm(DotProductVolumeTerm):
r"""
Surface :math:`L^2(\Gamma)` dot product for both scalar and vector
fields.
:Definition:
.. math::
\int_\Gamma q p \mbox{ , } \int_\Gamma \ul{v} \cdot \ul{u}
\mbox{ , }
\int_\Gamma \ul{v} \cdot \ul{n} p \mbox{ , }
\int_\Gamma q \ul{n} \cdot \ul{u} \mbox{ , }
\int_\Gamma p r \mbox{ , } \int_\Gamma \ul{u} \cdot \ul{w}
\mbox{ , } \int_\Gamma \ul{w} \cdot \ul{n} p \\
\int_\Gamma c q p \mbox{ , } \int_\Gamma c \ul{v} \cdot \ul{u}
\mbox{ , }
\int_\Gamma c p r \mbox{ , } \int_\Gamma c \ul{u} \cdot \ul{w} \\
\int_\Gamma \ul{v} \cdot \ull{M} \cdot \ul{u}
\mbox{ , }
\int_\Gamma \ul{u} \cdot \ull{M} \cdot \ul{w}
:Arguments 1:
- material : :math:`c` or :math:`\ull{M}` (optional)
- virtual : :math:`q` or :math:`\ul{v}`
- state : :math:`p` or :math:`\ul{u}`
:Arguments 2:
- material : :math:`c` or :math:`\ull{M}` (optional)
- parameter_1 : :math:`p` or :math:`\ul{u}`
- parameter_2 : :math:`r` or :math:`\ul{w}`
"""
name = 'dw_surface_dot'
arg_types = (('opt_material', 'virtual', 'state'),
('opt_material', 'parameter_1', 'parameter_2'))
arg_shapes = [{'opt_material' : '1, 1', 'virtual' : (1, 'state'),
'state' : 1, 'parameter_1' : 1, 'parameter_2' : 1},
{'opt_material' : None},
{'opt_material' : '1, 1', 'virtual' : (1, None),
'state' : 'D'},
{'opt_material' : None},
{'opt_material' : '1, 1', 'virtual' : ('D', None),
'state' : 1},
{'opt_material' : None},
{'opt_material' : '1, 1', 'virtual' : ('D', 'state'),
'state' : 'D', 'parameter_1' : 'D', 'parameter_2' : 'D'},
{'opt_material' : 'D, D'},
{'opt_material' : None}]
modes = ('weak', 'eval')
integration = 'surface'
class BCNewtonTerm(DotProductSurfaceTerm):
r"""
Newton boundary condition term.
:Definition:
.. math::
\int_{\Gamma} \alpha q (p - p_{\rm outer})
:Arguments:
- material_1 : :math:`\alpha`
- material_2 : :math:`p_{\rm outer}`
- virtual : :math:`q`
- state : :math:`p`
"""
name = 'dw_bc_newton'
arg_types = ('material_1', 'material_2', 'virtual', 'state')
arg_shapes = {'material_1' : '1, 1', 'material_2' : '1, 1',
'virtual' : (1, 'state'), 'state' : 1}
mode = 'weak'
def get_fargs(self, alpha, p_outer, virtual, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
fargs = DotProductSurfaceTerm.get_fargs(self, alpha, virtual, state,
mode, term_mode, diff_var,
**kwargs)
fargs = fargs[:1] + (fargs[1] - p_outer,) + fargs[2:]
return fargs
class DotSProductVolumeOperatorWTHTerm(THTerm):
r"""
Fading memory volume :math:`L^2(\Omega)` weighted dot product for
scalar fields. Can use derivatives.
:Definition:
.. math::
\int_\Omega \left [\int_0^t \Gcal(t-\tau) p(\tau) \difd{\tau} \right] q
:Arguments:
- ts : :class:`TimeStepper` instance
- material : :math:`\Gcal(\tau)`
- virtual : :math:`q`
- state : :math:`p`
"""
name = 'dw_volume_dot_w_scalar_th'
arg_types = ('ts', 'material', 'virtual', 'state')
arg_shapes = {'material' : '.: N, 1, 1',
'virtual' : (1, 'state'), 'state' : 1}
function = staticmethod(terms.dw_volume_dot_scalar)
def get_fargs(self, ts, mats, virtual, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(state)
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(state)
if diff_var is None:
def iter_kernel():
for ii, mat in enumerate(mats):
val_qp = self.get(state, 'val', step=-ii)
mat = nm.tile(mat, (n_el, n_qp, 1, 1))
yield ii, (ts.dt * mat, val_qp, vg, vg, 0)
fargs = iter_kernel
else:
val_qp = nm.array([0], ndmin=4, dtype=nm.float64)
mat = nm.tile(mats[0], (n_el, n_qp, 1, 1))
fargs = ts.dt * mat, val_qp, vg, vg, 1
return fargs
class DotSProductVolumeOperatorWETHTerm(ETHTerm):
r"""
Fading memory volume :math:`L^2(\Omega)` weighted dot product for
scalar fields. This term has the same definition as
dw_volume_dot_w_scalar_th, but assumes an exponential approximation of
the convolution kernel resulting in much higher efficiency. Can use
derivatives.
:Definition:
.. math::
\int_\Omega \left [\int_0^t \Gcal(t-\tau) p(\tau) \difd{\tau} \right] q
:Arguments:
- ts : :class:`TimeStepper` instance
- material_0 : :math:`\Gcal(0)`
- material_1 : :math:`\exp(-\lambda \Delta t)` (decay at :math:`t_1`)
- virtual : :math:`q`
- state : :math:`p`
"""
name = 'dw_volume_dot_w_scalar_eth'
arg_types = ('ts', 'material_0', 'material_1', 'virtual', 'state')
arg_shapes = {'material_0' : '1, 1', 'material_1' : '1, 1',
'virtual' : (1, 'state'), 'state' : 1}
function = staticmethod(terms.dw_volume_dot_scalar)
def get_fargs(self, ts, mat0, mat1, virtual, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _, key = self.get_mapping(state, return_key=True)
if diff_var is None:
val_qp = self.get(state, 'val')
key += tuple(self.arg_names[ii] for ii in [1, 2, 4])
data = self.get_eth_data(key, state, mat1, val_qp)
fargs = (ts.dt * mat0, data.history + data.values, vg, vg, 0)
else:
aux = nm.array([0], ndmin=4, dtype=nm.float64)
fargs = ts.dt * mat0, aux, vg, vg, 1
return fargs
class VectorDotGradScalarTerm(Term):
r"""
Volume dot product of a vector and a gradient of scalar.
Can be evaluated.
:Definition:
.. math::
\int_{\Omega} \ul{v} \cdot \nabla p \mbox{ , }
\int_{\Omega} \ul{u} \cdot \nabla q \\
\int_{\Omega} c \ul{v} \cdot \nabla p \mbox{ , }
\int_{\Omega} c \ul{u} \cdot \nabla q \\
\int_{\Omega} \ul{v} \cdot (\ull{M} \nabla p) \mbox{ , }
\int_{\Omega} \ul{u} \cdot (\ull{M} \nabla q)
:Arguments 1:
- material : :math:`c` or :math:`\ull{M}` (optional)
- virtual : :math:`\ul{v}`
- state : :math:`p`
:Arguments 2:
- material : :math:`c` or :math:`\ull{M}` (optional)
- state : :math:`\ul{u}`
- virtual : :math:`q`
:Arguments 3:
- material : :math:`c` or :math:`\ull{M}` (optional)
- parameter_v : :math:`\ul{u}`
- parameter_s : :math:`p`
"""
name = 'dw_v_dot_grad_s'
arg_types = (('opt_material', 'virtual', 'state'),
('opt_material', 'state', 'virtual'),
('opt_material', 'parameter_v', 'parameter_s'))
arg_shapes = [{'opt_material' : '1, 1',
'virtual/v_weak' : ('D', None), 'state/v_weak' : 1,
'virtual/s_weak' : (1, None), 'state/s_weak' : 'D',
'parameter_v' : 'D', 'parameter_s' : 1},
{'opt_material' : 'D, D'},
{'opt_material' : None}]
modes = ('v_weak', 's_weak', 'eval')
def get_fargs(self, coef, vvar, svar,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(vvar)
if coef is None:
coef = nm.ones((1, n_qp, 1, 1), dtype=nm.float64)
if mode == 'weak':
if self.mode == 'v_weak':
qp_var, qp_name = svar, 'grad'
else:
qp_var, qp_name = vvar, 'val'
vvg, _ = self.get_mapping(vvar)
svg, _ = self.get_mapping(svar)
if diff_var is None:
val_qp = self.get(qp_var, qp_name)
fmode = 0
else:
val_qp = nm.array([0], ndmin=4, dtype=nm.float64)
fmode = 1
return coef, val_qp, vvg, svg, fmode
elif mode == 'eval':
vvg, _ = self.get_mapping(vvar)
grad = self.get(svar, 'grad')
val = self.get(vvar, 'val')
return coef, grad, val, vvg
else:
raise ValueError('unsupported evaluation mode in %s! (%s)'
% (self.name, mode))
def get_eval_shape(self, coef, vvar, svar,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(vvar)
return (n_el, 1, 1, 1), vvar.dtype
def set_arg_types(self):
self.function = {
'v_weak' : terms.dw_v_dot_grad_s_vw,
's_weak' : terms.dw_v_dot_grad_s_sw,
'eval' : DotProductVolumeTerm.d_dot,
}[self.mode]
class VectorDotScalarTerm(Term):
r"""
Volume dot product of a vector and a scalar.
Can be evaluated.
:Definition:
.. math::
\int_{\Omega} \ul{v} \cdot \ul{m} p \mbox{ , }
\int_{\Omega} \ul{u} \cdot \ul{m} q\\
:Arguments 1:
- material : :math:`\ul{m}`
- virtual : :math:`\ul{v}`
- state : :math:`p`
:Arguments 2:
- material : :math:`\ul{m}`
- state : :math:`\ul{u}`
- virtual : :math:`q`
:Arguments 3:
- material : :math:`\ul{m}`
- parameter_v : :math:`\ul{u}`
- parameter_s : :math:`p`
"""
name = 'dw_vm_dot_s'
arg_types = (('material', 'virtual', 'state'),
('material', 'state', 'virtual'),
('material', 'parameter_v', 'parameter_s'))
arg_shapes = [{'material' : 'D, 1',
'virtual/v_weak' : ('D', None), 'state/v_weak' : 1,
'virtual/s_weak' : (1, None), 'state/s_weak' : 'D',
'parameter_v' : 'D', 'parameter_s' : 1}]
modes = ('v_weak', 's_weak', 'eval')
@staticmethod
def dw_dot(out, mat, val_qp, bfve, bfsc, geo, fmode):
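        # fmode values (inferred from get_fargs() below): 0/1 assemble the
        # residual/tangent for the vector-test ('v_weak') mode, 2/3 for the
        # scalar-test ('s_weak') mode; odd values correspond to diff_var being set.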
nel, nqp, dim, nc = mat.shape
nen = bfve.shape[3]
status1 = 0
if fmode in [0, 1, 3]:
aux = nm.zeros((nel, nqp, dim * nen, nc), dtype=nm.float64)
status1 = terms.actBfT(aux, bfve, mat)
if fmode == 0:
status2 = terms.mulAB_integrate(out, aux, val_qp, geo, 'AB')
if fmode == 1:
status2 = terms.mulAB_integrate(out, aux, bfsc, geo, 'AB')
if fmode == 2:
aux = (bfsc * dot_sequences(mat, val_qp,
mode='ATB')).transpose((0,1,3,2))
status2 = geo.integrate(out, nm.ascontiguousarray(aux))
if fmode == 3:
status2 = terms.mulAB_integrate(out, bfsc, aux, geo, 'ATBT')
return status1 and status2
@staticmethod
def d_dot(out, mat, val1_qp, val2_qp, geo):
v1, v2 = (val1_qp, val2_qp) if val1_qp.shape[2] > 1 \
else (val2_qp, val1_qp)
aux = dot_sequences(v1, mat, mode='ATB')
vec = dot_sequences(aux, v2, mode='AB')
status = geo.integrate(out, vec)
return status
def get_fargs(self, coef, vvar, svar,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(vvar)
coef = coef.reshape(coef.shape[:2] + (dim, 1))
if mode == 'weak':
vgv, _ = self.get_mapping(vvar)
vgs, _ = self.get_mapping(svar)
bfve = vgv.bf
bfsc = vgs.bf
if self.mode == 'v_weak':
qp_var, geo, fmode = svar, vgv, 0
else:
qp_var, geo, fmode = vvar, vgs, 2
bfve, bfsc = bfsc, bfve
if diff_var is None:
val_qp = self.get(qp_var, 'val')
else:
val_qp = (nm.array([0], ndmin=4, dtype=nm.float64), 1)
fmode += 1
return coef, val_qp, bfve, bfsc, geo, fmode
elif mode == 'eval':
vvg, _ = self.get_mapping(vvar)
vals = self.get(svar, 'val')
valv = self.get(vvar, 'val')
return coef, vals, valv, vvg
else:
raise ValueError('unsupported evaluation mode in %s! (%s)'
% (self.name, mode))
def get_eval_shape(self, coef, vvar, svar,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(vvar)
return (n_el, 1, 1, 1), vvar.dtype
def set_arg_types(self):
self.function = {
'v_weak' : self.dw_dot,
's_weak' : self.dw_dot,
'eval' : self.d_dot,
}[self.mode]
class ScalarDotGradIScalarTerm(Term):
r"""
Dot product of a scalar and the :math:`i`-th component of gradient of a
scalar. The index should be given as a 'special_constant' material
parameter.
:Definition:
.. math::
Z^i = \int_{\Omega} q \nabla_i p
:Arguments:
- material : :math:`i`
- virtual : :math:`q`
- state : :math:`p`
"""
name = 'dw_s_dot_grad_i_s'
arg_types = ('material', 'virtual', 'state')
arg_shapes = {'material' : '.: 1, 1', 'virtual' : (1, 'state'), 'state' : 1}
@staticmethod
def dw_fun(out, bf, vg, grad, idx, fmode):
cc = nm.ascontiguousarray
bft = cc(nm.tile(bf, (out.shape[0], 1, 1, 1)))
if fmode == 0:
status = terms.mulAB_integrate(out, bft,
cc(grad[..., idx:idx+1, :]), vg,
mode='ATB')
else:
status = terms.mulAB_integrate(out, bft,
cc(vg.bfg[:,:,idx:(idx + 1),:]), vg,
mode='ATB')
return status
def get_fargs(self, material, virtual, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
if mode == 'weak':
if diff_var is None:
grad = self.get(state, 'grad')
fmode = 0
else:
grad = nm.array([0], ndmin=4, dtype=nm.float64)
fmode = 1
vg, _ = self.get_mapping(virtual)
vgs, _ = self.get_mapping(state)
idx = int(material)
return vgs.bf, vg, grad, idx, fmode
else:
raise ValueError('unsupported evaluation mode in %s! (%s)'
% (self.name, mode))
def set_arg_types(self):
self.function = self.dw_fun
class ScalarDotMGradScalarTerm(Term):
r"""
Volume dot product of a scalar gradient dotted with a material vector with
a scalar.
:Definition:
.. math::
\int_{\Omega} q \ul{y} \cdot \nabla p \mbox{ , }
\int_{\Omega} p \ul{y} \cdot \nabla q
:Arguments 1:
- material : :math:`\ul{y}`
- virtual : :math:`q`
- state : :math:`p`
:Arguments 2:
- material : :math:`\ul{y}`
- state : :math:`p`
- virtual : :math:`q`
"""
name = 'dw_s_dot_mgrad_s'
arg_types = (('material', 'virtual', 'state'),
('material', 'state', 'virtual'))
arg_shapes = [{'material' : 'D, 1',
'virtual/grad_state' : (1, None),
'state/grad_state' : 1,
'virtual/grad_virtual' : (1, None),
'state/grad_virtual' : 1}]
modes = ('grad_state', 'grad_virtual')
@staticmethod
def function(out, out_qp, geo, fmode):
status = geo.integrate(out, out_qp)
return status
def get_fargs(self, mat, var1, var2,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg1, _ = self.get_mapping(var1)
vg2, _ = self.get_mapping(var2)
if diff_var is None:
if self.mode == 'grad_state':
geo = vg1
bf_t = vg1.bf.transpose((0, 1, 3, 2))
val_qp = self.get(var2, 'grad')
out_qp = bf_t * dot_sequences(mat, val_qp, 'ATB')
else:
geo = vg2
val_qp = self.get(var1, 'val')
out_qp = dot_sequences(vg2.bfg, mat, 'ATB') * val_qp
fmode = 0
else:
if self.mode == 'grad_state':
geo = vg1
bf_t = vg1.bf.transpose((0, 1, 3, 2))
out_qp = bf_t * dot_sequences(mat, vg2.bfg, 'ATB')
else:
geo = vg2
out_qp = dot_sequences(vg2.bfg, mat, 'ATB') * vg1.bf
fmode = 1
return out_qp, geo, fmode
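# Illustrative usage sketch (added for exposition; not part of the original
# module): roughly how the terms defined above could appear in the equations
# of a problem description file. Region, field and material names below are
# hypothetical.
#
#     equations = {
#         'projection': """dw_volume_dot.2.Omega(v, u) = dw_volume_dot.2.Omega(v, w)""",
#         'convection': """dw_s_dot_mgrad_s.2.Omega(m.b, q, p) + ... = 0""",
#     }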
|
|
"""Test the MySensors config flow."""
from __future__ import annotations
from unittest.mock import patch
import pytest
from homeassistant import config_entries, setup
from homeassistant.components.mysensors.const import (
CONF_BAUD_RATE,
CONF_DEVICE,
CONF_GATEWAY_TYPE,
CONF_GATEWAY_TYPE_MQTT,
CONF_GATEWAY_TYPE_SERIAL,
CONF_GATEWAY_TYPE_TCP,
CONF_PERSISTENCE,
CONF_PERSISTENCE_FILE,
CONF_RETAIN,
CONF_TCP_PORT,
CONF_TOPIC_IN_PREFIX,
CONF_TOPIC_OUT_PREFIX,
CONF_VERSION,
DOMAIN,
ConfGatewayType,
)
from homeassistant.helpers.typing import HomeAssistantType
from tests.common import MockConfigEntry
async def get_form(
    hass: HomeAssistantType, gateway_type: ConfGatewayType, expected_step_id: str
):
"""Get a form for the given gateway type."""
await setup.async_setup_component(hass, "persistent_notification", {})
stepuser = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert stepuser["type"] == "form"
assert not stepuser["errors"]
result = await hass.config_entries.flow.async_configure(
stepuser["flow_id"],
        {CONF_GATEWAY_TYPE: gateway_type},
)
await hass.async_block_till_done()
assert result["type"] == "form"
assert result["step_id"] == expected_step_id
return result
async def test_config_mqtt(hass: HomeAssistantType):
"""Test configuring a mqtt gateway."""
step = await get_form(hass, CONF_GATEWAY_TYPE_MQTT, "gw_mqtt")
flow_id = step["flow_id"]
with patch(
"homeassistant.components.mysensors.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.mysensors.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
flow_id,
{
CONF_RETAIN: True,
CONF_TOPIC_IN_PREFIX: "bla",
CONF_TOPIC_OUT_PREFIX: "blub",
CONF_VERSION: "2.4",
},
)
await hass.async_block_till_done()
if "errors" in result2:
assert not result2["errors"]
assert result2["type"] == "create_entry"
assert result2["title"] == "mqtt"
assert result2["data"] == {
CONF_DEVICE: "mqtt",
CONF_RETAIN: True,
CONF_TOPIC_IN_PREFIX: "bla",
CONF_TOPIC_OUT_PREFIX: "blub",
CONF_VERSION: "2.4",
CONF_GATEWAY_TYPE: "MQTT",
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_config_serial(hass: HomeAssistantType):
"""Test configuring a gateway via serial."""
step = await get_form(hass, CONF_GATEWAY_TYPE_SERIAL, "gw_serial")
flow_id = step["flow_id"]
with patch( # mock is_serial_port because otherwise the test will be platform dependent (/dev/ttyACMx vs COMx)
"homeassistant.components.mysensors.config_flow.is_serial_port",
return_value=True,
), patch(
"homeassistant.components.mysensors.config_flow.try_connect", return_value=True
), patch(
"homeassistant.components.mysensors.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.mysensors.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
flow_id,
{
CONF_BAUD_RATE: 115200,
CONF_DEVICE: "/dev/ttyACM0",
CONF_VERSION: "2.4",
},
)
await hass.async_block_till_done()
if "errors" in result2:
assert not result2["errors"]
assert result2["type"] == "create_entry"
assert result2["title"] == "/dev/ttyACM0"
assert result2["data"] == {
CONF_DEVICE: "/dev/ttyACM0",
CONF_BAUD_RATE: 115200,
CONF_VERSION: "2.4",
CONF_GATEWAY_TYPE: "Serial",
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_config_tcp(hass: HomeAssistantType):
"""Test configuring a gateway via tcp."""
step = await get_form(hass, CONF_GATEWAY_TYPE_TCP, "gw_tcp")
flow_id = step["flow_id"]
with patch(
"homeassistant.components.mysensors.config_flow.try_connect", return_value=True
), patch(
"homeassistant.components.mysensors.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.mysensors.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
flow_id,
{
CONF_TCP_PORT: 5003,
CONF_DEVICE: "127.0.0.1",
CONF_VERSION: "2.4",
},
)
await hass.async_block_till_done()
if "errors" in result2:
assert not result2["errors"]
assert result2["type"] == "create_entry"
assert result2["title"] == "127.0.0.1"
assert result2["data"] == {
CONF_DEVICE: "127.0.0.1",
CONF_TCP_PORT: 5003,
CONF_VERSION: "2.4",
CONF_GATEWAY_TYPE: "TCP",
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_fail_to_connect(hass: HomeAssistantType):
"""Test configuring a gateway via tcp."""
step = await get_form(hass, CONF_GATEWAY_TYPE_TCP, "gw_tcp")
flow_id = step["flow_id"]
with patch(
"homeassistant.components.mysensors.config_flow.try_connect", return_value=False
), patch(
"homeassistant.components.mysensors.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.mysensors.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
flow_id,
{
CONF_TCP_PORT: 5003,
CONF_DEVICE: "127.0.0.1",
CONF_VERSION: "2.4",
},
)
await hass.async_block_till_done()
assert result2["type"] == "form"
assert "errors" in result2
assert "base" in result2["errors"]
assert result2["errors"]["base"] == "cannot_connect"
assert len(mock_setup.mock_calls) == 0
assert len(mock_setup_entry.mock_calls) == 0
@pytest.mark.parametrize(
"gateway_type, expected_step_id, user_input, err_field, err_string",
[
(
CONF_GATEWAY_TYPE_TCP,
"gw_tcp",
{
CONF_TCP_PORT: 600_000,
CONF_DEVICE: "127.0.0.1",
CONF_VERSION: "2.4",
},
CONF_TCP_PORT,
"port_out_of_range",
),
(
CONF_GATEWAY_TYPE_TCP,
"gw_tcp",
{
CONF_TCP_PORT: 0,
CONF_DEVICE: "127.0.0.1",
CONF_VERSION: "2.4",
},
CONF_TCP_PORT,
"port_out_of_range",
),
(
CONF_GATEWAY_TYPE_TCP,
"gw_tcp",
{
CONF_TCP_PORT: 5003,
CONF_DEVICE: "127.0.0.1",
CONF_VERSION: "a",
},
CONF_VERSION,
"invalid_version",
),
(
CONF_GATEWAY_TYPE_TCP,
"gw_tcp",
{
CONF_TCP_PORT: 5003,
CONF_DEVICE: "127.0.0.1",
CONF_VERSION: "a.b",
},
CONF_VERSION,
"invalid_version",
),
(
CONF_GATEWAY_TYPE_TCP,
"gw_tcp",
{
CONF_TCP_PORT: 5003,
CONF_DEVICE: "127.0.0.1",
},
CONF_VERSION,
"invalid_version",
),
(
CONF_GATEWAY_TYPE_TCP,
"gw_tcp",
{
CONF_TCP_PORT: 5003,
CONF_DEVICE: "127.0.0.1",
CONF_VERSION: "4",
},
CONF_VERSION,
"invalid_version",
),
(
CONF_GATEWAY_TYPE_TCP,
"gw_tcp",
{
CONF_TCP_PORT: 5003,
CONF_DEVICE: "127.0.0.1",
CONF_VERSION: "v3",
},
CONF_VERSION,
"invalid_version",
),
(
CONF_GATEWAY_TYPE_TCP,
"gw_tcp",
{
CONF_TCP_PORT: 5003,
CONF_DEVICE: "127.0.0.",
},
CONF_DEVICE,
"invalid_ip",
),
(
CONF_GATEWAY_TYPE_TCP,
"gw_tcp",
{
CONF_TCP_PORT: 5003,
CONF_DEVICE: "abcd",
},
CONF_DEVICE,
"invalid_ip",
),
(
CONF_GATEWAY_TYPE_MQTT,
"gw_mqtt",
{
CONF_RETAIN: True,
CONF_TOPIC_IN_PREFIX: "bla",
CONF_TOPIC_OUT_PREFIX: "blub",
CONF_PERSISTENCE_FILE: "asdf.zip",
CONF_VERSION: "2.4",
},
CONF_PERSISTENCE_FILE,
"invalid_persistence_file",
),
(
CONF_GATEWAY_TYPE_MQTT,
"gw_mqtt",
{
CONF_RETAIN: True,
CONF_TOPIC_IN_PREFIX: "/#/#",
CONF_TOPIC_OUT_PREFIX: "blub",
CONF_VERSION: "2.4",
},
CONF_TOPIC_IN_PREFIX,
"invalid_subscribe_topic",
),
(
CONF_GATEWAY_TYPE_MQTT,
"gw_mqtt",
{
CONF_RETAIN: True,
CONF_TOPIC_IN_PREFIX: "asdf",
CONF_TOPIC_OUT_PREFIX: "/#/#",
CONF_VERSION: "2.4",
},
CONF_TOPIC_OUT_PREFIX,
"invalid_publish_topic",
),
(
CONF_GATEWAY_TYPE_MQTT,
"gw_mqtt",
{
CONF_RETAIN: True,
CONF_TOPIC_IN_PREFIX: "asdf",
CONF_TOPIC_OUT_PREFIX: "asdf",
CONF_VERSION: "2.4",
},
CONF_TOPIC_OUT_PREFIX,
"same_topic",
),
],
)
async def test_config_invalid(
hass: HomeAssistantType,
gateway_type: ConfGatewayType,
expected_step_id: str,
user_input: dict[str, Any],
err_field,
err_string,
):
"""Perform a test that is expected to generate an error."""
step = await get_form(hass, gateway_type, expected_step_id)
flow_id = step["flow_id"]
with patch(
"homeassistant.components.mysensors.config_flow.try_connect", return_value=True
), patch(
"homeassistant.components.mysensors.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.mysensors.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
flow_id,
user_input,
)
await hass.async_block_till_done()
assert result2["type"] == "form"
assert "errors" in result2
assert err_field in result2["errors"]
assert result2["errors"][err_field] == err_string
assert len(mock_setup.mock_calls) == 0
assert len(mock_setup_entry.mock_calls) == 0
@pytest.mark.parametrize(
"user_input",
[
{
CONF_DEVICE: "COM5",
CONF_BAUD_RATE: 57600,
CONF_TCP_PORT: 5003,
CONF_RETAIN: True,
CONF_VERSION: "2.3",
CONF_PERSISTENCE_FILE: "bla.json",
},
{
CONF_DEVICE: "COM5",
CONF_PERSISTENCE_FILE: "bla.json",
CONF_BAUD_RATE: 57600,
CONF_TCP_PORT: 5003,
CONF_VERSION: "2.3",
CONF_PERSISTENCE: False,
CONF_RETAIN: True,
},
{
CONF_DEVICE: "mqtt",
CONF_BAUD_RATE: 115200,
CONF_TCP_PORT: 5003,
CONF_TOPIC_IN_PREFIX: "intopic",
CONF_TOPIC_OUT_PREFIX: "outtopic",
CONF_VERSION: "2.4",
CONF_PERSISTENCE: False,
CONF_RETAIN: False,
},
{
CONF_DEVICE: "127.0.0.1",
CONF_PERSISTENCE_FILE: "blub.pickle",
CONF_BAUD_RATE: 115200,
CONF_TCP_PORT: 343,
CONF_VERSION: "2.4",
CONF_PERSISTENCE: False,
CONF_RETAIN: False,
},
],
)
async def test_import(hass: HomeAssistantType, user_input: dict):
"""Test importing a gateway."""
await setup.async_setup_component(hass, "persistent_notification", {})
with patch("sys.platform", "win32"), patch(
"homeassistant.components.mysensors.config_flow.try_connect", return_value=True
), patch(
"homeassistant.components.mysensors.async_setup_entry",
return_value=True,
):
result = await hass.config_entries.flow.async_init(
DOMAIN, data=user_input, context={"source": config_entries.SOURCE_IMPORT}
)
await hass.async_block_till_done()
assert result["type"] == "create_entry"
@pytest.mark.parametrize(
"first_input, second_input, expected_result",
[
(
{
CONF_DEVICE: "mqtt",
CONF_VERSION: "2.3",
CONF_TOPIC_IN_PREFIX: "same1",
CONF_TOPIC_OUT_PREFIX: "same2",
},
{
CONF_DEVICE: "mqtt",
CONF_VERSION: "2.3",
CONF_TOPIC_IN_PREFIX: "same1",
CONF_TOPIC_OUT_PREFIX: "same2",
},
(CONF_TOPIC_IN_PREFIX, "duplicate_topic"),
),
(
{
CONF_DEVICE: "mqtt",
CONF_VERSION: "2.3",
CONF_TOPIC_IN_PREFIX: "different1",
CONF_TOPIC_OUT_PREFIX: "different2",
},
{
CONF_DEVICE: "mqtt",
CONF_VERSION: "2.3",
CONF_TOPIC_IN_PREFIX: "different3",
CONF_TOPIC_OUT_PREFIX: "different4",
},
None,
),
(
{
CONF_DEVICE: "mqtt",
CONF_VERSION: "2.3",
CONF_TOPIC_IN_PREFIX: "same1",
CONF_TOPIC_OUT_PREFIX: "different2",
},
{
CONF_DEVICE: "mqtt",
CONF_VERSION: "2.3",
CONF_TOPIC_IN_PREFIX: "same1",
CONF_TOPIC_OUT_PREFIX: "different4",
},
(CONF_TOPIC_IN_PREFIX, "duplicate_topic"),
),
(
{
CONF_DEVICE: "mqtt",
CONF_VERSION: "2.3",
CONF_TOPIC_IN_PREFIX: "same1",
CONF_TOPIC_OUT_PREFIX: "different2",
},
{
CONF_DEVICE: "mqtt",
CONF_VERSION: "2.3",
CONF_TOPIC_IN_PREFIX: "different1",
CONF_TOPIC_OUT_PREFIX: "same1",
},
(CONF_TOPIC_OUT_PREFIX, "duplicate_topic"),
),
(
{
CONF_DEVICE: "mqtt",
CONF_VERSION: "2.3",
CONF_TOPIC_IN_PREFIX: "same1",
CONF_TOPIC_OUT_PREFIX: "different2",
},
{
CONF_DEVICE: "mqtt",
CONF_VERSION: "2.3",
CONF_TOPIC_IN_PREFIX: "same1",
CONF_TOPIC_OUT_PREFIX: "different1",
},
(CONF_TOPIC_IN_PREFIX, "duplicate_topic"),
),
(
{
CONF_DEVICE: "127.0.0.1",
CONF_PERSISTENCE_FILE: "same.json",
CONF_TCP_PORT: 343,
CONF_VERSION: "2.3",
CONF_PERSISTENCE: False,
CONF_RETAIN: False,
},
{
CONF_DEVICE: "192.168.1.2",
CONF_PERSISTENCE_FILE: "same.json",
CONF_TCP_PORT: 343,
CONF_VERSION: "2.3",
CONF_PERSISTENCE: False,
CONF_RETAIN: False,
},
("persistence_file", "duplicate_persistence_file"),
),
(
{
CONF_DEVICE: "127.0.0.1",
CONF_TCP_PORT: 343,
CONF_VERSION: "2.3",
CONF_PERSISTENCE: False,
CONF_RETAIN: False,
},
{
CONF_DEVICE: "192.168.1.2",
CONF_PERSISTENCE_FILE: "same.json",
CONF_TCP_PORT: 343,
CONF_VERSION: "2.3",
CONF_PERSISTENCE: False,
CONF_RETAIN: False,
},
None,
),
(
{
CONF_DEVICE: "127.0.0.1",
CONF_TCP_PORT: 343,
CONF_VERSION: "2.3",
CONF_PERSISTENCE: False,
CONF_RETAIN: False,
},
{
CONF_DEVICE: "192.168.1.2",
CONF_TCP_PORT: 343,
CONF_VERSION: "2.3",
CONF_PERSISTENCE: False,
CONF_RETAIN: False,
},
None,
),
(
{
CONF_DEVICE: "192.168.1.2",
CONF_PERSISTENCE_FILE: "different1.json",
CONF_TCP_PORT: 343,
CONF_VERSION: "2.3",
CONF_PERSISTENCE: False,
CONF_RETAIN: False,
},
{
CONF_DEVICE: "192.168.1.2",
CONF_PERSISTENCE_FILE: "different2.json",
CONF_TCP_PORT: 343,
CONF_VERSION: "2.3",
CONF_PERSISTENCE: False,
CONF_RETAIN: False,
},
("base", "already_configured"),
),
(
{
CONF_DEVICE: "192.168.1.2",
CONF_PERSISTENCE_FILE: "different1.json",
CONF_TCP_PORT: 343,
CONF_VERSION: "2.3",
CONF_PERSISTENCE: False,
CONF_RETAIN: False,
},
{
CONF_DEVICE: "192.168.1.2",
CONF_PERSISTENCE_FILE: "different2.json",
CONF_TCP_PORT: 5003,
CONF_VERSION: "2.3",
CONF_PERSISTENCE: False,
CONF_RETAIN: False,
},
None,
),
(
{
CONF_DEVICE: "192.168.1.2",
CONF_TCP_PORT: 5003,
CONF_VERSION: "2.3",
CONF_PERSISTENCE: False,
CONF_RETAIN: False,
},
{
CONF_DEVICE: "192.168.1.3",
CONF_TCP_PORT: 5003,
CONF_VERSION: "2.3",
CONF_PERSISTENCE: False,
CONF_RETAIN: False,
},
None,
),
(
{
CONF_DEVICE: "COM5",
CONF_TCP_PORT: 5003,
CONF_RETAIN: True,
CONF_VERSION: "2.3",
CONF_PERSISTENCE_FILE: "different1.json",
},
{
CONF_DEVICE: "COM5",
CONF_TCP_PORT: 5003,
CONF_RETAIN: True,
CONF_VERSION: "2.3",
CONF_PERSISTENCE_FILE: "different2.json",
},
("base", "already_configured"),
),
(
{
CONF_DEVICE: "COM6",
CONF_BAUD_RATE: 57600,
CONF_RETAIN: True,
CONF_VERSION: "2.3",
},
{
CONF_DEVICE: "COM5",
CONF_TCP_PORT: 5003,
CONF_RETAIN: True,
CONF_VERSION: "2.3",
},
None,
),
(
{
CONF_DEVICE: "COM5",
CONF_BAUD_RATE: 115200,
CONF_RETAIN: True,
CONF_VERSION: "2.3",
CONF_PERSISTENCE_FILE: "different1.json",
},
{
CONF_DEVICE: "COM5",
CONF_BAUD_RATE: 57600,
CONF_RETAIN: True,
CONF_VERSION: "2.3",
CONF_PERSISTENCE_FILE: "different2.json",
},
("base", "already_configured"),
),
(
{
CONF_DEVICE: "COM5",
CONF_BAUD_RATE: 115200,
CONF_RETAIN: True,
CONF_VERSION: "2.3",
CONF_PERSISTENCE_FILE: "same.json",
},
{
CONF_DEVICE: "COM6",
CONF_BAUD_RATE: 57600,
CONF_RETAIN: True,
CONF_VERSION: "2.3",
CONF_PERSISTENCE_FILE: "same.json",
},
("persistence_file", "duplicate_persistence_file"),
),
(
{
CONF_DEVICE: "mqtt",
CONF_PERSISTENCE_FILE: "bla.json",
CONF_BAUD_RATE: 115200,
CONF_TCP_PORT: 5003,
CONF_VERSION: "1.4",
},
{
CONF_DEVICE: "COM6",
CONF_PERSISTENCE_FILE: "bla2.json",
CONF_BAUD_RATE: 115200,
CONF_TCP_PORT: 5003,
CONF_VERSION: "1.4",
},
None,
),
],
)
async def test_duplicate(
hass: HomeAssistantType,
first_input: dict,
second_input: dict,
expected_result: tuple[str, str] | None,
):
"""Test duplicate detection."""
await setup.async_setup_component(hass, "persistent_notification", {})
with patch("sys.platform", "win32"), patch(
"homeassistant.components.mysensors.config_flow.try_connect", return_value=True
), patch(
"homeassistant.components.mysensors.async_setup_entry",
return_value=True,
):
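# Register an existing entry first, then import a second config that may collide with it.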
MockConfigEntry(domain=DOMAIN, data=first_input).add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, data=second_input, context={"source": config_entries.SOURCE_IMPORT}
)
await hass.async_block_till_done()
if expected_result is None:
assert result["type"] == "create_entry"
else:
assert result["type"] == "abort"
assert result["reason"] == expected_result[1]
|
|
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Track resources like memory and disk for a compute host. Provides the
scheduler with useful information about availability through the ComputeNode
model.
"""
import copy
from oslo.config import cfg
from oslo.utils import importutils
from nova.compute import claims
from nova.compute import flavors
from nova.compute import monitors
from nova.compute import resources as ext_resources
from nova.compute import task_states
from nova.compute import vm_states
from nova import conductor
from nova import exception
from nova.i18n import _
from nova import objects
from nova.objects import base as obj_base
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.pci import pci_manager
from nova import rpc
from nova.scheduler import client as scheduler_client
from nova import utils
from nova.virt import hardware
resource_tracker_opts = [
cfg.IntOpt('reserved_host_disk_mb', default=0,
help='Amount of disk in MB to reserve for the host'),
cfg.IntOpt('reserved_host_memory_mb', default=512,
help='Amount of memory in MB to reserve for the host'),
cfg.StrOpt('compute_stats_class',
default='nova.compute.stats.Stats',
help='Class that will manage stats for the local compute host'),
cfg.ListOpt('compute_resources',
default=['vcpu'],
help='The names of the extra resources to track.'),
]
CONF = cfg.CONF
CONF.register_opts(resource_tracker_opts)
LOG = logging.getLogger(__name__)
COMPUTE_RESOURCE_SEMAPHORE = "compute_resources"
CONF.import_opt('my_ip', 'nova.netconf')
class ResourceTracker(object):
"""Compute helper class for keeping track of resource usage as instances
are built and destroyed.
"""
def __init__(self, host, driver, nodename):
self.host = host
self.driver = driver
self.pci_tracker = None
self.nodename = nodename
self.compute_node = None
self.stats = importutils.import_object(CONF.compute_stats_class)
self.tracked_instances = {}
self.tracked_migrations = {}
self.conductor_api = conductor.API()
monitor_handler = monitors.ResourceMonitorHandler()
self.monitors = monitor_handler.choose_monitors(self)
self.ext_resources_handler = \
ext_resources.ResourceHandler(CONF.compute_resources)
self.notifier = rpc.get_notifier()
self.old_resources = {}
self.scheduler_client = scheduler_client.SchedulerClient()
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def instance_claim(self, context, instance_ref, limits=None):
"""Indicate that some resources are needed for an upcoming compute
instance build operation.
This should be called before the compute node is about to perform
an instance build operation that will consume additional resources.
:param context: security context
:param instance_ref: instance to reserve resources for
:param limits: Dict of oversubscription limits for memory, disk,
and CPUs.
:returns: A Claim ticket representing the reserved resources. It can
be used to revert the resource usage if an error occurs
during the instance build.
"""
if self.disabled:
# compute_driver doesn't support resource tracking, just
# set the 'host' and node fields and continue the build:
self._set_instance_host_and_node(context, instance_ref)
return claims.NopClaim()
# sanity checks:
if instance_ref['host']:
LOG.warning(_("Host field should not be set on the instance until "
"resources have been claimed."),
instance=instance_ref)
if instance_ref['node']:
LOG.warning(_("Node field should not be set on the instance "
"until resources have been claimed."),
instance=instance_ref)
# get memory overhead required to build this instance:
overhead = self.driver.estimate_instance_overhead(instance_ref)
LOG.debug("Memory overhead for %(flavor)d MB instance; %(overhead)d "
"MB", {'flavor': instance_ref['memory_mb'],
'overhead': overhead['memory_mb']})
claim = claims.Claim(context, instance_ref, self, self.compute_node,
overhead=overhead, limits=limits)
self._set_instance_host_and_node(context, instance_ref)
# Mark resources in-use and update stats
self._update_usage_from_instance(context, self.compute_node,
instance_ref)
elevated = context.elevated()
# persist changes to the compute node:
self._update(elevated, self.compute_node)
return claim
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def resize_claim(self, context, instance, instance_type,
image_meta=None, limits=None):
"""Indicate that resources are needed for a resize operation to this
compute host.
:param context: security context
:param instance: instance object to reserve resources for
:param instance_type: new instance_type being resized to
:param limits: Dict of oversubscription limits for memory, disk,
and CPUs
:returns: A Claim ticket representing the reserved resources. This
should be used to finalize the resource claim or to free the
resources after the compute operation is finished.
"""
image_meta = image_meta or {}
if self.disabled:
# compute_driver doesn't support resource tracking, just
# generate the migration record and continue the resize:
migration = self._create_migration(context, instance,
instance_type)
return claims.NopClaim(migration=migration)
# get memory overhead required to build this instance:
overhead = self.driver.estimate_instance_overhead(instance_type)
LOG.debug("Memory overhead for %(flavor)d MB instance; %(overhead)d "
"MB", {'flavor': instance_type['memory_mb'],
'overhead': overhead['memory_mb']})
instance_ref = obj_base.obj_to_primitive(instance)
claim = claims.ResizeClaim(context, instance_ref, instance_type,
image_meta, self, self.compute_node,
overhead=overhead, limits=limits)
migration = self._create_migration(context, instance_ref,
instance_type)
claim.migration = migration
# Mark the resources in-use for the resize landing on this
# compute host:
self._update_usage_from_migration(context, instance_ref, image_meta,
self.compute_node, migration)
elevated = context.elevated()
self._update(elevated, self.compute_node)
return claim
def _create_migration(self, context, instance, instance_type):
"""Create a migration record for the upcoming resize. This should
be done while the COMPUTE_RESOURCE_SEMAPHORE is held so the resource
claim will not be lost if the audit process starts.
"""
old_instance_type = flavors.extract_flavor(instance)
migration = objects.Migration()
migration.dest_compute = self.host
migration.dest_node = self.nodename
migration.dest_host = self.driver.get_host_ip_addr()
migration.old_instance_type_id = old_instance_type['id']
migration.new_instance_type_id = instance_type['id']
migration.status = 'pre-migrating'
migration.instance_uuid = instance['uuid']
migration.source_compute = instance['host']
migration.source_node = instance['node']
migration.create(context.elevated())
return migration
def _set_instance_host_and_node(self, context, instance_ref):
"""Tag the instance as belonging to this host. This should be done
while the COMPUTE_RESOURCE_SEMAPHORE is held so the resource claim
will not be lost if the audit process starts.
"""
values = {'host': self.host, 'node': self.nodename,
'launched_on': self.host}
self.conductor_api.instance_update(context, instance_ref['uuid'],
**values)
instance_ref['host'] = self.host
instance_ref['launched_on'] = self.host
instance_ref['node'] = self.nodename
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def abort_instance_claim(self, context, instance):
"""Remove usage from the given instance."""
# flag the instance as deleted to revert the resource usage
# and associated stats:
instance['vm_state'] = vm_states.DELETED
self._update_usage_from_instance(context, self.compute_node, instance)
self._update(context.elevated(), self.compute_node)
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def drop_resize_claim(self, context, instance, instance_type=None,
image_meta=None, prefix='new_'):
"""Remove usage for an incoming/outgoing migration."""
if instance['uuid'] in self.tracked_migrations:
migration, itype = self.tracked_migrations.pop(instance['uuid'])
if not instance_type:
ctxt = context.elevated()
instance_type = self._get_instance_type(ctxt, instance, prefix)
if image_meta is None:
image_meta = utils.get_image_from_system_metadata(
instance['system_metadata'])
if instance_type['id'] == itype['id']:
numa_topology = (
hardware.VirtNUMAInstanceTopology.get_constraints(
itype, image_meta))
usage = self._get_usage_dict(
itype, numa_topology=numa_topology)
if self.pci_tracker:
self.pci_tracker.update_pci_for_migration(context,
instance,
sign=-1)
self._update_usage(context, self.compute_node, usage, sign=-1)
ctxt = context.elevated()
self._update(ctxt, self.compute_node)
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def update_usage(self, context, instance):
"""Update the resource usage and stats after a change in an
instance
"""
if self.disabled:
return
uuid = instance['uuid']
# don't update usage for this instance unless it submitted a resource
# claim first:
if uuid in self.tracked_instances:
self._update_usage_from_instance(context, self.compute_node,
instance)
self._update(context.elevated(), self.compute_node)
@property
def disabled(self):
return self.compute_node is None
def _get_host_metrics(self, context, nodename):
"""Get the metrics from monitors and
notify information to message bus.
"""
metrics = []
metrics_info = {}
for monitor in self.monitors:
try:
metrics += monitor.get_metrics(nodename=nodename)
except Exception:
LOG.warn(_("Cannot get the metrics from %s."), monitors)
if metrics:
metrics_info['nodename'] = nodename
metrics_info['metrics'] = metrics
metrics_info['host'] = self.host
metrics_info['host_ip'] = CONF.my_ip
notifier = rpc.get_notifier(service='compute', host=nodename)
notifier.info(context, 'compute.metrics.update', metrics_info)
return metrics
def update_available_resource(self, context):
"""Override in-memory calculations of compute node resource usage based
on data audited from the hypervisor layer.
Add in resource claims in progress to account for operations that have
declared a need for resources, but not necessarily retrieved them from
the hypervisor layer yet.
"""
LOG.audit(_("Auditing locally available compute resources"))
resources = self.driver.get_available_resource(self.nodename)
if not resources:
# The virt driver does not support this function
LOG.audit(_("Virt driver does not support "
"'get_available_resource' Compute tracking is disabled."))
self.compute_node = None
return
resources['host_ip'] = CONF.my_ip
# TODO(berrange): remove this once all virt drivers are updated
# to report topology
if "numa_topology" not in resources:
resources["numa_topology"] = None
self._verify_resources(resources)
self._report_hypervisor_resource_view(resources)
return self._update_available_resource(context, resources)
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def _update_available_resource(self, context, resources):
if 'pci_passthrough_devices' in resources:
if not self.pci_tracker:
self.pci_tracker = pci_manager.PciDevTracker()
self.pci_tracker.set_hvdevs(jsonutils.loads(resources.pop(
'pci_passthrough_devices')))
# Grab all instances assigned to this node:
instances = objects.InstanceList.get_by_host_and_node(
context, self.host, self.nodename,
expected_attrs=['system_metadata',
'numa_topology'])
# Now calculate usage based on instance utilization:
self._update_usage_from_instances(context, resources, instances)
# Grab all in-progress migrations:
capi = self.conductor_api
migrations = capi.migration_get_in_progress_by_host_and_node(context,
self.host, self.nodename)
self._update_usage_from_migrations(context, resources, migrations)
# Detect and account for orphaned instances that may exist on the
# hypervisor, but are not in the DB:
orphans = self._find_orphaned_instances()
self._update_usage_from_orphans(context, resources, orphans)
# NOTE(yjiang5): Because pci device tracker status is not cleared in
# this periodic task, and also because the resource tracker is not
# notified when instances are deleted, we need to remove all usages
# from deleted instances.
if self.pci_tracker:
self.pci_tracker.clean_usage(instances, migrations, orphans)
resources['pci_stats'] = jsonutils.dumps(self.pci_tracker.stats)
else:
resources['pci_stats'] = jsonutils.dumps([])
self._report_final_resource_view(resources)
metrics = self._get_host_metrics(context, self.nodename)
resources['metrics'] = jsonutils.dumps(metrics)
self._sync_compute_node(context, resources)
def _sync_compute_node(self, context, resources):
"""Create or update the compute node DB record."""
if not self.compute_node:
# we need a copy of the ComputeNode record:
service = self._get_service(context)
if not service:
# no service record, disable resource tracking
return
compute_node_refs = service['compute_node']
if compute_node_refs:
for cn in compute_node_refs:
if cn.get('hypervisor_hostname') == self.nodename:
self.compute_node = cn
if self.pci_tracker:
self.pci_tracker.set_compute_node_id(cn['id'])
break
if not self.compute_node:
# Need to create the ComputeNode record:
resources['service_id'] = service['id']
self._create(context, resources)
if self.pci_tracker:
self.pci_tracker.set_compute_node_id(self.compute_node['id'])
LOG.info(_('Compute_service record created for %(host)s:%(node)s')
% {'host': self.host, 'node': self.nodename})
else:
# just update the record:
self._update(context, resources)
LOG.info(_('Compute_service record updated for %(host)s:%(node)s')
% {'host': self.host, 'node': self.nodename})
def _write_ext_resources(self, resources):
resources['stats'] = {}
resources['stats'].update(self.stats)
self.ext_resources_handler.write_resources(resources)
def _create(self, context, values):
"""Create the compute node in the DB."""
# initialize load stats from existing instances:
self._write_ext_resources(values)
# NOTE(pmurray): the stats field is stored as a json string. The
# json conversion will be done automatically by the ComputeNode object
# so this can be removed when using ComputeNode.
values['stats'] = jsonutils.dumps(values['stats'])
self.compute_node = self.conductor_api.compute_node_create(context,
values)
# NOTE(sbauza): We don't want to miss the first creation event
self._update_resource_stats(context, values)
def _get_service(self, context):
try:
return self.conductor_api.service_get_by_compute_host(context,
self.host)
except exception.NotFound:
LOG.warn(_("No service record for host %s"), self.host)
def _report_hypervisor_resource_view(self, resources):
"""Log the hypervisor's view of free resources.
This is just a snapshot of resource usage recorded by the
virt driver.
The following resources are logged:
- free memory
- free disk
- free CPUs
- assignable PCI devices
"""
free_ram_mb = resources['memory_mb'] - resources['memory_mb_used']
free_disk_gb = resources['local_gb'] - resources['local_gb_used']
LOG.debug("Hypervisor: free ram (MB): %s" % free_ram_mb)
LOG.debug("Hypervisor: free disk (GB): %s" % free_disk_gb)
vcpus = resources['vcpus']
if vcpus:
free_vcpus = vcpus - resources['vcpus_used']
LOG.debug("Hypervisor: free VCPUs: %s" % free_vcpus)
else:
LOG.debug("Hypervisor: VCPU information unavailable")
if 'pci_passthrough_devices' in resources and \
resources['pci_passthrough_devices']:
LOG.debug("Hypervisor: assignable PCI devices: %s" %
resources['pci_passthrough_devices'])
else:
LOG.debug("Hypervisor: no assignable PCI devices")
def _report_final_resource_view(self, resources):
"""Report final calculate of physical memory, used virtual memory,
disk, usable vCPUs, used virtual CPUs and PCI devices,
including instance calculations and in-progress resource claims. These
values will be exposed via the compute node table to the scheduler.
"""
LOG.audit(_("Total physical ram (MB): %(pram)s, "
"total allocated virtual ram (MB): %(vram)s"),
{'pram': resources['memory_mb'],
'vram': resources['memory_mb_used']})
LOG.audit(_("Free disk (GB): %s") % resources['free_disk_gb'])
vcpus = resources['vcpus']
if vcpus:
LOG.audit(_("Total usable vcpus: %(tcpu)s, "
"total allocated vcpus: %(ucpu)s"),
{'tcpu': vcpus, 'ucpu': resources['vcpus_used']})
else:
LOG.audit(_("Free VCPU information unavailable"))
if 'pci_stats' in resources:
LOG.audit(_("PCI stats: %s"), resources['pci_stats'])
def _resource_change(self, resources):
"""Check to see if any resouces have changed."""
if cmp(resources, self.old_resources) != 0:
self.old_resources = copy.deepcopy(resources)
return True
return False
def _update(self, context, values):
"""Update partial stats locally and populate them to Scheduler."""
self._write_ext_resources(values)
# NOTE(pmurray): the stats field is stored as a json string. The
# json conversion will be done automatically by the ComputeNode object
# so this can be removed when using ComputeNode.
values['stats'] = jsonutils.dumps(values['stats'])
if not self._resource_change(values):
return
if "service" in self.compute_node:
del self.compute_node['service']
# NOTE(sbauza): Now the DB update is asynchronous, we need to locally
# update the values
self.compute_node.update(values)
# Persist the stats to the Scheduler
self._update_resource_stats(context, values)
if self.pci_tracker:
self.pci_tracker.save(context)
def _update_resource_stats(self, context, values):
stats = values.copy()
stats['id'] = self.compute_node['id']
self.scheduler_client.update_resource_stats(
context, (self.host, self.nodename), stats)
def _update_usage(self, context, resources, usage, sign=1):
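# sign=1 adds the usage to the running totals, sign=-1 subtracts it (e.g. when a claim is dropped).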
mem_usage = usage['memory_mb']
overhead = self.driver.estimate_instance_overhead(usage)
mem_usage += overhead['memory_mb']
resources['memory_mb_used'] += sign * mem_usage
resources['local_gb_used'] += sign * usage.get('root_gb', 0)
resources['local_gb_used'] += sign * usage.get('ephemeral_gb', 0)
# free ram and disk may be negative, depending on policy:
resources['free_ram_mb'] = (resources['memory_mb'] -
resources['memory_mb_used'])
resources['free_disk_gb'] = (resources['local_gb'] -
resources['local_gb_used'])
resources['running_vms'] = self.stats.num_instances
self.ext_resources_handler.update_from_instance(usage, sign)
# Calculate the numa usage
free = sign == -1
updated_numa_topology = hardware.get_host_numa_usage_from_instance(
resources, usage, free)
resources['numa_topology'] = updated_numa_topology
def _update_usage_from_migration(self, context, instance, image_meta,
resources, migration):
"""Update usage for a single migration. The record may
represent an incoming or outbound migration.
"""
uuid = migration['instance_uuid']
LOG.audit(_("Updating from migration %s") % uuid)
incoming = (migration['dest_compute'] == self.host and
migration['dest_node'] == self.nodename)
outbound = (migration['source_compute'] == self.host and
migration['source_node'] == self.nodename)
same_node = (incoming and outbound)
record = self.tracked_instances.get(uuid, None)
itype = None
if same_node:
# same node resize. record usage for whichever instance type the
# instance is *not* in:
if (instance['instance_type_id'] ==
migration['old_instance_type_id']):
itype = self._get_instance_type(context, instance, 'new_',
migration['new_instance_type_id'])
else:
# instance record already has new flavor, hold space for a
# possible revert to the old instance type:
itype = self._get_instance_type(context, instance, 'old_',
migration['old_instance_type_id'])
elif incoming and not record:
# instance has not yet migrated here:
itype = self._get_instance_type(context, instance, 'new_',
migration['new_instance_type_id'])
elif outbound and not record:
# instance migrated, but record usage for a possible revert:
itype = self._get_instance_type(context, instance, 'old_',
migration['old_instance_type_id'])
if image_meta is None:
image_meta = utils.get_image_from_system_metadata(
instance['system_metadata'])
if itype:
numa_topology = (
hardware.VirtNUMAInstanceTopology.get_constraints(
itype, image_meta))
usage = self._get_usage_dict(
itype, numa_topology=numa_topology)
if self.pci_tracker:
self.pci_tracker.update_pci_for_migration(context, instance)
self._update_usage(context, resources, usage)
if self.pci_tracker:
resources['pci_stats'] = jsonutils.dumps(
self.pci_tracker.stats)
else:
resources['pci_stats'] = jsonutils.dumps([])
self.tracked_migrations[uuid] = (migration, itype)
def _update_usage_from_migrations(self, context, resources, migrations):
self.tracked_migrations.clear()
filtered = {}
# do some defensive filtering against bad migrations records in the
# database:
for migration in migrations:
instance = migration['instance']
if not instance:
# migration referencing deleted instance
continue
uuid = instance['uuid']
# skip migration if instance isn't in a resize state:
if not self._instance_in_resize_state(instance):
LOG.warn(_("Instance not resizing, skipping migration."),
instance_uuid=uuid)
continue
# filter to most recently updated migration for each instance:
m = filtered.get(uuid, None)
if not m or migration['updated_at'] >= m['updated_at']:
filtered[uuid] = migration
for migration in filtered.values():
instance = migration['instance']
try:
self._update_usage_from_migration(context, instance, None,
resources, migration)
except exception.FlavorNotFound:
LOG.warn(_("Flavor could not be found, skipping "
"migration."), instance_uuid=uuid)
continue
def _update_usage_from_instance(self, context, resources, instance):
"""Update usage for a single instance."""
uuid = instance['uuid']
is_new_instance = uuid not in self.tracked_instances
is_deleted_instance = instance['vm_state'] == vm_states.DELETED
if is_new_instance:
self.tracked_instances[uuid] = obj_base.obj_to_primitive(instance)
sign = 1
if is_deleted_instance:
self.tracked_instances.pop(uuid)
sign = -1
self.stats.update_stats_for_instance(instance)
if self.pci_tracker:
self.pci_tracker.update_pci_for_instance(context, instance)
# if it's a new or deleted instance:
if is_new_instance or is_deleted_instance:
# new or deleted instance, update compute node resource usage:
self._update_usage(context, resources, instance, sign=sign)
resources['current_workload'] = self.stats.calculate_workload()
if self.pci_tracker:
resources['pci_stats'] = jsonutils.dumps(self.pci_tracker.stats)
else:
resources['pci_stats'] = jsonutils.dumps([])
def _update_usage_from_instances(self, context, resources, instances):
"""Calculate resource usage based on instance utilization. This is
different than the hypervisor's view as it will account for all
instances assigned to the local compute host, even if they are not
currently powered on.
"""
self.tracked_instances.clear()
# purge old stats and init with anything passed in by the driver
self.stats.clear()
self.stats.digest_stats(resources.get('stats'))
# set some initial values, reserve room for host/hypervisor:
resources['local_gb_used'] = CONF.reserved_host_disk_mb / 1024
resources['memory_mb_used'] = CONF.reserved_host_memory_mb
resources['free_ram_mb'] = (resources['memory_mb'] -
resources['memory_mb_used'])
resources['free_disk_gb'] = (resources['local_gb'] -
resources['local_gb_used'])
resources['current_workload'] = 0
resources['running_vms'] = 0
# Reset values for extended resources
self.ext_resources_handler.reset_resources(resources, self.driver)
for instance in instances:
if instance['vm_state'] != vm_states.DELETED:
self._update_usage_from_instance(context, resources, instance)
def _find_orphaned_instances(self):
"""Given the set of instances and migrations already account for
by resource tracker, sanity check the hypervisor to determine
if there are any "orphaned" instances left hanging around.
Orphans could be consuming memory and should be accounted for in
usage calculations to guard against potential out of memory
errors.
"""
uuids1 = frozenset(self.tracked_instances.keys())
uuids2 = frozenset(self.tracked_migrations.keys())
uuids = uuids1 | uuids2
usage = self.driver.get_per_instance_usage()
vuuids = frozenset(usage.keys())
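# Anything the hypervisor reports that the tracker does not know about is treated as an orphan.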
orphan_uuids = vuuids - uuids
orphans = [usage[uuid] for uuid in orphan_uuids]
return orphans
def _update_usage_from_orphans(self, context, resources, orphans):
"""Include orphaned instances in usage."""
for orphan in orphans:
memory_mb = orphan['memory_mb']
LOG.warn(_("Detected running orphan instance: %(uuid)s (consuming "
"%(memory_mb)s MB memory)"),
{'uuid': orphan['uuid'], 'memory_mb': memory_mb})
# just record memory usage for the orphan
usage = {'memory_mb': memory_mb}
self._update_usage(context, resources, usage)
def _verify_resources(self, resources):
resource_keys = ["vcpus", "memory_mb", "local_gb", "cpu_info",
"vcpus_used", "memory_mb_used", "local_gb_used",
"numa_topology"]
missing_keys = [k for k in resource_keys if k not in resources]
if missing_keys:
reason = _("Missing keys: %s") % missing_keys
raise exception.InvalidInput(reason=reason)
def _instance_in_resize_state(self, instance):
vm = instance['vm_state']
task = instance['task_state']
if vm == vm_states.RESIZED:
return True
if (vm in [vm_states.ACTIVE, vm_states.STOPPED]
and task in [task_states.RESIZE_PREP,
task_states.RESIZE_MIGRATING, task_states.RESIZE_MIGRATED,
task_states.RESIZE_FINISH]):
return True
return False
def _get_instance_type(self, context, instance, prefix,
instance_type_id=None):
"""Get the instance type from sys metadata if it's stashed. If not,
fall back to fetching it via the object API.
See bug 1164110
"""
try:
return flavors.extract_flavor(instance, prefix)
except KeyError:
if not instance_type_id:
instance_type_id = instance['instance_type_id']
return objects.Flavor.get_by_id(context, instance_type_id)
def _get_usage_dict(self, object_or_dict, **updates):
"""Make a usage dict _update methods expect.
Accepts a dict or an Instance or Flavor object, and a set of updates.
Converts the object to a dict and applies the updates.
:param object_or_dict: instance or flavor as an object or just a dict
:param updates: key-value pairs to update the passed object.
Currently only considers 'numa_topology', all other
keys are ignored.
:returns: a dict with all the information from object_or_dict updated
with updates
"""
usage = {}
if isinstance(object_or_dict, (objects.Flavor, objects.Instance)):
usage = obj_base.obj_to_primitive(object_or_dict)
else:
usage.update(object_or_dict)
for key in ('numa_topology',):
if key in updates:
usage[key] = updates[key]
return usage
|
|
import numpy as np
import matplotlib.pyplot as plt
import h5py
import scipy.io
import sklearn
import sklearn.datasets
def sigmoid(x):
"""
Compute the sigmoid of x
Arguments:
x -- A scalar or numpy array of any size.
Return:
s -- sigmoid(x)
"""
s = 1/(1+np.exp(-x))
return s
def relu(x):
"""
Compute the relu of x
Arguments:
x -- A scalar or numpy array of any size.
Return:
s -- relu(x)
"""
s = np.maximum(0,x)
return s
def load_params_and_grads(seed=1):
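# Build a small, reproducible set of random parameters and gradients for exercising optimizers.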
np.random.seed(seed)
W1 = np.random.randn(2,3)
b1 = np.random.randn(2,1)
W2 = np.random.randn(3,3)
b2 = np.random.randn(3,1)
dW1 = np.random.randn(2,3)
db1 = np.random.randn(2,1)
dW2 = np.random.randn(3,3)
db2 = np.random.randn(3,1)
return W1, b1, W2, b2, dW1, db1, dW2, db2
def initialize_parameters(layer_dims):
"""
Arguments:
layer_dims -- python array (list) containing the dimensions of each layer in our network
Returns:
parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
W1 -- weight matrix of shape (layer_dims[1], layer_dims[0])
b1 -- bias vector of shape (layer_dims[1], 1)
Wl -- weight matrix of shape (layer_dims[l], layer_dims[l-1])
bl -- bias vector of shape (layer_dims[l], 1)
Tips:
- For example: the layer_dims for the "Planar Data classification model" would have been [2,2,1].
This means W1's shape was (2,2), b1 was (2,1), W2 was (1,2) and b2 was (1,1). Now you have to generalize it!
- In the for loop, use parameters['W' + str(l)] to access Wl, where l is the iterative integer.
"""
np.random.seed(3)
parameters = {}
L = len(layer_dims) # number of layers in the network
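# He initialization: scale each weight matrix by sqrt(2 / fan_in) so activations stay well scaled for ReLU layers.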
for l in range(1, L):
parameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l-1])* np.sqrt(2 / layer_dims[l-1])
parameters['b' + str(l)] = np.zeros((layer_dims[l], 1))
assert parameters['W' + str(l)].shape == (layer_dims[l], layer_dims[l-1])
assert parameters['b' + str(l)].shape == (layer_dims[l], 1)
return parameters
def compute_cost(a3, Y):
"""
Implement the cost function
Arguments:
a3 -- post-activation, output of forward propagation
Y -- "true" labels vector, same shape as a3
Returns:
cost - value of the cost function
"""
m = Y.shape[1]
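# Cross-entropy cost: average of -y*log(a3) - (1-y)*log(1-a3) over the m examples.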
logprobs = np.multiply(-np.log(a3),Y) + np.multiply(-np.log(1 - a3), 1 - Y)
cost = 1./m * np.sum(logprobs)
return cost
def forward_propagation(X, parameters):
"""
Implements the forward propagation (and computes the loss) presented in Figure 2.
Arguments:
X -- input dataset, of shape (input size, number of examples)
parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3":
W1 -- weight matrix of shape ()
b1 -- bias vector of shape ()
W2 -- weight matrix of shape ()
b2 -- bias vector of shape ()
W3 -- weight matrix of shape ()
b3 -- bias vector of shape ()
Returns:
a3 -- output of the final (sigmoid) activation
cache -- tuple of intermediate values needed by backward_propagation()
"""
# retrieve parameters
W1 = parameters["W1"]
b1 = parameters["b1"]
W2 = parameters["W2"]
b2 = parameters["b2"]
W3 = parameters["W3"]
b3 = parameters["b3"]
# LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID
z1 = np.dot(W1, X) + b1
a1 = relu(z1)
z2 = np.dot(W2, a1) + b2
a2 = relu(z2)
z3 = np.dot(W3, a2) + b3
a3 = sigmoid(z3)
cache = (z1, a1, W1, b1, z2, a2, W2, b2, z3, a3, W3, b3)
return a3, cache
def backward_propagation(X, Y, cache):
"""
Implement the backward propagation presented in figure 2.
Arguments:
X -- input dataset, of shape (input size, number of examples)
Y -- true "label" vector (containing 0 if cat, 1 if non-cat)
cache -- cache output from forward_propagation()
Returns:
gradients -- A dictionary with the gradients with respect to each parameter, activation and pre-activation variables
"""
m = X.shape[1]
(z1, a1, W1, b1, z2, a2, W2, b2, z3, a3, W3, b3) = cache
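# Output layer: with sigmoid + cross-entropy the error term simplifies to (a3 - Y), averaged over m.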
dz3 = 1./m * (a3 - Y)
dW3 = np.dot(dz3, a2.T)
db3 = np.sum(dz3, axis=1, keepdims = True)
da2 = np.dot(W3.T, dz3)
dz2 = np.multiply(da2, np.int64(a2 > 0))
dW2 = np.dot(dz2, a1.T)
db2 = np.sum(dz2, axis=1, keepdims = True)
da1 = np.dot(W2.T, dz2)
dz1 = np.multiply(da1, np.int64(a1 > 0))
dW1 = np.dot(dz1, X.T)
db1 = np.sum(dz1, axis=1, keepdims = True)
gradients = {"dz3": dz3, "dW3": dW3, "db3": db3,
"da2": da2, "dz2": dz2, "dW2": dW2, "db2": db2,
"da1": da1, "dz1": dz1, "dW1": dW1, "db1": db1}
return gradients
def predict(X, y, parameters):
"""
This function is used to predict the results of an n-layer neural network.
Arguments:
X -- data set of examples you would like to label
y -- true "label" vector, used only to report accuracy
parameters -- parameters of the trained model
Returns:
p -- predictions for the given dataset X
"""
m = X.shape[1]
p = np.zeros((1, m), dtype=int)
# Forward propagation
a3, caches = forward_propagation(X, parameters)
# convert probas to 0/1 predictions
for i in range(0, a3.shape[1]):
if a3[0,i] > 0.5:
p[0,i] = 1
else:
p[0,i] = 0
# print results
#print ("predictions: " + str(p[0,:]))
#print ("true labels: " + str(y[0,:]))
print("Accuracy: " + str(np.mean((p[0,:] == y[0,:]))))
return p
def load_2D_dataset():
data = scipy.io.loadmat('datasets/data.mat')
train_X = data['X'].T
train_Y = data['y'].T
test_X = data['Xval'].T
test_Y = data['yval'].T
plt.scatter(train_X[0, :], train_X[1, :], c=train_Y.ravel(), s=40, cmap=plt.cm.Spectral);
return train_X, train_Y, test_X, test_Y
def plot_decision_boundary(model, X, y):
# Set min and max values and give it some padding
x_min, x_max = X[0, :].min() - 1, X[0, :].max() + 1
y_min, y_max = X[1, :].min() - 1, X[1, :].max() + 1
h = 0.01
# Generate a grid of points with distance h between them
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Predict the function value for the whole grid
Z = model(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# Plot the contour and training examples
plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
plt.ylabel('x2')
plt.xlabel('x1')
plt.scatter(X[0, :], X[1, :], c=y.ravel(), cmap=plt.cm.Spectral)
plt.show()
def predict_dec(parameters, X):
"""
Used for plotting decision boundary.
Arguments:
parameters -- python dictionary containing your parameters
X -- input data of size (m, K)
Returns
predictions -- vector of predictions of our model (red: 0 / blue: 1)
"""
# Predict using forward propagation and a classification threshold of 0.5
a3, cache = forward_propagation(X, parameters)
predictions = (a3 > 0.5)
return predictions
def load_dataset():
np.random.seed(3)
train_X, train_Y = sklearn.datasets.make_moons(n_samples=300, noise=.2) #300 #0.2
# Visualize the data
plt.scatter(train_X[:, 0], train_X[:, 1], c=train_Y.ravel(), s=40, cmap=plt.cm.Spectral);
train_X = train_X.T
train_Y = train_Y.reshape((1, train_Y.shape[0]))
return train_X, train_Y
|
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test replace by fee code
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
MAX_REPLACEMENT_LIMIT = 100
def txToHex(tx):
return bytes_to_hex_str(tx.serialize())
def make_utxo(node, amount, confirmed=True, scriptPubKey=CScript([1])):
"""Create a txout with a given amount and scriptPubKey
Mines coins as needed.
confirmed - txouts created will be confirmed in the blockchain;
unconfirmed otherwise.
"""
fee = 1*COIN
while node.getbalance() < satoshi_round((amount + fee)/COIN):
node.generate(100)
#print (node.getbalance(), amount, fee)
new_addr = node.getnewaddress()
#print new_addr
txid = node.sendtoaddress(new_addr, satoshi_round((amount+fee)/COIN))
tx1 = node.getrawtransaction(txid, 1)
txid = int(txid, 16)
i = None
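# Find the index of the output that pays the freshly generated address.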
for i, txout in enumerate(tx1['vout']):
#print i, txout['scriptPubKey']['addresses']
if txout['scriptPubKey']['addresses'] == [new_addr]:
#print i
break
assert i is not None
tx2 = CTransaction()
tx2.vin = [CTxIn(COutPoint(txid, i))]
tx2.vout = [CTxOut(amount, scriptPubKey)]
tx2.rehash()
signed_tx = node.signrawtransaction(txToHex(tx2))
txid = node.sendrawtransaction(signed_tx['hex'], True)
# If requested, ensure txouts are confirmed.
if confirmed:
mempool_size = len(node.getrawmempool())
while mempool_size > 0:
node.generate(1)
new_size = len(node.getrawmempool())
# Error out if we have something stuck in the mempool, as this
# would likely be a bug.
assert(new_size < mempool_size)
mempool_size = new_size
return COutPoint(int(txid, 16), 0)
class ReplaceByFeeTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 1
self.setup_clean_chain = False
def setup_network(self):
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-maxorphantx=1000", "-debug",
"-whitelist=127.0.0.1",
"-limitancestorcount=50",
"-limitancestorsize=101",
"-limitdescendantcount=200",
"-limitdescendantsize=101"
]))
self.is_network_split = False
def run_test(self):
make_utxo(self.nodes[0], 1*COIN)
print("Running test simple doublespend...")
self.test_simple_doublespend()
print("Running test doublespend chain...")
self.test_doublespend_chain()
print("Running test doublespend tree...")
self.test_doublespend_tree()
print("Running test replacement feeperkb...")
self.test_replacement_feeperkb()
print("Running test spends of conflicting outputs...")
self.test_spends_of_conflicting_outputs()
print("Running test new unconfirmed inputs...")
self.test_new_unconfirmed_inputs()
print("Running test too many replacements...")
self.test_too_many_replacements()
print("Running test opt-in...")
self.test_opt_in()
print("Running test prioritised transactions...")
self.test_prioritised_transactions()
print("Passed\n")
def test_simple_doublespend(self):
"""Simple doublespend"""
tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx1a_hex = txToHex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
# Should fail because we haven't changed the fee
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(1*COIN, CScript([b'b']))]
tx1b_hex = txToHex(tx1b)
try:
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26) # insufficient fee
else:
assert(False)
# Extra 0.1 BTC fee
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(int(0.9*COIN), CScript([b'b']))]
tx1b_hex = txToHex(tx1b)
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
mempool = self.nodes[0].getrawmempool()
assert (tx1a_txid not in mempool)
assert (tx1b_txid in mempool)
assert_equal(tx1b_hex, self.nodes[0].getrawtransaction(tx1b_txid))
def test_doublespend_chain(self):
"""Doublespend of a long chain"""
initial_nValue = 50*COIN
tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)
prevout = tx0_outpoint
remaining_value = initial_nValue
chain_txids = []
while remaining_value > 10*COIN:
remaining_value -= 1*COIN
tx = CTransaction()
tx.vin = [CTxIn(prevout, nSequence=0)]
tx.vout = [CTxOut(remaining_value, CScript([1]))]
tx_hex = txToHex(tx)
txid = self.nodes[0].sendrawtransaction(tx_hex, True)
chain_txids.append(txid)
prevout = COutPoint(int(txid, 16), 0)
# Whether the double-spend is allowed is evaluated by including all
# child fees - 40 BTC - so this attempt is rejected.
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(initial_nValue - 30*COIN, CScript([1]))]
dbl_tx_hex = txToHex(dbl_tx)
try:
self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26) # insufficient fee
else:
assert(False) # transaction mistakenly accepted!
# Accepted with sufficient fee
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(1*COIN, CScript([1]))]
dbl_tx_hex = txToHex(dbl_tx)
self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
mempool = self.nodes[0].getrawmempool()
for doublespent_txid in chain_txids:
assert(doublespent_txid not in mempool)
def test_doublespend_tree(self):
"""Doublespend of a big tree of transactions"""
initial_nValue = 50*COIN
tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)
def branch(prevout, initial_value, max_txs, tree_width=5, fee=0.0001*COIN, _total_txs=None):
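# Recursively split each output tree_width ways, broadcasting every transaction, until max_txs transactions have been created.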
if _total_txs is None:
_total_txs = [0]
if _total_txs[0] >= max_txs:
return
txout_value = (initial_value - fee) // tree_width
if txout_value < fee:
return
vout = [CTxOut(txout_value, CScript([i+1]))
for i in range(tree_width)]
tx = CTransaction()
tx.vin = [CTxIn(prevout, nSequence=0)]
tx.vout = vout
tx_hex = txToHex(tx)
assert(len(tx.serialize()) < 100000)
txid = self.nodes[0].sendrawtransaction(tx_hex, True)
yield tx
_total_txs[0] += 1
txid = int(txid, 16)
for i, txout in enumerate(tx.vout):
for x in branch(COutPoint(txid, i), txout_value,
max_txs,
tree_width=tree_width, fee=fee,
_total_txs=_total_txs):
yield x
fee = int(0.0001*COIN)
n = MAX_REPLACEMENT_LIMIT
tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee))
assert_equal(len(tree_txs), n)
# Attempt double-spend, will fail because too little fee paid
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(initial_nValue - fee*n, CScript([1]))]
dbl_tx_hex = txToHex(dbl_tx)
try:
self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26) # insufficient fee
else:
assert(False)
# 1 BTC fee is enough
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(initial_nValue - fee*n - 1*COIN, CScript([1]))]
dbl_tx_hex = txToHex(dbl_tx)
self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
mempool = self.nodes[0].getrawmempool()
for tx in tree_txs:
tx.rehash()
assert (tx.hash not in mempool)
# Try again, but with more total transactions than the "max txs
# double-spent at once" anti-DoS limit.
for n in (MAX_REPLACEMENT_LIMIT+1, MAX_REPLACEMENT_LIMIT*2):
fee = int(0.0001*COIN)
tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)
tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee))
assert_equal(len(tree_txs), n)
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(initial_nValue - 2*fee*n, CScript([1]))]
dbl_tx_hex = txToHex(dbl_tx)
try:
self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
assert_equal("too many potential replacements" in exp.error['message'], True)
else:
assert(False)
for tx in tree_txs:
tx.rehash()
self.nodes[0].getrawtransaction(tx.hash)
def test_replacement_feeperkb(self):
"""Replacement requires fee-per-KB to be higher"""
tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx1a_hex = txToHex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
# Higher fee, but the fee per KB is much lower, so the replacement is
# rejected.
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(int(0.001*COIN), CScript([b'a'*999000]))]
tx1b_hex = txToHex(tx1b)
try:
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26) # insufficient fee
else:
assert(False)
def test_spends_of_conflicting_outputs(self):
"""Replacements that spend conflicting tx outputs are rejected"""
utxo1 = make_utxo(self.nodes[0], int(1.2*COIN))
utxo2 = make_utxo(self.nodes[0], 3*COIN)
tx1a = CTransaction()
tx1a.vin = [CTxIn(utxo1, nSequence=0)]
tx1a.vout = [CTxOut(int(1.1*COIN), CScript([b'a']))]
tx1a_hex = txToHex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
tx1a_txid = int(tx1a_txid, 16)
# Direct spend an output of the transaction we're replacing.
tx2 = CTransaction()
tx2.vin = [CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0)]
tx2.vin.append(CTxIn(COutPoint(tx1a_txid, 0), nSequence=0))
tx2.vout = tx1a.vout
tx2_hex = txToHex(tx2)
try:
tx2_txid = self.nodes[0].sendrawtransaction(tx2_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
else:
assert(False)
# Spend tx1a's output to test the indirect case.
tx1b = CTransaction()
tx1b.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)]
tx1b.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx1b_hex = txToHex(tx1b)
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
tx1b_txid = int(tx1b_txid, 16)
tx2 = CTransaction()
tx2.vin = [CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0),
CTxIn(COutPoint(tx1b_txid, 0))]
tx2.vout = tx1a.vout
tx2_hex = txToHex(tx2)
try:
tx2_txid = self.nodes[0].sendrawtransaction(tx2_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
else:
assert(False)
def test_new_unconfirmed_inputs(self):
"""Replacements that add new unconfirmed inputs are rejected"""
confirmed_utxo = make_utxo(self.nodes[0], int(1.1*COIN))
unconfirmed_utxo = make_utxo(self.nodes[0], int(0.1*COIN), False)
tx1 = CTransaction()
tx1.vin = [CTxIn(confirmed_utxo)]
tx1.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx1_hex = txToHex(tx1)
tx1_txid = self.nodes[0].sendrawtransaction(tx1_hex, True)
tx2 = CTransaction()
tx2.vin = [CTxIn(confirmed_utxo), CTxIn(unconfirmed_utxo)]
tx2.vout = tx1.vout
tx2_hex = txToHex(tx2)
try:
tx2_txid = self.nodes[0].sendrawtransaction(tx2_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
else:
assert(False)
def test_too_many_replacements(self):
"""Replacements that evict too many transactions are rejected"""
# Try directly replacing more than MAX_REPLACEMENT_LIMIT
# transactions
# Start by creating a single transaction with many outputs
initial_nValue = 10*COIN
utxo = make_utxo(self.nodes[0], initial_nValue)
fee = int(0.0001*COIN)
split_value = int((initial_nValue-fee)/(MAX_REPLACEMENT_LIMIT+1))
actual_fee = initial_nValue - split_value*(MAX_REPLACEMENT_LIMIT+1)
outputs = []
for i in range(MAX_REPLACEMENT_LIMIT+1):
outputs.append(CTxOut(split_value, CScript([1])))
splitting_tx = CTransaction()
splitting_tx.vin = [CTxIn(utxo, nSequence=0)]
splitting_tx.vout = outputs
splitting_tx_hex = txToHex(splitting_tx)
txid = self.nodes[0].sendrawtransaction(splitting_tx_hex, True)
txid = int(txid, 16)
# Now spend each of those outputs individually
for i in range(MAX_REPLACEMENT_LIMIT+1):
tx_i = CTransaction()
tx_i.vin = [CTxIn(COutPoint(txid, i), nSequence=0)]
tx_i.vout = [CTxOut(split_value-fee, CScript([b'a']))]
tx_i_hex = txToHex(tx_i)
self.nodes[0].sendrawtransaction(tx_i_hex, True)
# Now create doublespend of the whole lot; should fail.
# Need a big enough fee to cover all spending transactions and have
# a higher fee rate
double_spend_value = (split_value-100*fee)*(MAX_REPLACEMENT_LIMIT+1)
inputs = []
for i in range(MAX_REPLACEMENT_LIMIT+1):
inputs.append(CTxIn(COutPoint(txid, i), nSequence=0))
double_tx = CTransaction()
double_tx.vin = inputs
double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))]
double_tx_hex = txToHex(double_tx)
try:
self.nodes[0].sendrawtransaction(double_tx_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
assert_equal("too many potential replacements" in exp.error['message'], True)
else:
assert(False)
# If we remove an input, it should pass
double_tx = CTransaction()
double_tx.vin = inputs[0:-1]
double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))]
double_tx_hex = txToHex(double_tx)
self.nodes[0].sendrawtransaction(double_tx_hex, True)
def test_opt_in(self):
""" Replacing should only work if orig tx opted in """
tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
# Create a non-opting in transaction
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0xffffffff)]
tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx1a_hex = txToHex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
# Shouldn't be able to double-spend
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(int(0.9*COIN), CScript([b'b']))]
tx1b_hex = txToHex(tx1b)
try:
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
else:
print(tx1b_txid)
assert(False)
tx1_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
# Create a different non-opting in transaction
tx2a = CTransaction()
tx2a.vin = [CTxIn(tx1_outpoint, nSequence=0xfffffffe)]
tx2a.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx2a_hex = txToHex(tx2a)
tx2a_txid = self.nodes[0].sendrawtransaction(tx2a_hex, True)
# Still shouldn't be able to double-spend
tx2b = CTransaction()
tx2b.vin = [CTxIn(tx1_outpoint, nSequence=0)]
tx2b.vout = [CTxOut(int(0.9*COIN), CScript([b'b']))]
tx2b_hex = txToHex(tx2b)
try:
tx2b_txid = self.nodes[0].sendrawtransaction(tx2b_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
else:
assert(False)
# Now create a new transaction that spends from tx1a and tx2a
# opt-in on one of the inputs
# Transaction should be replaceable on either input
tx1a_txid = int(tx1a_txid, 16)
tx2a_txid = int(tx2a_txid, 16)
tx3a = CTransaction()
tx3a.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0xffffffff),
CTxIn(COutPoint(tx2a_txid, 0), nSequence=0xfffffffd)]
tx3a.vout = [CTxOut(int(0.9*COIN), CScript([b'c'])), CTxOut(int(0.9*COIN), CScript([b'd']))]
tx3a_hex = txToHex(tx3a)
self.nodes[0].sendrawtransaction(tx3a_hex, True)
tx3b = CTransaction()
tx3b.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)]
tx3b.vout = [CTxOut(int(0.5*COIN), CScript([b'e']))]
tx3b_hex = txToHex(tx3b)
tx3c = CTransaction()
tx3c.vin = [CTxIn(COutPoint(tx2a_txid, 0), nSequence=0)]
tx3c.vout = [CTxOut(int(0.5*COIN), CScript([b'f']))]
tx3c_hex = txToHex(tx3c)
self.nodes[0].sendrawtransaction(tx3b_hex, True)
# If tx3b was accepted, tx3c won't look like a replacement,
# but make sure it is accepted anyway
self.nodes[0].sendrawtransaction(tx3c_hex, True)
def test_prioritised_transactions(self):
# Ensure that fee deltas used via prioritisetransaction are
# correctly used by replacement logic
# 1. Check that feeperkb uses modified fees
tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx1a_hex = txToHex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
# Higher fee, but the actual fee per KB is much lower.
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(int(0.001*COIN), CScript([b'a'*740000]))]
tx1b_hex = txToHex(tx1b)
# Verify tx1b cannot replace tx1a.
try:
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
else:
assert(False)
# Use prioritisetransaction to set tx1a's fee to 0.
self.nodes[0].prioritisetransaction(tx1a_txid, 0, int(-0.1*COIN))
# Now tx1b should be able to replace tx1a
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
assert(tx1b_txid in self.nodes[0].getrawmempool())
# 2. Check that absolute fee checks use modified fee.
tx1_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
tx2a = CTransaction()
tx2a.vin = [CTxIn(tx1_outpoint, nSequence=0)]
tx2a.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx2a_hex = txToHex(tx2a)
tx2a_txid = self.nodes[0].sendrawtransaction(tx2a_hex, True)
# Lower fee, but we'll prioritise it
tx2b = CTransaction()
tx2b.vin = [CTxIn(tx1_outpoint, nSequence=0)]
tx2b.vout = [CTxOut(int(1.01*COIN), CScript([b'a']))]
tx2b.rehash()
tx2b_hex = txToHex(tx2b)
# Verify tx2b cannot replace tx2a.
try:
tx2b_txid = self.nodes[0].sendrawtransaction(tx2b_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
else:
assert(False)
# Now prioritise tx2b to have a higher modified fee
self.nodes[0].prioritisetransaction(tx2b.hash, 0, int(0.1*COIN))
# tx2b should now be accepted
tx2b_txid = self.nodes[0].sendrawtransaction(tx2b_hex, True)
assert(tx2b_txid in self.nodes[0].getrawmempool())
if __name__ == '__main__':
ReplaceByFeeTest().main()
|
|
# Chicago Tribune News Applications fabfile
# No copying allowed
from fabric.api import *
"""
Base configuration
"""
#name of the deployed site if different from the name of the project
env.site_name = 'censusweb'
env.project_name = 'censusweb'
env.database_password = 'Xy9XKembdu'
env.site_media_prefix = "site_media"
env.admin_media_prefix = "admin_media"
env.path = '/home/ubuntu/sites/%(project_name)s' % env
env.log_path = '/home/ubuntu/logs'
env.env_path = '/home/ubuntu/sites/virtualenvs/%(project_name)s' % env
env.repo_path = '%(path)s' % env
env.site_path = '%(repo_path)s/censusweb' % env
env.dataprocessing_path = '%(repo_path)s/dataprocessing' % env
env.apache_config_path = '/home/ubuntu/apache/%(project_name)s' % env
env.python = 'python2.7'
env.repository_url = "[email protected]:ireapps/census.git"
env.memcached_server_address = "cache"
env.multi_server = False
"""
Environments
"""
def production():
"""
Work on production environment
"""
#TKTK
env.settings = 'production'
env.hosts = ['census.ire.org']
env.user = 'ubuntu'
env.s3_bucket = 'censusmedia.ire.org'
env.site_domain = 'census.ire.org'
env.cache_server = 'census.ire.org'
def staging():
"""
Work on staging environment
"""
env.settings = 'staging'
env.hosts = ['census.tribapps.com']
env.user = 'ubuntu'
env.s3_bucket = 'media-beta.tribapps.com'
env.site_domain = 'census.tribapps.com'
env.cache_server = 'census.tribapps.com'
"""
Branches
"""
def stable():
"""
Work on stable branch.
"""
env.branch = 'stable'
def master():
"""
Work on development branch.
"""
env.branch = 'master'
def branch(branch_name):
"""
Work on any specified branch.
"""
env.branch = branch_name
"""
Commands - setup
"""
def setup():
"""
Set up a fresh virtualenv, install everything we need, and fire up the database.
Does NOT perform the functions of deploy().
"""
require('settings', provided_by=[production, staging])
require('branch', provided_by=[stable, master, branch])
setup_directories()
setup_virtualenv()
clone_repo()
checkout_latest()
install_requirements()
destroy_database()
create_database()
load_data()
install_apache_conf()
deploy_requirements_to_s3()
def setup_directories():
"""
Create directories necessary for deployment.
"""
run('mkdir -p %(path)s' % env)
def setup_virtualenv():
"""
Set up a fresh virtualenv.
"""
run('virtualenv -p %(python)s --no-site-packages %(env_path)s;' % env)
run('source %(env_path)s/bin/activate; easy_install -U setuptools; easy_install -U pip;' % env)
def clone_repo():
"""
Do initial clone of the git repository.
"""
run('git clone %(repository_url)s %(repo_path)s' % env)
def checkout_latest():
"""
Pull the latest code on the specified branch.
"""
run('cd %(repo_path)s; git checkout %(branch)s; git pull origin %(branch)s' % env)
def install_requirements():
"""
Install the required packages using pip.
"""
run('source %(env_path)s/bin/activate; pip install -q -r %(site_path)s/requirements.txt' % env)
def install_apache_conf():
"""
Install the apache site config file.
"""
sudo('cp %(site_path)s/config/%(settings)s/apache %(apache_config_path)s' % env)
def deploy_requirements_to_s3():
"""
Deploy the admin media to s3.
"""
with settings(warn_only=True):
run('s3cmd del --recursive s3://%(s3_bucket)s/%(project_name)s/%(admin_media_prefix)s/' % env)
run('s3cmd -P --guess-mime-type --rexclude-from=%(site_path)s/s3exclude sync %(env_path)s/src/django/django/contrib/admin/media/ s3://%(s3_bucket)s/%(project_name)s/%(admin_media_prefix)s/' % env)
"""
Commands - deployment
"""
def deploy():
"""
Deploy the latest version of the site to the server and restart Apache2.
Does not perform the functions of load_new_data().
"""
require('settings', provided_by=[production, staging])
require('branch', provided_by=[stable, master, branch])
with settings(warn_only=True):
maintenance_up()
checkout_latest()
gzip_assets()
deploy_to_s3()
maintenance_down()
clear_cache()
def maintenance_up():
"""
Install the Apache maintenance configuration.
"""
sudo('cp %(site_path)s/config/%(settings)s/apache_maintenance %(apache_config_path)s' % env)
reboot()
def gzip_assets():
"""
GZips every file in the media directory and places the new file
in the gzip directory with the same filename.
"""
run('cd %(site_path)s; python gzip_assets.py' % env)
def deploy_to_s3():
"""
Deploy the latest project site media to S3.
"""
env.gzip_path = '%(site_path)s/gzip_media/' % env
run(('s3cmd -P --add-header=Content-encoding:gzip --guess-mime-type --rexclude-from=%(site_path)s/s3exclude sync %(gzip_path)s s3://%(s3_bucket)s/%(project_name)s/%(site_media_prefix)s/') % env)
def reboot():
"""
Restart the Apache2 server.
"""
if env.multi_server:
run('bounce-apaches-for-cluster')
else:
sudo('service apache2 restart')
def maintenance_down():
"""
Reinstall the normal site configuration.
"""
install_apache_conf()
reboot()
"""
Commands - rollback
"""
def rollback(commit_id):
"""
Rolls back to specified git commit hash or tag.
There is NO guarantee we have committed a valid dataset for an arbitrary
commit hash.
"""
require('settings', provided_by=[production, staging])
require('branch', provided_by=[stable, master, branch])
maintenance_up()
checkout_latest()
git_reset(commit_id)
gzip_assets()
deploy_to_s3()
maintenance_down()
def git_reset(commit_id):
"""
Reset the git repository to an arbitrary commit hash or tag.
"""
env.commit_id = commit_id
run("cd %(repo_path)s; git reset --hard %(commit_id)s" % env)
"""
Commands - data
"""
def load_new_data():
"""
Erase the current database and load new data from the SQL dump file.
"""
require('settings', provided_by=[production, staging])
maintenance_up()
pgpool_down()
destroy_database()
create_database()
load_data()
pgpool_up()
maintenance_down()
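# Note: create_database()/destroy_database() below take a callable so the same
# commands can run remotely (the default `run`) or locally (`local`), as
# local_bootstrap() and local_shiva() do further down.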
def create_database(func=run):
"""
Creates the user and database for this project.
"""
func('createuser -s %(project_name)s' % env)
func('echo "ALTER USER %(project_name)s with password %(database_password)s" | psql postgres' % env)
func('echo "GRANT ALL PRIVILEGES TO %(project_name)s;" | psql postgres' % env)
func('createdb -O %(project_name)s %(project_name)s -T template_postgis' % env)
def destroy_database(func=run):
"""
Destroys the user and database for this project.
Will not cause the fab to fail if they do not exist.
"""
with settings(warn_only=True):
func('dropdb %(project_name)s' % env)
func('dropuser %(project_name)s' % env)
def load_data():
"""
Loads data from the repository into PostgreSQL.
"""
run('psql -q %(project_name)s < %(site_path)s/data/psql/dump.sql' % env)
def pgpool_down():
"""
Stop pgpool so that it won't prevent the database from being rebuilt.
"""
sudo('/etc/init.d/pgpool stop')
def pgpool_up():
"""
Start pgpool.
"""
sudo('/etc/init.d/pgpool start')
"""
Commands - Data Processing
"""
def run_unattended_batch_command(command, command_log):
# Make sure log exists
run("touch %s" % command_log)
with cd(env.dataprocessing_path):
run("source %s/bin/activate; nohup %s >> %s < /dev/null &" % (env.env_path, command, command_log))
def batch_sf(state, fake=''):
"""
Kick off the SF 2000 data loader for a state.
"""
command = './batch_sf.sh %s %s %s' % (state, env.settings, fake)
loader_log = '%s/census.load.%s.log' % (env.log_path, state)
run_unattended_batch_command(command, loader_log)
def batch_sf_everything(fake=''):
"""
Kick off the SF data loaders for all states.
USE WITH CAUTION!
"""
command = 'python batch_sf_everything.py %s %s' % (env.settings, fake)
loader_log = '%s/census.load.everything.log' % (env.log_path)
run_unattended_batch_command(command, loader_log)
def batch_test():
"""
Kick off the test data loader.
USE WITH CAUTION!
"""
loader_log = '%(log_path)s/census.load.test.log' % env
run_unattended_batch_command('./batch_test.sh %s' % env.settings, loader_log)
def make_state_public(state):
"""
Make a state's data public.
"""
loader_log = '%(log_path)s/census.make_public.log' % env
run_unattended_batch_command('python make_state_public.py %s %s' % (env.settings, state), loader_log)
"""
Commands - miscellaneous
"""
def clear_cache():
"""
Restart memcache, wiping the current cache.
"""
if env.multi_server:
run('bounce-memcaches-for-cluster')
else:
sudo('service memcached restart')
run('curl -X PURGE -H "Host: %(site_domain)s" http://%(cache_server)s/' % env)
def echo_host():
"""
Echo the current host to the command line.
"""
run('echo %(settings)s; echo %(hosts)s' % env)
"""
Deaths, destroyers of worlds
"""
def shiva_the_destroyer():
"""
Remove all directories, databases, etc. associated with the application.
"""
with settings(warn_only=True):
run('rm -Rf %(path)s' % env)
run('rm -Rf %(env_path)s' % env)
pgpool_down()
run('dropdb %(project_name)s' % env)
run('dropuser %(project_name)s' % env)
pgpool_up()
sudo('rm %(apache_config_path)s' % env)
reboot()
run('s3cmd del --recursive s3://%(s3_bucket)s/' % env)
def local_shiva():
destroy_database(local)
def local_bootstrap():
create_database(local)
# Normal bootstrap
local('python manage.py syncdb --noinput')
def local_load_geodata():
local('mkdir -p /tmp/geofetch')
local('./fetch_geodata.sh /tmp/geofetch 10')
local('cp data/shapefiles/definitions.py /tmp/geofetch')
local('./manage.py load_shapefiles -c -d /tmp/geofetch')
"""
Utility functions (not to be called directly)
"""
def _execute_psql(query):
"""
Executes a PostgreSQL command using the command line interface.
"""
env.query = query
run(('cd %(site_path)s; psql -q %(project_name)s -c "%(query)s"') % env)
def _confirm_branch():
if (env.settings == 'production' and env.branch != 'stable'):
answer = prompt("You are trying to deploy the '%(branch)s' branch to production.\nYou should really only deploy a stable branch.\nDo you know what you're doing?" % env, default="Not at all")
if answer not in ('y','Y','yes','Yes','buzz off','screw you'):
exit()
|
|
import threading
import new
import logging
import django
from django.db import router, connections, models
from django.apps import apps
from django.utils.encoding import smart_text
from djangae.crc64 import CRC64
class SimulatedContentTypeManager(models.Manager):
"""
Simulates content types without actually hitting the datastore.
"""
_store = threading.local()
def __init__(self, model=None, *args, **kwargs):
super(SimulatedContentTypeManager, self).__init__(*args, **kwargs)
self.model = model
def _get_model(self):
""" If we're in a migration, then the 'fake' model class will be passed
into __init__ and we'll use that. Otherwise we'll use the 'real'
ContentType class.
"""
from django.contrib.contenttypes.models import ContentType
return self.model or ContentType
def _get_id(self, app_label, model):
crc = CRC64()
crc.append(app_label)
crc.append(model)
return crc.fini() - (2 ** 63) # GAE integers are signed so we shift down
def _get_opts(self, model, for_concrete_model):
if for_concrete_model:
model = model._meta.concrete_model
elif model._deferred:
model = model._meta.proxy_for_model
return model._meta
def _update_queries(self, models):
"""
This is just to satisfy the contenttypes tests which check that queries are executed at certain
times. It's a bit hacky but it works for that purpose.
"""
ContentType = self._get_model()
conn = connections[router.db_for_write(ContentType)]
if getattr(conn, "use_debug_cursor", getattr(conn, "force_debug_cursor", False)):
for model in models or []:
if model not in self._store.queried_models:
conn.queries.append("select * from {}".format(ContentType._meta.db_table))
break
if not hasattr(self._store, "queried_models"):
self._store.queried_models = set()
self._store.queried_models |= set(models or [])
def _repopulate_if_necessary(self, models=None):
if not hasattr(self._store, "queried_models"):
self._store.queried_models = set()
if not hasattr(self._store, "constructed_instances"):
self._store.constructed_instances = {}
self._update_queries(models)
if not hasattr(self._store, "content_types"):
all_models = [(x._meta.app_label, x._meta.model_name, x) for x in apps.get_models()]
self._update_queries([(x[0], x[1]) for x in all_models])
content_types = {}
for app_label, model_name, model in all_models:
content_type_id = self._get_id(app_label, model_name)
content_types[content_type_id] = {
"id": content_type_id,
"app_label": app_label,
"model": model_name,
}
if django.VERSION < (1, 9):
content_types[content_type_id]["name"] = smart_text(model._meta.verbose_name_raw)
self._store.content_types = content_types
def get_by_natural_key(self, app_label, model):
self._repopulate_if_necessary(models=[(app_label, model)])
return self.get(id=self._get_id(app_label, model))
def get_for_model(self, model, for_concrete_model=True):
opts = self._get_opts(model, for_concrete_model)
self._repopulate_if_necessary(models=[(opts.app_label, opts.model_name)])
return self.get_by_natural_key(opts.app_label, opts.model_name)
def get_for_models(self, *models, **kwargs):
for_concrete_model = kwargs.get("for_concrete_models", True)
self._update_queries(
[(self._get_opts(x, for_concrete_model).app_label, self._get_opts(x, for_concrete_model).model_name) for x in models]
)
ret = {}
for model in models:
ret[model] = self.get_for_model(model, for_concrete_model)
return ret
def get_for_id(self, id):
return self.get(pk=id)
def clear_cache(self):
self._store.queried_models = set()
def _get_from_store(self, id):
try:
return self._store.content_types[id]
except KeyError:
ContentType = self._get_model()
raise ContentType.DoesNotExist()
def get(self, **kwargs):
ContentType = self._get_model()
self._repopulate_if_necessary()
if "pk" in kwargs:
kwargs["id"] = kwargs["pk"]
del kwargs["pk"]
if "id" in kwargs:
dic = self._get_from_store(int(kwargs["id"]))
else:
for ct in self._store.content_types.values():
for k, v in kwargs.items():
if k not in ct:
raise ContentType.DoesNotExist()
if ct[k] != v:
break
else:
dic = ct
break
else:
raise ContentType.DoesNotExist()
def disable_save(*args, **kwargs):
raise NotImplementedError("You can't save simulated content types")
# We do this because some tests do comparisons with 'is', so we store
# constructed ContentTypes in the thread local and return them if possible
if dic["id"] in self._store.constructed_instances:
return self._store.constructed_instances[dic["id"]]
else:
ContentType = self._get_model()
result = ContentType(**dic)
result.save = new.instancemethod(disable_save, ContentType, result)
self._store.constructed_instances[dic["id"]] = result
return result
def create(self, **kwargs):
self._repopulate_if_necessary()
logging.warning(
"Created simulated content type, this will not persist and will remain only on this "
"app instance"
)
new_id = self._get_id(kwargs["app_label"], kwargs["model"])
kwargs["id"] = new_id
if "pk" in kwargs:
del kwargs["pk"]
self._store.content_types[new_id] = kwargs
return self.get(id=new_id)
def get_or_create(self, **kwargs):
ContentType = self._get_model()
defaults = kwargs.pop("defaults", None)
try:
return self.get(**kwargs), False
except ContentType.DoesNotExist:
if defaults:
kwargs.update(**defaults)
return self.create(**kwargs), True
def filter(self, **kwargs):
self._repopulate_if_necessary()
def _condition(ct):
for attr, val in kwargs.items():
if getattr(ct, attr) != val:
return False
return True
return [ct for ct in self.all() if _condition(ct)]
def all(self, **kwargs):
self._repopulate_if_necessary()
result = []
for ct in self._store.content_types.keys():
result.append(self.get(id=ct))
return result
def using(self, *args, **kwargs):
return self
def bulk_create(self, *args, **kwargs):
pass
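# Illustrative usage sketch (an assumption -- the real wiring lives elsewhere in
# djangae): the manager can stand in for ContentType.objects so lookups stay in
# memory, e.g.
#   from django.contrib.contenttypes.models import ContentType
#   ContentType.objects = SimulatedContentTypeManager(ContentType)
#   ct = ContentType.objects.get_for_model(SomeModel)  # no datastore query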
|
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""gRPC server for the DocQA environment.
Implementation of a gRPC server for the DocQA model. Requests contain
questions and document IDs that identify SQuAD datapoints. The responses
contain answers from the DocQA environment and associated scores.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from concurrent import futures
import time
from absl import app
from absl import flags
from absl import logging
import grpc
from px.environments import docqa
from px.environments import docqa_squad
from px.proto import aqa_pb2
from px.proto import aqa_pb2_grpc
FLAGS = flags.FLAGS
flags.DEFINE_integer('port', 10000, 'Port to listen on.')
flags.DEFINE_string('precomputed_data_path', '', 'Precomputed data path.')
flags.DEFINE_string('corpus_part', 'train', 'train or dev')
flags.DEFINE_string('docqa_model_dir', '', 'Directory of trained DocQA model.')
flags.DEFINE_string('nltk_dir', '', 'NLTK directory.')
flags.DEFINE_integer('worker_threads', 10,
'Number of worker threads running on the server.')
flags.DEFINE_integer('sleep_seconds', 10,
'Number of seconds to wait for a termination event.')
flags.DEFINE_bool('load_test', False,
'Load test data in addition to dev and train.')
flags.DEFINE_bool('debug_mode', False,
'If true, log questions, answers, and scores.')
flags.DEFINE_enum('model_type', 'squad', ['squad', 'triviaqa'], 'Model type.')
class DocqaServer(aqa_pb2_grpc.EnvironmentServerServicer):
"""A gRPC server for the DocQA environment.
Attributes:
environment: A DocqaEnvironment object that returns scored answers to
questions.
"""
def __init__(self, *args, **kwargs):
""""Constructor for the BiDAF server."""
precomputed_data_path = kwargs.pop('precomputed_data_path', None)
corpus_dir = kwargs.pop('corpus_dir', None)
model_dir = kwargs.pop('docqa_model_dir', None)
nltk_dir = kwargs.pop('nltk_dir', None)
load_test = kwargs.pop('load_test', False)
debug_mode = kwargs.pop('debug_mode', False)
model_type = kwargs.pop('model_type', 'squad')
corpus_name = kwargs.pop('corpus_name', None)
corpus_part = kwargs.pop('corpus_part', None)
self.debug_mode = debug_mode
if model_type == 'triviaqa':
self._InitializeEnvironment(
precomputed_data_path=precomputed_data_path,
corpus_dir=corpus_dir,
model_dir=model_dir,
nltk_dir=nltk_dir,
load_test=load_test,
debug_mode=debug_mode)
elif model_type == 'squad':
self._InitializeSquadEnvironment(
corpus_dir=corpus_dir,
corpus_name=corpus_name,
corpus_part=corpus_part,
model_dir=model_dir,
nltk_dir=nltk_dir)
def _InitializeEnvironment(self, precomputed_data_path, corpus_dir, model_dir,
nltk_dir, load_test, debug_mode):
"""Initilizes the DocQA model environment.
Args:
precomputed_data_path: Path to the precomputed data stored in a pickle
file.
corpus_dir: Path to corpus directory.
model_dir: Directory containing parameters of a pre-trained DocQA model.
nltk_dir: Folder containing the nltk package.
load_test: If True, loads the test set as well.
debug_mode: If true, logs additional debug information.
"""
self._environment = docqa.DocqaEnvironment(
precomputed_data_path=precomputed_data_path,
corpus_dir=corpus_dir,
model_dir=model_dir,
nltk_dir=nltk_dir,
load_test=load_test,
debug_mode=debug_mode)
def _InitializeSquadEnvironment(self, corpus_dir, corpus_name, corpus_part,
model_dir, nltk_dir):
"""Initilizes the DocQA SquAD model environment.
Args:
corpus_dir: Path to corpus directory.
corpus_name: Name of the corpus, effectively this is a subdirectory in
corpus_dir.
corpus_part: Part of the corpus ("train" or "dev").
model_dir: Directory containing parameters of a pre-trained DocQA model.
nltk_dir: Folder containing the nltk package.
"""
self._environment = docqa_squad.DocqaSquadEnvironment(
corpus_dir=corpus_dir,
corpus_name=corpus_name,
corpus_part=corpus_part,
model_dir=model_dir,
nltk_dir=nltk_dir)
def GetObservations(self, request, context):
"""Returns answers to given questions.
Passes questions and document ids contained in the request to the Bidaf
environment and repackages the scored answers coming from the environment
into the response.
Args:
rpc: The rpc object
request: An EnvironmentRequest containing questions and docids.
response: An EnvironmentResponse to fill with the resulting answers.
"""
if self.debug_mode:
start_time = time.time()
response = aqa_pb2.EnvironmentResponse()
if not request.queries:
context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
context.set_details('Empty list of queries provided in the request')
return response
questions = list()
document_ids = list()
for query in request.queries:
questions.append(query.question)
document_ids.append(query.id)
try:
answers_confidences_scores = self._environment.GetAnswers(
questions, document_ids)
except KeyError as e:
context.set_code(grpc.StatusCode.INTERNAL)
context.set_details('KeyError: {}'.format(e))
return response
# The environment must return exactly one scored answer per query.
if len(answers_confidences_scores) != len(request.queries):
context.set_code(grpc.StatusCode.INTERNAL)
context.set_details('Unexpected number of answers: {} vs. {}'.format(
len(answers_confidences_scores), len(request.queries)))
return response
for question, document_id, answer_confidence_score in zip(
questions, document_ids, answers_confidences_scores):
answer_text, confidence, score = answer_confidence_score
output_response = response.responses.add()
output_response.id = document_id
answer = output_response.answers.add()
answer.text = answer_text
answer.scores['confidence'] = confidence
answer.scores['f1'] = score
output_response.question = question
output_response.processed_question = question
if self.debug_mode:
logging.info('{} questions processed in {}'.format(
len(request.queries),
time.time() - start_time))
return response
def main(unused_argv):
logging.info('Loading server...')
server = grpc.server(
futures.ThreadPoolExecutor(max_workers=FLAGS.worker_threads))
aqa_pb2_grpc.add_EnvironmentServerServicer_to_server(
DocqaServer(
'active_qa.EnvironmentServer',
'DocQA environment server',
precomputed_data_path=FLAGS.precomputed_data_path,
corpus_dir=FLAGS.corpus_dir,
corpus_name=FLAGS.corpus_name,
corpus_part=FLAGS.corpus_part,
docqa_model_dir=FLAGS.docqa_model_dir,
nltk_dir=FLAGS.nltk_dir,
load_test=FLAGS.load_test,
debug_mode=FLAGS.debug_mode), server)
port = FLAGS.port
logging.info('Running server on port {}...'.format(port))
server.add_insecure_port('[::]:{}'.format(port))
server.start()
# Prevent the main thread from exiting.
try:
while True:
time.sleep(FLAGS.sleep_seconds)
except KeyboardInterrupt:
server.stop(0)
if __name__ == '__main__':
app.run(main)
|
|
import os
import shutil
import zipfile
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, Http404
from django.shortcuts import get_object_or_404, render_to_response
from django.template import RequestContext
from django.views.generic.list_detail import object_list
from guardian.shortcuts import assign
from builds.forms import AliasForm, VersionForm
from builds.filters import VersionFilter
from builds.models import Version
from projects.forms import (ImportProjectForm, build_versions_form,
build_upload_html_form, SubprojectForm,
UserForm, EmailHookForm, TranslationForm,
AdvancedProjectForm)
from projects.models import Project, EmailHook
from projects import constants
@login_required
def project_dashboard(request):
"""
A dashboard: an overview of the user's projects and their active versions.
"""
qs = (Version.objects.active(user=request.user)
.filter(project__users__in=[request.user]))
filter = VersionFilter(constants.IMPORTANT_VERSION_FILTERS, queryset=qs)
return object_list(
request,
queryset=request.user.projects.live(),
page=int(request.GET.get('page', 1)),
template_object_name='project',
template_name='projects/project_dashboard.html',
extra_context={
'filter': filter,
}
)
@login_required
def project_manage(request, project_slug):
"""
The management view for a project, where you will have links to edit
the project's configuration, edit the files associated with that
project, etc.
Now redirects to the normal /projects/<slug> view.
"""
return HttpResponseRedirect(reverse('projects_detail',
args=[project_slug]))
@login_required
def project_edit(request, project_slug):
"""
Edit an existing project - depending on what type of project is being
edited (created or imported) a different form will be displayed
"""
project = get_object_or_404(request.user.projects.live(),
slug=project_slug)
form_class = ImportProjectForm
form = form_class(instance=project, data=request.POST or None)
if request.method == 'POST' and form.is_valid():
form.save()
project_dashboard = reverse('projects_detail', args=[project.slug])
return HttpResponseRedirect(project_dashboard)
return render_to_response(
'projects/project_edit.html',
{'form': form, 'project': project},
context_instance=RequestContext(request)
)
@login_required
def project_advanced(request, project_slug):
"""
Edit an existing project - depending on what type of project is being
edited (created or imported) a different form will be displayed
"""
project = get_object_or_404(request.user.projects.live(),
slug=project_slug)
form_class = AdvancedProjectForm
form = form_class(instance=project, data=request.POST or None, initial={'num_minor': 2, 'num_major': 2, 'num_point': 2})
if request.method == 'POST' and form.is_valid():
form.save()
project_dashboard = reverse('projects_detail', args=[project.slug])
return HttpResponseRedirect(project_dashboard)
return render_to_response(
'projects/project_advanced.html',
{'form': form, 'project': project},
context_instance=RequestContext(request)
)
@login_required
def project_versions(request, project_slug):
"""
Shows the available versions and lets the user choose which ones they would
like to have built.
"""
project = get_object_or_404(request.user.projects.live(),
slug=project_slug)
if not project.is_imported:
raise Http404
form_class = build_versions_form(project)
form = form_class(data=request.POST or None)
if request.method == 'POST' and form.is_valid():
form.save()
project_dashboard = reverse('projects_detail', args=[project.slug])
return HttpResponseRedirect(project_dashboard)
return render_to_response(
'projects/project_versions.html',
{'form': form, 'project': project},
context_instance=RequestContext(request)
)
@login_required
def project_version_detail(request, project_slug, version_slug):
project = get_object_or_404(request.user.projects.live(),
slug=project_slug)
version = get_object_or_404(project.versions.all(), slug=version_slug)
form = VersionForm(request.POST or None, instance=version)
if request.method == 'POST' and form.is_valid():
form.save()
url = reverse('projects_versions', args=[project.slug])
return HttpResponseRedirect(url)
return render_to_response(
'projects/project_version_detail.html',
{'form': form, 'project': project, 'version': version},
context_instance=RequestContext(request)
)
@login_required
def project_delete(request, project_slug):
"""
Mark a project as deleted on POST; otherwise show a form asking for
confirmation of the delete.
"""
project = get_object_or_404(request.user.projects.live(),
slug=project_slug)
if request.method == 'POST':
# Remove the repository checkout
shutil.rmtree(project.doc_path, ignore_errors=True)
# Delete the project and everything related to it
project.delete()
project_dashboard = reverse('projects_dashboard')
return HttpResponseRedirect(project_dashboard)
return render_to_response(
'projects/project_delete.html',
{'project': project},
context_instance=RequestContext(request)
)
@login_required
def project_import(request):
"""
Import docs from a repo
"""
form = ImportProjectForm(request.POST or None)
if request.method == 'POST' and form.is_valid():
project = form.save()
form.instance.users.add(request.user)
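# django-guardian: grant the importing user object-level view permission on the project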
assign('view_project', request.user, project)
project_manage = reverse('projects_detail', args=[project.slug])
return HttpResponseRedirect(project_manage + '?docs_not_built=True')
return render_to_response(
'projects/project_import.html',
{'form': form},
context_instance=RequestContext(request)
)
@login_required
def edit_alias(request, project_slug, id=None):
proj = get_object_or_404(Project.objects.all(), slug=project_slug)
if id:
alias = proj.aliases.get(pk=id)
form = AliasForm(instance=alias, data=request.POST or None)
else:
form = AliasForm(request.POST or None)
if request.method == 'POST' and form.is_valid():
alias = form.save()
return HttpResponseRedirect(alias.project.get_absolute_url())
return render_to_response(
'projects/alias_edit.html',
{'form': form},
context_instance=RequestContext(request)
)
@login_required
def list_alias(request, project_slug):
proj = get_object_or_404(Project.objects.all(), slug=project_slug)
return object_list(
request,
queryset=proj.aliases.all(),
template_object_name='alias',
template_name='projects/alias_list.html',
)
@login_required
def project_subprojects(request, project_slug):
project = get_object_or_404(request.user.projects.live(),
slug=project_slug)
form = SubprojectForm(data=request.POST or None, parent=project)
if request.method == 'POST' and form.is_valid():
form.save()
project_dashboard = reverse('projects_subprojects', args=[project.slug])
return HttpResponseRedirect(project_dashboard)
subprojects = project.subprojects.all()
return render_to_response(
'projects/project_subprojects.html',
{'form': form, 'project': project, 'subprojects': subprojects},
context_instance=RequestContext(request)
)
@login_required
def project_subprojects_delete(request, project_slug, child_slug):
parent = get_object_or_404(request.user.projects.live(), slug=project_slug)
child = get_object_or_404(Project.objects.all(), slug=child_slug)
parent.remove_subproject(child)
project_dashboard = reverse('projects_detail', args=[parent.slug])
return HttpResponseRedirect(project_dashboard)
@login_required
def project_users(request, project_slug):
project = get_object_or_404(request.user.projects.live(),
slug=project_slug)
form = UserForm(data=request.POST or None, project=project)
if request.method == 'POST' and form.is_valid():
form.save()
project_dashboard = reverse('projects_users', args=[project.slug])
return HttpResponseRedirect(project_dashboard)
users = project.users.all()
return render_to_response(
'projects/project_users.html',
{'form': form, 'project': project, 'users': users},
context_instance=RequestContext(request)
)
@login_required
def project_users_delete(request, project_slug):
if request.method != 'POST':
raise Http404
project = get_object_or_404(request.user.projects.live(),
slug=project_slug)
user = get_object_or_404(User.objects.all(),
username=request.POST.get('username'))
if user == request.user:
raise Http404
project.users.remove(user)
project_dashboard = reverse('projects_users', args=[project.slug])
return HttpResponseRedirect(project_dashboard)
@login_required
def project_notifications(request, project_slug):
project = get_object_or_404(request.user.projects.live(),
slug=project_slug)
form = EmailHookForm(data=request.POST or None, project=project)
if request.method == 'POST' and form.is_valid():
form.save()
project_dashboard = reverse('projects_notifications',
args=[project.slug])
return HttpResponseRedirect(project_dashboard)
emails = project.emailhook_notifications.all()
return render_to_response(
'projects/project_notifications.html',
{'form': form, 'project': project, 'emails': emails},
context_instance=RequestContext(request)
)
@login_required
def project_notifications_delete(request, project_slug):
if request.method != 'POST':
raise Http404
project = get_object_or_404(request.user.projects.live(),
slug=project_slug)
notification = get_object_or_404(EmailHook.objects.all(),
email=request.POST.get('email'))
notification.delete()
project_dashboard = reverse('projects_notifications', args=[project.slug])
return HttpResponseRedirect(project_dashboard)
@login_required
def project_translations(request, project_slug):
project = get_object_or_404(request.user.projects.live(),
slug=project_slug)
form = TranslationForm(data=request.POST or None, parent=project)
if request.method == 'POST' and form.is_valid():
form.save()
project_dashboard = reverse('projects_translations',
args=[project.slug])
return HttpResponseRedirect(project_dashboard)
lang_projects = project.translations.all()
return render_to_response(
'projects/project_translations.html',
{'form': form, 'project': project, 'lang_projects': lang_projects},
context_instance=RequestContext(request)
)
@login_required
def project_translations_delete(request, project_slug, child_slug):
project = get_object_or_404(request.user.projects.live(),
slug=project_slug)
subproj = get_object_or_404(Project.objects.public(), slug=child_slug)
project.translations.remove(subproj)
project_dashboard = reverse('projects_translations', args=[project.slug])
return HttpResponseRedirect(project_dashboard)
|
|
import pytest
from mitmproxy.test import tflow
from mitmproxy.addons import view
from mitmproxy import flowfilter
from mitmproxy import exceptions
from mitmproxy import io
from mitmproxy.test import taddons
from mitmproxy.tools.console import consoleaddons
from mitmproxy.tools.console.common import render_marker, SYMBOL_MARK
def tft(*, method="get", start=0):
f = tflow.tflow()
f.request.method = method
f.request.timestamp_start = start
return f
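# tft() builds a bare HTTP test flow with a given request method and start timestamp;
# the ordering and filtering tests below vary just these two fields to drive sort
# order and "~m" filter matches.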
def test_order_refresh():
v = view.View()
sargs = []
def save(*args, **kwargs):
sargs.extend([args, kwargs])
v.sig_view_refresh.connect(save)
tf = tflow.tflow(resp=True)
with taddons.context() as tctx:
tctx.configure(v, view_order="time")
v.add([tf])
tf.request.timestamp_start = 10
assert not sargs
v.update([tf])
assert sargs
def test_order_generators_http():
v = view.View()
tf = tflow.tflow(resp=True)
rs = view.OrderRequestStart(v)
assert rs.generate(tf) == 946681200
rm = view.OrderRequestMethod(v)
assert rm.generate(tf) == tf.request.method
ru = view.OrderRequestURL(v)
assert ru.generate(tf) == tf.request.url
sz = view.OrderKeySize(v)
assert sz.generate(tf) == len(tf.request.raw_content) + len(tf.response.raw_content)
def test_order_generators_tcp():
v = view.View()
tf = tflow.ttcpflow()
rs = view.OrderRequestStart(v)
assert rs.generate(tf) == 946681200
rm = view.OrderRequestMethod(v)
assert rm.generate(tf) == "TCP"
ru = view.OrderRequestURL(v)
assert ru.generate(tf) == "address:22"
sz = view.OrderKeySize(v)
assert sz.generate(tf) == sum(len(m.content) for m in tf.messages)
def test_simple():
v = view.View()
f = tft(start=1)
assert v.store_count() == 0
v.requestheaders(f)
assert list(v) == [f]
assert v.get_by_id(f.id)
assert not v.get_by_id("nonexistent")
# These all just call update
v.error(f)
v.response(f)
v.intercept(f)
v.resume(f)
v.kill(f)
assert list(v) == [f]
v.requestheaders(f)
assert list(v) == [f]
assert len(v._store) == 1
assert v.store_count() == 1
f2 = tft(start=3)
v.requestheaders(f2)
assert list(v) == [f, f2]
v.requestheaders(f2)
assert list(v) == [f, f2]
assert len(v._store) == 2
assert v.inbounds(0)
assert not v.inbounds(-1)
assert not v.inbounds(100)
f3 = tft(start=2)
v.requestheaders(f3)
assert list(v) == [f, f3, f2]
v.requestheaders(f3)
assert list(v) == [f, f3, f2]
assert len(v._store) == 3
f.marked = not f.marked
f2.marked = not f2.marked
v.clear_not_marked()
assert list(v) == [f, f2]
assert len(v) == 2
assert len(v._store) == 2
v.clear()
assert len(v) == 0
assert len(v._store) == 0
def test_simple_tcp():
v = view.View()
f = tflow.ttcpflow()
assert v.store_count() == 0
v.tcp_start(f)
assert list(v) == [f]
# These all just call update
v.tcp_start(f)
v.tcp_message(f)
v.tcp_error(f)
v.tcp_end(f)
assert list(v) == [f]
def test_filter():
v = view.View()
v.requestheaders(tft(method="get"))
v.requestheaders(tft(method="put"))
v.requestheaders(tft(method="get"))
v.requestheaders(tft(method="put"))
assert(len(v)) == 4
v.set_filter_cmd("~m get")
assert [i.request.method for i in v] == ["GET", "GET"]
assert len(v._store) == 4
v.set_filter(None)
assert len(v) == 4
v.toggle_marked()
assert len(v) == 0
v.toggle_marked()
assert len(v) == 4
with pytest.raises(exceptions.CommandError):
v.set_filter_cmd("~notafilter regex")
v[1].marked = True
v.toggle_marked()
assert len(v) == 1
assert v[0].marked
v.toggle_marked()
assert len(v) == 4
def tdump(path, flows):
with open(path, "wb") as f:
w = io.FlowWriter(f)
for i in flows:
w.add(i)
def test_create():
v = view.View()
with taddons.context():
v.create("get", "http://foo.com")
assert len(v) == 1
assert v[0].request.url == "http://foo.com/"
v.create("get", "http://foo.com")
assert len(v) == 2
with pytest.raises(exceptions.CommandError, match="Invalid URL"):
v.create("get", "http://foo.com\\")
with pytest.raises(exceptions.CommandError, match="Invalid URL"):
v.create("get", "http://")
def test_orders():
v = view.View()
with taddons.context(v):
assert v.order_options()
@pytest.mark.asyncio
async def test_load(tmpdir):
path = str(tmpdir.join("path"))
v = view.View()
with taddons.context() as tctx:
tctx.master.addons.add(v)
tdump(
path,
[
tflow.tflow(resp=True),
tflow.tflow(resp=True)
]
)
v.load_file(path)
assert len(v) == 2
v.load_file(path)
assert len(v) == 4
try:
v.load_file("nonexistent_file_path")
except OSError:
assert False
with open(path, "wb") as f:
f.write(b"invalidflows")
v.load_file(path)
await tctx.master.await_log("Invalid data format.")
def test_resolve():
v = view.View()
with taddons.context() as tctx:
f = tft(method="get")
assert tctx.command(v.resolve, "@all") == []
assert tctx.command(v.resolve, "@focus") == []
assert tctx.command(v.resolve, "@shown") == []
assert tctx.command(v.resolve, "@hidden") == []
assert tctx.command(v.resolve, "@marked") == []
assert tctx.command(v.resolve, "@unmarked") == []
assert tctx.command(v.resolve, f"@{f.id}") == []
assert tctx.command(v.resolve, "~m get") == []
v.requestheaders(f)
assert len(tctx.command(v.resolve, "~m get")) == 1
assert len(tctx.command(v.resolve, "@focus")) == 1
assert len(tctx.command(v.resolve, "@all")) == 1
assert len(tctx.command(v.resolve, "@shown")) == 1
assert len(tctx.command(v.resolve, "@unmarked")) == 1
assert len(tctx.command(v.resolve, f"@{f.id}")) == 1
assert tctx.command(v.resolve, "@hidden") == []
assert tctx.command(v.resolve, "@marked") == []
v.requestheaders(tft(method="put"))
assert len(tctx.command(v.resolve, f"@{f.id}")) == 1
assert len(tctx.command(v.resolve, "@focus")) == 1
assert len(tctx.command(v.resolve, "@shown")) == 2
assert len(tctx.command(v.resolve, "@all")) == 2
assert tctx.command(v.resolve, "@hidden") == []
assert tctx.command(v.resolve, "@marked") == []
v.requestheaders(tft(method="get"))
v.requestheaders(tft(method="put"))
f = flowfilter.parse("~m get")
v.set_filter(f)
v[0].marked = True
def m(l):
return [i.request.method for i in l]
assert m(tctx.command(v.resolve, "~m get")) == ["GET", "GET"]
assert m(tctx.command(v.resolve, "~m put")) == ["PUT", "PUT"]
assert m(tctx.command(v.resolve, "@shown")) == ["GET", "GET"]
assert m(tctx.command(v.resolve, "@hidden")) == ["PUT", "PUT"]
assert m(tctx.command(v.resolve, "@marked")) == ["GET"]
assert m(tctx.command(v.resolve, "@unmarked")) == ["PUT", "GET", "PUT"]
assert m(tctx.command(v.resolve, "@all")) == ["GET", "PUT", "GET", "PUT"]
with pytest.raises(exceptions.CommandError, match="Invalid filter expression"):
tctx.command(v.resolve, "~")
def test_movement():
v = view.View()
with taddons.context():
v.go(0)
v.add([
tflow.tflow(),
tflow.tflow(),
tflow.tflow(),
tflow.tflow(),
tflow.tflow(),
])
assert v.focus.index == 0
v.go(-1)
assert v.focus.index == 4
v.go(0)
assert v.focus.index == 0
v.go(1)
assert v.focus.index == 1
v.go(999)
assert v.focus.index == 4
v.go(-999)
assert v.focus.index == 0
v.focus_next()
assert v.focus.index == 1
v.focus_prev()
assert v.focus.index == 0
def test_duplicate():
v = view.View()
with taddons.context():
f = [
tflow.tflow(),
tflow.tflow(),
]
v.add(f)
assert len(v) == 2
v.duplicate(f)
assert len(v) == 4
assert v.focus.index == 2
def test_remove():
v = view.View()
with taddons.context():
f = [tflow.tflow(), tflow.tflow()]
v.add(f)
assert len(v) == 2
v.remove(f)
assert len(v) == 0
def test_setgetval():
v = view.View()
with taddons.context():
f = tflow.tflow()
v.add([f])
v.setvalue([f], "key", "value")
assert v.getvalue(f, "key", "default") == "value"
assert v.getvalue(f, "unknow", "default") == "default"
v.setvalue_toggle([f], "key")
assert v.getvalue(f, "key", "default") == "true"
v.setvalue_toggle([f], "key")
assert v.getvalue(f, "key", "default") == "false"
def test_order():
v = view.View()
v.requestheaders(tft(method="get", start=1))
v.requestheaders(tft(method="put", start=2))
v.requestheaders(tft(method="get", start=3))
v.requestheaders(tft(method="put", start=4))
assert [i.request.timestamp_start for i in v] == [1, 2, 3, 4]
v.set_order("method")
assert v.get_order() == "method"
assert [i.request.method for i in v] == ["GET", "GET", "PUT", "PUT"]
v.set_reversed(True)
assert [i.request.method for i in v] == ["PUT", "PUT", "GET", "GET"]
v.set_order("time")
assert v.get_order() == "time"
assert [i.request.timestamp_start for i in v] == [4, 3, 2, 1]
v.set_reversed(False)
assert [i.request.timestamp_start for i in v] == [1, 2, 3, 4]
with pytest.raises(exceptions.CommandError):
v.set_order("not_an_order")
def test_reversed():
v = view.View()
v.requestheaders(tft(start=1))
v.requestheaders(tft(start=2))
v.requestheaders(tft(start=3))
v.set_reversed(True)
assert v[0].request.timestamp_start == 3
assert v[-1].request.timestamp_start == 1
assert v[2].request.timestamp_start == 1
with pytest.raises(IndexError):
v[5]
with pytest.raises(IndexError):
v[-5]
assert v._bisect(v[0]) == 1
assert v._bisect(v[2]) == 3
def test_update():
v = view.View()
flt = flowfilter.parse("~m get")
v.set_filter(flt)
f = tft(method="get")
v.requestheaders(f)
assert f in v
f.request.method = "put"
v.update([f])
assert f not in v
f.request.method = "get"
v.update([f])
assert f in v
v.update([f])
assert f in v
class Record:
def __init__(self):
self.calls = []
def __bool__(self):
return bool(self.calls)
def __repr__(self):
return repr(self.calls)
def __call__(self, *args, **kwargs):
self.calls.append((args, kwargs))
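# Record is a minimal callable used as a signal receiver: connecting it to the view's
# signals lets the tests assert which signals fired (an instance is truthy once called).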
def test_signals():
v = view.View()
rec_add = Record()
rec_update = Record()
rec_remove = Record()
rec_refresh = Record()
def clearrec():
rec_add.calls = []
rec_update.calls = []
rec_remove.calls = []
rec_refresh.calls = []
v.sig_view_add.connect(rec_add)
v.sig_view_update.connect(rec_update)
v.sig_view_remove.connect(rec_remove)
v.sig_view_refresh.connect(rec_refresh)
assert not any([rec_add, rec_update, rec_remove, rec_refresh])
# Simple add
v.add([tft()])
assert rec_add
assert not any([rec_update, rec_remove, rec_refresh])
# Filter change triggers refresh
clearrec()
v.set_filter(flowfilter.parse("~m put"))
assert rec_refresh
assert not any([rec_update, rec_add, rec_remove])
v.set_filter(flowfilter.parse("~m get"))
# An update that results in a flow being added to the view
clearrec()
v[0].request.method = "PUT"
v.update([v[0]])
assert rec_remove
assert not any([rec_update, rec_refresh, rec_add])
# An update that does not affect the view just sends update
v.set_filter(flowfilter.parse("~m put"))
clearrec()
v.update([v[0]])
assert rec_update
assert not any([rec_remove, rec_refresh, rec_add])
# An update for a flow in state but not view does not do anything
f = v[0]
v.set_filter(flowfilter.parse("~m get"))
assert not len(v)
clearrec()
v.update([f])
assert not any([rec_add, rec_update, rec_remove, rec_refresh])
def test_focus_follow():
v = view.View()
with taddons.context(v) as tctx:
console_addon = consoleaddons.ConsoleAddon(tctx.master)
tctx.configure(console_addon)
tctx.configure(v, console_focus_follow=True, view_filter="~m get")
v.add([tft(start=5)])
assert v.focus.index == 0
v.add([tft(start=4)])
assert v.focus.index == 0
assert v.focus.flow.request.timestamp_start == 4
v.add([tft(start=7)])
assert v.focus.index == 2
assert v.focus.flow.request.timestamp_start == 7
mod = tft(method="put", start=6)
v.add([mod])
assert v.focus.index == 2
assert v.focus.flow.request.timestamp_start == 7
mod.request.method = "GET"
v.update([mod])
assert v.focus.index == 2
assert v.focus.flow.request.timestamp_start == 6
def test_focus():
# Special case - initialising with a view that already contains data
v = view.View()
v.add([tft()])
f = view.Focus(v)
assert f.index == 0
assert f.flow is v[0]
# Start empty
v = view.View()
f = view.Focus(v)
assert f.index is None
assert f.flow is None
v.add([tft(start=1)])
assert f.index == 0
assert f.flow is v[0]
# Try to set to something not in view
with pytest.raises(ValueError):
f.__setattr__("flow", tft())
with pytest.raises(ValueError):
f.__setattr__("index", 99)
v.add([tft(start=0)])
assert f.index == 1
assert f.flow is v[1]
v.add([tft(start=2)])
assert f.index == 1
assert f.flow is v[1]
f.index = 0
assert f.index == 0
f.index = 1
v.remove([v[1]])
v[1].intercept()
assert f.index == 1
assert f.flow is v[1]
v.remove([v[1]])
assert f.index == 0
assert f.flow is v[0]
v.remove([v[0]])
assert f.index is None
assert f.flow is None
v.add([
tft(method="get", start=0),
tft(method="get", start=1),
tft(method="put", start=2),
tft(method="get", start=3),
])
f.flow = v[2]
assert f.flow.request.method == "PUT"
filt = flowfilter.parse("~m get")
v.set_filter(filt)
assert f.index == 2
filt = flowfilter.parse("~m oink")
v.set_filter(filt)
assert f.index is None
def test_settings():
v = view.View()
f = tft()
with pytest.raises(KeyError):
v.settings[f]
v.add([f])
v.settings[f]["foo"] = "bar"
assert v.settings[f]["foo"] == "bar"
assert len(list(v.settings)) == 1
v.remove([f])
with pytest.raises(KeyError):
v.settings[f]
assert not v.settings.keys()
v.add([f])
v.settings[f]["foo"] = "bar"
assert v.settings.keys()
v.clear()
assert not v.settings.keys()
def test_properties():
v = view.View()
f = tft()
v.requestheaders(f)
assert v.get_length() == 1
assert not v.get_marked()
v.toggle_marked()
assert v.get_length() == 0
assert v.get_marked()
def test_configure():
v = view.View()
with taddons.context(v) as tctx:
tctx.configure(v, view_filter="~q")
with pytest.raises(Exception, match="Invalid filter expression"):
tctx.configure(v, view_filter="~~")
tctx.configure(v, view_order="method")
with pytest.raises(Exception, match="Unknown flow order"):
tctx.configure(v, view_order="no")
tctx.configure(v, view_order_reversed=True)
tctx.configure(v, console_focus_follow=True)
assert v.focus_follow
@pytest.mark.parametrize("marker, expected", [
[":default:", SYMBOL_MARK],
["X", "X"],
[":grapes:", "\N{grapes}"],
[":not valid:", SYMBOL_MARK], [":weird", SYMBOL_MARK]
])
def test_marker(marker, expected):
assert render_marker(marker) == expected
|
|
from whoosh.fields import *
from whoosh.index import create_in, open_dir
from whoosh.qparser import MultifieldParser
from whoosh.query import *
import abc
import copy
import csv
import json
import os.path
import sys
import utils
class GenericSearchEngine(object):
"""
An abstract class for any search engine, whether that's an external API
you've already built or a Whoosh-based search engine you can make from
scratch via searchbetter.
This class encapsulates some useful functionality like query rewriting
that can benefit any search engine, even one not made using SearchBetter
tools.
Extending this class is easy - you just need to provide a search function
and a few other details, and we'll build in functionality from there.
"""
# make it an abstract class
__metaclass__ = abc.ABCMeta
def __init__(self):
# no rewriter yet
# TODO let someone pass this in the constructor
self.rewriter = None
def set_rewriter(self, rewriter):
"""
Sets a new query rewriter (from this_package.rewriter) as the default
rewriter for this search engine.
"""
self.rewriter = rewriter
def search(self, term):
"""
Runs a plain-English search and returns results.
:param term {String}: a query like you'd type into Google.
:return: a list of dicts, each of which encodes a search result.
"""
if self.rewriter is None:
# if there's no query rewriter in place, just search for the
# original term
return self.single_search(term)
else:
# there's a rewriter! use it
rewritten_queries = self.rewriter.rewrite(term)
results = [self.single_search(q) for q in rewritten_queries]
# results are multi-level... flatten it
flattened_results = utils.flatten(results)
return self.process_raw_results(flattened_results)
def process_raw_results(self, raw_results):
"""
After rewriting, we'll pass the full list of results in here
for you to clean up. This could include sorting, removing duplicates,
etc. (What you can do, and how you do it, really depends on what kind
of objects your search engine returns.)
"""
# default operation is a no-op
return raw_results
###
###
### functions you need to specify
###
###
def single_search(self, term):
"""
Runs the search engine on a single term (no rewriting or anything),
returning a list of objects.
Subclasses must implement!
:param str term: a word or phrase to search for
:return: a list of objects that were found. Can be anything: dicts,
strings, custom objects, whatever.
:rtype: list(object)
"""
raise NotImplementedError("Subclasses must implement!")
class WhooshSearchEngine(GenericSearchEngine):
"""
An abstract class for custom, Whoosh-based search engines.
A batteries-included search engine that can operate on any
given dataset. Uses the Whoosh library to index and run searches
on the dataset. Has built-in support for query rewriting.
"""
# make it an abstract class
__metaclass__ = abc.ABCMeta
# TODO consider making more hierarchy. This is the WhooshSearchEngine,
# which has the cool indexing capabilities. But more generally, you
# could have a search engine that only has to support search().
# but at that point it's just a useless interface, mostly.
# anyway, such a search engine would let the query rewriting search engine
# inherit from search engine too.
def __init__(self, create, search_fields, index_path):
"""
Creates a new search engine.
:param create {bool}: If True, recreates an index from scratch.
If False, loads the existing index
:param search_fields {str[]}: An array of names of fields in the index that our
search engine will search against.
:param index_path {str}: A relative path to a folder where the whoosh
index should be stored.
"""
super(WhooshSearchEngine, self).__init__()
# TODO have an auto-detect feature that will determine if the
# index exists, and depending on that creates or loads the index
# TODO have the `create` option become `force_create`; normally
# it'll intelligently auto-generate, but if you force it it'll
# do what you say
self.index_path = index_path
# both these functions return an index
if create:
self.index = self.create_index()
else:
self.index = self.load_index()
# set up searching
# first, query parser
self.parser = MultifieldParser(search_fields, self.index.schema)
def load_index(self):
"""
Used when the index is already created. This just loads it and
returns it for you.
"""
index = open_dir(self.index_path)
return index
def create_index(self):
"""
Creates and returns a brand-new index. This will call
get_empty_index() behind the scenes.
Subclasses must implement!
"""
raise NotImplementedError("Subclasses must implement!")
def get_empty_index(self, path, schema):
"""
Makes an empty index file, making the directory where it needs
to be stored if necessary. Returns the index.
This is called within create_index().
TODO this breakdown is still confusing
"""
if not os.path.exists(path):
os.mkdir(path)
index = create_in(path, schema)
return index
def get_num_documents(self):
"""
Returns the number of documents in this search engine's corpus. That is,
this is the size of the search engine.
"""
query = Every()
with self.index.searcher() as searcher:
result = searcher.search(query)
return len(result)
def __len__(self):
return self.get_num_documents()
def single_search(self, term):
"""
Helper function for search() that just returns search results for a
single, non-rewritten search term.
Returns a list of results, each of which is a Result object.
The makeup of the results objects varies
from search engine to search engine.
OVERRIDDEN from GenericSearchEngine.
"""
outer_results = []
with self.index.searcher() as searcher:
query_obj = self.parser.parse(term)
# results become invalid once the searcher is closed, so materialize
# them into a list before leaving the with-block
results = list(searcher.search(query_obj, limit=None))
# `results` is a list of Hits; each Hit's `fields()` returns a dict version
# of the indexed item (title, description, and other stored fields), and
# `hit.score` tells you how relevant the hit is (higher = better)
cleaned_results = [WhooshResult(hit.fields(), hit.score) for hit in results]
# make sure we store it outside the with-block b/c scope
outer_results = cleaned_results
return outer_results
def process_raw_results(self, raw_results):
# our search engine returns WhooshResult objects, so we can unique/sort
# them
# only give the unique ones
# this works now that we use a Result object, which is hashable!
unique_results = list(set(raw_results))
# now let's sort all the results by their relevance score (descending
# b/c higher is better)
# so the best stuff bubbles to the top
unique_results.sort(key=lambda result: result.score, reverse=True)
return unique_results
class UdacitySearchEngine(WhooshSearchEngine):
"""
A search engine over the Udacity course catalog, built from the Udacity API JSON dump.
"""
# DATASET_PATH = secure.DATASET_PATH_BASE+'udacity-api.json'
# INDEX_PATH = secure.INDEX_PATH_BASE+'udacity'
"""Which dataset fields we should search over."""
SEARCH_FIELDS = ["title", "subtitle", "expected_learning",
"syllabus", "summary", "short_summary"]
def __init__(self, dataset_path, index_path, create=False):
"""
Creates a new Udacity search engine.
:param dataset_path {string}: the path to the Udacity API JSON file.
:param index_path {string}: the path to a folder where you'd like to
store the search engine index. The given folder doesn't have to exist,
but its *parent* folder does.
:param create {bool}: If True, recreates an index from scratch.
If False, loads the existing index
"""
self.dataset_path = dataset_path
super(UdacitySearchEngine, self).__init__(
create, self.SEARCH_FIELDS, index_path)
def create_index(self):
"""
Creates a new index to search the Udacity dataset. You only need to
call this once; once the index is created, you can just load it again
instead of creating it afresh all the time.
"""
# load data
udacity_data = None
with open(self.dataset_path, 'r') as f:
udacity_data = json.load(f)
# set up whoosh
# schema
# TODO: use StemmingAnalyzer here so we get the built-in benefits
# of stemming in our search engine
# http://whoosh.readthedocs.io/en/latest/stemming.html
schema = Schema(
slug=ID(stored=True),
title=TEXT(stored=True),
subtitle=TEXT,
expected_learning=TEXT,
syllabus=TEXT,
summary=TEXT,
short_summary=TEXT
)
# make an index to store this stuff in
index = self.get_empty_index(self.index_path, schema)
# start adding documents (i.e. the courses) to the index
writer = index.writer()
try:
for course in udacity_data['courses']:
writer.add_document(
slug=course['slug'],
title=course['title'],
subtitle=course['subtitle'],
expected_learning=course['expected_learning'],
syllabus=course['syllabus'],
summary=course['summary'],
short_summary=course['short_summary'])
writer.commit()
except Exception as e:
print e
# release the write lock so the index isn't left locked on failure
writer.cancel()
# all done for now
return index
def count_words(self):
"""
Returns the number of words in the underlying Udacity dataset.
"""
# will be useful for extracting textual content from a course later
def extract_text_from_course(c):
return [c[field] for field in self.SEARCH_FIELDS]
# load data
with open(self.dataset_path, 'r') as f:
udacity_data = json.load(f)
# extract just the text fields, no other markup or fields
courses = udacity_data['courses']
paragraphs = [extract_text_from_course(c) for c in courses]
# these are nested... flatten into one huge string array
raw_lines = utils.flatten(paragraphs)
# then flatten into one huge string
mega_string = (" ").join(raw_lines)
return utils.unique_words_in_string(mega_string)
class HarvardXSearchEngine(WhooshSearchEngine):
"""
A search engine over the HarvardX/DART dump of course content (videos, problems, etc.).
"""
# INDEX_PATH = secure.INDEX_PATH_BASE+'harvardx'
SEARCH_FIELDS = ["display_name", "contents"]
def __init__(self, dataset_path, index_path, create=False):
"""
Creates a new HarvardX search engine. Searches over the HarvardX/DART
database of all courses and course materials used in HarvardX. This includes
videos, quizzes, etc.
TODO: consider renaming to DART, probz
:param dataset_path {string}: the path to the HarvardX course catalog CSV file.
:param index_path {string}: the path to a folder where you'd like to
store the search engine index. The given folder doesn't have to exist,
but its *parent* folder does.
:param create {bool}: If True, recreates an index from scratch.
If False, loads the existing index
"""
# dataset_path must be set before the superclass constructor runs,
# since create_index() (called there when create=True) reads it
self.dataset_path = dataset_path
super(HarvardXSearchEngine, self).__init__(
create, self.SEARCH_FIELDS, index_path)
def create_index(self):
"""
Creates a new index to search the dataset. You only need to
call this once; once the index is created, you can just load it again
instead of creating it afresh all the time.
Returns the index object.
"""
# load data
# real data
# csvfile_path = secure.DATASET_PATH_BASE+'corpus_HarvardX_LatestCourses_based_on_2016-10-18.csv'
# test data
# csvfile_path = 'datasets/test.csv'
# only consider resources with this category (type of content)
# unsure about courses (b/c they have no content) and html (b/c they often include messy CSS/JS in there)
# TODO: add "html" support. requires stripping comments
# http://stackoverflow.com/questions/753052/strip-html-from-strings-in-python
#
supported_categories = ('problem', 'video', 'course')
# set up whoosh schema
schema = Schema(
course_id=ID(stored=True),
display_name=TEXT(stored=True),
contents=TEXT
)
# TODO: use StemmingAnalyzer here so we get the built-in benefits
# of stemming in our search engine
# http://whoosh.readthedocs.io/en/latest/stemming.html
# make an index to store this stuff in
index = self.get_empty_index(self.index_path, schema)
# start adding documents (i.e. the courses) to the index
# first, some of the fields are HUGE so we need to let the csv
# reader handle them
csv.field_size_limit(sys.maxsize)
with open(self.dataset_path, 'r') as csvfile:
reader = csv.DictReader(csvfile)
writer = index.writer()
try:
for row in reader:
# skip rows whose content type we don't support
if row['category'] not in supported_categories:
continue
# write
writer.add_document(
course_id=row['course_id'].decode('utf8'),
display_name=row['display_name'].decode('utf8'),
contents=row['contents'].decode('utf8'))
writer.commit()
except Exception as e:
print e
writer.cancel()
# all done for now
return index
class EdXSearchEngine(WhooshSearchEngine):
"""
A search engine over the edX course listings CSV.
"""
# INDEX_PATH = secure.INDEX_PATH_BASE+'edx'
SEARCH_FIELDS = ["name"]
def __init__(self, dataset_path, index_path, create=False):
"""
Creates a new search engine that searches over edX courses.
:param dataset_path {string}: the path to the edX course listings file.
:param index_path {string}: the path to a folder where you'd like to
store the search engine index. The given folder doesn't have to exist,
but its *parent* folder does.
:param create {bool}: If True, recreates an index from scratch.
If False, loads the existing index
"""
# dataset_path must be set before the superclass constructor runs,
# since create_index() (called there when create=True) reads it
self.dataset_path = dataset_path
super(EdXSearchEngine, self).__init__(
create, self.SEARCH_FIELDS, index_path)
def create_index(self):
"""
Creates a new index to search the dataset. You only need to
call this once; once the index is created, you can just load it again
instead of creating it afresh all the time.
Returns the index object.
"""
# load data
# csvfile_path = secure.DATASET_PATH_BASE+'Master CourseListings - edX.csv'
# set up whoosh schema
schema = Schema(
course_id=ID(stored=True),
name=TEXT(stored=True)
)
# TODO: use StemmingAnalyzer here so we get the built-in benefits
# of stemming in our search engine
# http://whoosh.readthedocs.io/en/latest/stemming.html
# make an index to store this stuff in
index = self.get_empty_index(self.index_path, schema)
# start adding documents (i.e. the courses) to the index
with open(self.dataset_path, 'r') as csvfile:
reader = csv.DictReader(csvfile)
writer = index.writer()
try:
for row in reader:
# write
writer.add_document(
course_id=row['course_id'].decode('utf8'),
name=row['name'].decode('utf8'))
writer.commit()
except Exception as e:
print e
writer.cancel()
# all done for now
return index
def count_words(self):
"""
Returns the number of words in the underlying edX dataset.
"""
with open(self.dataset_path, 'r') as csvfile:
reader = csv.DictReader(csvfile)
# the only text field that's useful is the name field
names = [row['name'].decode('utf8') for row in reader]
# turn into one huge string then count words in that
mega_string = (" ").join(names)
return utils.unique_words_in_string(mega_string)
class PrebuiltSearchEngine(WhooshSearchEngine):
"""
A search engine designed for when you're just given a model file and can
use that directly without having to build anything.
"""
def __init__(self, search_fields, index_path):
super(PrebuiltSearchEngine, self).__init__(
create=False, search_fields=search_fields, index_path=index_path)
def create_index(self):
# a prebuilt index ships with the engine, so there is nothing to create
raise NotImplementedError(
"This search engine doesn't need to create an index! Use create = False.")
class WhooshResult(object):
"""
Encodes a search result from a Whoosh-based search engine.
Basically a wrapper around a result dict and its relevance score
(higher is better).
"""
def __init__(self, dict_data, score):
self.dict_data = dict_data
self.score = score
def get_dict(self):
"""
Get the underlying dict data
"""
return self.dict_data
def __repr__(self):
"""
Stringified version of the result, which encodes the dict and the score
"""
return str((self.dict_data, self.score))
# enable lookup as if this was a real dict
def __getitem__(self, key):
return self.dict_data[key]
# to enable hashing
def __hash__(self):
return hash(frozenset(self.dict_data.items()))
def __eq__(self, other):
return frozenset(self.dict_data.items()) == frozenset(other.dict_data.items())
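# Hedged usage sketch (not part of the library): the dataset and index paths
# below are placeholders for wherever you keep the Udacity API dump and the
# Whoosh index; pass create=False once the index has already been built.
if __name__ == '__main__':
    engine = UdacitySearchEngine(
        dataset_path='datasets/udacity-api.json',  # assumed location
        index_path='indexes/udacity',              # assumed location
        create=True)
    for result in engine.search('machine learning'):
        print result['title'], result.score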
|
|
import os
import json
import uuid
import logging
import requests
import predix.config
import predix.service
import predix.security.uaa
class Asset(object):
"""
Client library for working with the Predix Asset Service. For more details
on use of the service please see official docs:
https://www.predix.io/services/service.html?id=1171
"""
def __init__(self, uri=None, zone_id=None, *args, **kwargs):
super(Asset, self).__init__(*args, **kwargs)
self.uri = uri or self._get_uri()
self.zone_id = zone_id or self._get_zone_id()
self.service = predix.service.Service(self.zone_id)
def _get_uri(self):
"""
Returns the URI endpoint for an instance of the Asset
service from environment inspection.
"""
if 'VCAP_SERVICES' in os.environ:
services = json.loads(os.getenv('VCAP_SERVICES'))
predix_asset = services['predix-asset'][0]['credentials']
return predix_asset['uri']
else:
return predix.config.get_env_value(self, 'uri')
def _get_zone_id(self):
"""
Returns the Predix Zone Id for the service, which is a required
header in service calls.
"""
if 'VCAP_SERVICES' in os.environ:
services = json.loads(os.getenv('VCAP_SERVICES'))
predix_asset = services['predix-asset'][0]['credentials']
return predix_asset['zone']['http-header-value']
else:
return predix.config.get_env_value(self, 'zone_id')
def authenticate_as_client(self, client_id, client_secret):
"""
Will authenticate for the given client / secret.
"""
self.service.uaa.authenticate(client_id, client_secret)
def _get_collections(self):
"""
Returns the names of all user-defined domain object collections with
counts for number of domain objects contained in that collection.
..
[ { "collection": "volcano", "count": 1 }, ... ]
"""
uri = self.uri
return self.service._get(uri)
def get_collections(self):
"""
Returns a flat list of the names of collections in the asset
service.
..
['wind-turbines', 'jet-engines']
"""
collections = []
for result in self._get_collections():
collections.append(result['collection'])
return collections
def get_collection(self, collection, filter=None, fields=None,
page_size=None):
"""
Returns a specific collection from the asset service with
the given collection endpoint.
Supports passing through parameters such as...
- filters such as "name=Vesuvius" following GEL spec
- fields such as "uri,description" comma delimited
- page_size such as "100" (the default)
"""
params = {}
if filter:
params['filter'] = filter
if fields:
params['fields'] = fields
if page_size:
params['pageSize'] = page_size
uri = self.uri + '/v1' + collection
return self.service._get(uri, params=params)
def create_guid(self, collection=None):
"""
Returns a new guid for use in posting a new asset to a collection.
"""
guid = str(uuid.uuid4())
if collection:
return str.join('/', [collection, guid])
else:
return guid
def post_collection(self, collection, body):
"""
Creates a new collection. This is mostly just a transport layer
that passes collection and body along. It presumes the body
has already been generated.
The collection is *not* expected to have the id.
"""
assert isinstance(body, (list)), "POST requires body to be a list"
assert collection.startswith('/'), "Collections must start with /"
uri = self.uri + '/v1' + collection
return self.service._post(uri, body)
def put_collection(self, collection, body):
"""
Updates an existing collection.
The collection being updated *is* expected to include the id.
"""
uri = self.uri + '/v1' + collection
return self.service._put(uri, body)
def delete_collection(self, collection):
"""
Deletes an existing collection.
The collection being deleted *is* expected to include the id.
"""
uri = str.join('/', [self.uri, collection])
return self.service._delete(uri)
def patch_collection(self, collection, changes):
"""
Will make specific updates to a record based on JSON Patch
documentation.
https://tools.ietf.org/html/rfc6902
the format of changes is something like::
[{
'op': 'add',
'path': '/newfield',
'value': 'just added'
}]
"""
uri = str.join('/', [self.uri, collection])
return self.service._patch(uri, changes)
def get_audit(self):
"""
Return audit report for asset. Disabled by default.
"""
return self.service._get(self.uri + '/v1/system/audit')
def get_audit_changes(self):
"""
Return change log for audit. Disabled by default.
"""
return self.service._get(self.uri + '/v1/system/audit/changes')
def get_audit_snapshots(self):
"""
Return an audit snapshot. Disabled by default.
"""
return self.service._get(self.uri + '/v1/system/audit/snapshots')
def get_scripts(self):
"""
Return any configured scripts for asset service.
"""
return self.service._get(self.uri + '/v1/system/scripts')
def get_messages(self):
"""
Return any system messages related to asset systems.
"""
return self.service._get(self.uri + '/v1/system/messages')
def get_configs(self):
"""
Return the configuration for the asset service.
"""
return self.service._get(self.uri + '/v1/system/configs')
def get_triggers(self):
"""
Return configured triggers in the asset system.
"""
return self.service._get(self.uri + '/v1/system/triggers')
def save(self, collection):
"""
Save an asset collection to the service.
"""
assert isinstance(collection, predix.data.asset.AssetCollection), "Expected AssetCollection"
collection.validate()
self.put_collection(collection.uri, collection.__dict__) # MAINT: no
class AssetCollection(object):
"""
User Defined Domain Objects are the customizable collections to represent
data in the Asset Service.
This is experimental to provide a base class for a sort of ORM between
domain objects to marshall and unmarshall between Python and the REST
endpoints.
"""
def __init__(self, parent=None, guid=None, *args, **kwargs):
super(AssetCollection, self).__init__(*args, **kwargs)
# You have the right to a guid, if you cannot afford a guid...
if not guid:
guid = str(uuid.uuid4())
# By naming collection after classname we get safe URI
# naming rules as well.
collection = self.get_collection()
# A uri may contain no more than two forward slashes, so collections
# cannot really be nested deeper than one level.
self.uri = '/' + str.join('/', [collection, guid])
def __repr__(self):
return json.dumps(self.__dict__)
def __str__(self):
return json.dumps(self.__dict__)
def get_collection(self):
return type(self).__name__.lower()
def validate(self):
"""
If an asset collection wants any client-side validation the
object can override this method and it is called anytime
we're saving.
"""
return
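# Hedged usage sketch (not part of the library): the client credentials and
# the 'volcano' collection below are placeholders; real values come from your
# Predix service instance / VCAP_SERVICES environment.
if __name__ == '__main__':
    asset = Asset()
    asset.authenticate_as_client('my-client-id', 'my-client-secret')
    print(asset.get_collections())
    guid = asset.create_guid()
    asset.post_collection('/volcano', [{'uri': '/volcano/' + guid,
                                        'name': 'Vesuvius'}])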
|
|
"""
pyscgi.py - Portable SCGI implementation
This module has been written as part of the Cherokee project:
http://www.cherokee-project.com/
"""
# Copyright (c) 2006-2010, Alvaro Lopez Ortega <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * The name "Alvaro Lopez Ortega" may not be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import SocketServer
import traceback
import socket
import errno
import time
import sys
import os
__version__ = '1.14'
__author__ = 'Alvaro Lopez Ortega'
__copyright__ = 'Copyright 2010, Alvaro Lopez Ortega'
__license__ = 'BSD'
class SCGIHandler (SocketServer.StreamRequestHandler):
def __init__ (self, request, client_address, server):
self.env = {}
self.post = None
SocketServer.StreamRequestHandler.__init__ (self, request, client_address, server)
def __safe_read (self, length):
info = ''
while True:
if len(info) >= length:
return info
chunk = None
try:
to_read = length - len(info)
chunk = os.read (self.rfile.fileno(), to_read)
if not len(chunk):
return info
info += chunk
except OSError, e:
if e.errno in (errno.EAGAIN, errno.EWOULDBLOCK, errno.EINPROGRESS):
if chunk:
info += chunk
continue
time.sleep(0.001)
continue
raise
def send(self, buf):
pending = len(buf)
offset = 0
while True:
if not pending:
return
try:
sent = os.write (self.wfile.fileno(), buf[offset:])
pending -= sent
offset += sent
except OSError, e:
if e.errno in (errno.EAGAIN, errno.EWOULDBLOCK, errno.EINPROGRESS):
time.sleep(0.001)
continue
raise
def __read_netstring_size (self):
size = ""
while True:
c = self.__safe_read(1)
if c == ':':
break
elif not c:
raise IOError, 'Malformed netstring'
size += c
return long(size)
def __read_netstring (self):
data = ""
size = self.__read_netstring_size()
while size > 0:
s = self.__safe_read(size)
if not s:
raise IOError, 'Malformed netstring'
data += s
size -= len(s)
if self.__safe_read(1) != ',':
raise IOError, 'Missing netstring terminator'
return data
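# For reference, an SCGI request arrives as a netstring: "<len>:<payload>,",
# where the payload is a series of NUL-separated header name/value pairs.
# A minimal, illustrative example (headers CONTENT_LENGTH=0 and SCGI=1):
#
#     "24:CONTENT_LENGTH\x000\x00SCGI\x001\x00,"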
def __read_env (self):
headers = self.__read_netstring()
items = headers.split('\0')[:-1]
itemsn = len(items)
if itemsn % 2 != 0:
raise Exception, 'Malformed headers'
for i in range(0, itemsn, 2):
self.env[items[i]] = items[i+1]
def handle_post (self):
if self.post:
return
if not self.env.has_key('CONTENT_LENGTH'):
return
length = int(self.env['CONTENT_LENGTH'])
self.post = self.__safe_read(length)
def handle (self):
self.__read_env()
try:
self.handle_request()
except:
if sys.exc_type != SystemExit:
traceback.print_exc() # Print the error
try: # Closes wfile and rfile
self.finish()
except: pass
try: # Send a FIN signal
self.request.shutdown (socket.SHUT_WR)
except: pass
try: # Either: close or reset
self.request.close()
except: pass
def handle_request (self):
self.send('Status: 200 OK\r\n')
self.send("Content-Type: text/plain\r\n\r\n")
self.send("handle_request() should be overridden")
class ThreadingMixIn_Custom (SocketServer.ThreadingMixIn):
def set_synchronous (self, sync):
assert type(sync) == bool
self.syncronous = sync
def process_request (self, request, client_address):
if hasattr(self, 'syncronous') and self.syncronous:
return self.process_request_thread (request, client_address)
return SocketServer.ThreadingMixIn.process_request (self, request, client_address)
class ThreadingUnixStreamServer_Custom (ThreadingMixIn_Custom, SocketServer.UnixStreamServer): pass
class ThreadingTCPServer_Custom (ThreadingMixIn_Custom, SocketServer.TCPServer): pass
# TCP port
#
class SCGIServer (ThreadingTCPServer_Custom):
def __init__(self, handler_class=SCGIHandler, host="", port=4000):
self.allow_reuse_address = True
ThreadingTCPServer_Custom.__init__ (self, (host, port), handler_class)
class SCGIServerFork (SocketServer.ForkingTCPServer):
def __init__(self, handler_class=SCGIHandler, host="", port=4000):
self.allow_reuse_address = True
SocketServer.ForkingTCPServer.__init__ (self, (host, port), handler_class)
# Unix socket
#
class SCGIUnixServer (ThreadingUnixStreamServer_Custom):
def __init__(self, unix_socket, handler_class=SCGIHandler):
self.allow_reuse_address = True
ThreadingUnixStreamServer_Custom.__init__ (self, unix_socket, handler_class)
class SCGIUnixServerFork (SocketServer.UnixStreamServer):
def __init__(self, unix_socket, handler_class=SCGIHandler):
self.allow_reuse_address = True
SocketServer.UnixStreamServer.__init__ (self, unix_socket, handler_class)
def ServerFactory (threading=False, *args, **kargs):
unix_socket = kargs.get('unix_socket', None)
if threading:
if unix_socket:
return SCGIUnixServer (*args, **kargs)
else:
return SCGIServer(*args, **kargs)
else:
if unix_socket:
return SCGIUnixServerFork(*args, **kargs)
else:
return SCGIServerFork(*args, **kargs)
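# Hedged usage sketch (not part of the module): a trivial handler served over
# TCP on the default port 4000; the handler class name is illustrative only.
if __name__ == '__main__':
    class HelloHandler (SCGIHandler):
        def handle_request (self):
            self.handle_post()
            self.send('Status: 200 OK\r\n')
            self.send('Content-Type: text/plain\r\n\r\n')
            self.send('Hello from pyscgi\r\n')

    server = ServerFactory(threading=True, handler_class=HelloHandler, port=4000)
    server.serve_forever()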
|
|
"""Conferences API Version 1.0.
This API client was generated using a template. Make sure this code is valid before using it.
"""
import logging
from datetime import date, datetime
from .base import BaseCanvasAPI
from .base import BaseModel
class ConferencesAPI(BaseCanvasAPI):
"""Conferences API Version 1.0."""
def __init__(self, *args, **kwargs):
"""Init method for ConferencesAPI."""
super(ConferencesAPI, self).__init__(*args, **kwargs)
self.logger = logging.getLogger("py3canvas.ConferencesAPI")
def list_conferences_courses(self, course_id):
"""
List conferences.
Retrieve the paginated list of conferences for this context.
This API returns a JSON object containing the list of conferences;
the key for the list of conferences is "conferences".
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""
ID
"""
path["course_id"] = course_id
self.logger.debug(
"GET /api/v1/courses/{course_id}/conferences with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"GET",
"/api/v1/courses/{course_id}/conferences".format(**path),
data=data,
params=params,
all_pages=True,
)
def list_conferences_groups(self, group_id):
"""
List conferences.
Retrieve the paginated list of conferences for this context.
This API returns a JSON object containing the list of conferences;
the key for the list of conferences is "conferences".
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - group_id
"""
ID
"""
path["group_id"] = group_id
self.logger.debug(
"GET /api/v1/groups/{group_id}/conferences with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"GET",
"/api/v1/groups/{group_id}/conferences".format(**path),
data=data,
params=params,
all_pages=True,
)
def list_conferences_for_current_user(self, state=None):
"""
List conferences for the current user.
Retrieve the paginated list of conferences for all courses and groups
the current user belongs to
This API returns a JSON object containing the list of conferences.
The key for the list of conferences is "conferences".
"""
path = {}
data = {}
params = {}
# OPTIONAL - state
"""
If set to "live", returns only conferences that are live (i.e., have
started and not finished yet). If omitted, returns all conferences for
this user's groups and courses.
"""
if state is not None:
params["state"] = state
self.logger.debug(
"GET /api/v1/conferences with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"GET",
"/api/v1/conferences".format(**path),
data=data,
params=params,
all_pages=True,
)
class Conferencerecording(BaseModel):
"""Conferencerecording Model."""
def __init__(
self,
duration_minutes=None,
title=None,
updated_at=None,
created_at=None,
playback_url=None,
):
"""Init method for Conferencerecording class."""
self._duration_minutes = duration_minutes
self._title = title
self._updated_at = updated_at
self._created_at = created_at
self._playback_url = playback_url
self.logger = logging.getLogger("py3canvas.Conferencerecording")
@property
def duration_minutes(self):
"""duration_minutes."""
return self._duration_minutes
@duration_minutes.setter
def duration_minutes(self, value):
"""Setter for duration_minutes property."""
self.logger.warn(
"Setting values on duration_minutes will NOT update the remote Canvas instance."
)
self._duration_minutes = value
@property
def title(self):
"""title."""
return self._title
@title.setter
def title(self, value):
"""Setter for title property."""
self.logger.warn(
"Setting values on title will NOT update the remote Canvas instance."
)
self._title = value
@property
def updated_at(self):
"""updated_at."""
return self._updated_at
@updated_at.setter
def updated_at(self, value):
"""Setter for updated_at property."""
self.logger.warn(
"Setting values on updated_at will NOT update the remote Canvas instance."
)
self._updated_at = value
@property
def created_at(self):
"""created_at."""
return self._created_at
@created_at.setter
def created_at(self, value):
"""Setter for created_at property."""
self.logger.warn(
"Setting values on created_at will NOT update the remote Canvas instance."
)
self._created_at = value
@property
def playback_url(self):
"""playback_url."""
return self._playback_url
@playback_url.setter
def playback_url(self, value):
"""Setter for playback_url property."""
self.logger.warn(
"Setting values on playback_url will NOT update the remote Canvas instance."
)
self._playback_url = value
class Conference(BaseModel):
"""Conference Model."""
def __init__(
self,
id=None,
conference_type=None,
conference_key=None,
description=None,
duration=None,
ended_at=None,
started_at=None,
title=None,
users=None,
has_advanced_settings=None,
long_running=None,
user_settings=None,
recordings=None,
url=None,
join_url=None,
context_type=None,
context_id=None,
):
"""Init method for Conference class."""
self._id = id
self._conference_type = conference_type
self._conference_key = conference_key
self._description = description
self._duration = duration
self._ended_at = ended_at
self._started_at = started_at
self._title = title
self._users = users
self._has_advanced_settings = has_advanced_settings
self._long_running = long_running
self._user_settings = user_settings
self._recordings = recordings
self._url = url
self._join_url = join_url
self._context_type = context_type
self._context_id = context_id
self.logger = logging.getLogger("py3canvas.Conference")
@property
def id(self):
"""The id of the conference."""
return self._id
@id.setter
def id(self, value):
"""Setter for id property."""
self.logger.warn(
"Setting values on id will NOT update the remote Canvas instance."
)
self._id = value
@property
def conference_type(self):
"""The type of conference."""
return self._conference_type
@conference_type.setter
def conference_type(self, value):
"""Setter for conference_type property."""
self.logger.warn(
"Setting values on conference_type will NOT update the remote Canvas instance."
)
self._conference_type = value
@property
def conference_key(self):
"""The 3rd party's ID for the conference."""
return self._conference_key
@conference_key.setter
def conference_key(self, value):
"""Setter for conference_key property."""
self.logger.warn(
"Setting values on conference_key will NOT update the remote Canvas instance."
)
self._conference_key = value
@property
def description(self):
"""The description for the conference."""
return self._description
@description.setter
def description(self, value):
"""Setter for description property."""
self.logger.warn(
"Setting values on description will NOT update the remote Canvas instance."
)
self._description = value
@property
def duration(self):
"""The expected duration the conference is supposed to last."""
return self._duration
@duration.setter
def duration(self, value):
"""Setter for duration property."""
self.logger.warn(
"Setting values on duration will NOT update the remote Canvas instance."
)
self._duration = value
@property
def ended_at(self):
"""The date that the conference ended at, null if it hasn't ended."""
return self._ended_at
@ended_at.setter
def ended_at(self, value):
"""Setter for ended_at property."""
self.logger.warn(
"Setting values on ended_at will NOT update the remote Canvas instance."
)
self._ended_at = value
@property
def started_at(self):
"""The date the conference started at, null if it hasn't started."""
return self._started_at
@started_at.setter
def started_at(self, value):
"""Setter for started_at property."""
self.logger.warn(
"Setting values on started_at will NOT update the remote Canvas instance."
)
self._started_at = value
@property
def title(self):
"""The title of the conference."""
return self._title
@title.setter
def title(self, value):
"""Setter for title property."""
self.logger.warn(
"Setting values on title will NOT update the remote Canvas instance."
)
self._title = value
@property
def users(self):
"""Array of user ids that are participants in the conference."""
return self._users
@users.setter
def users(self, value):
"""Setter for users property."""
self.logger.warn(
"Setting values on users will NOT update the remote Canvas instance."
)
self._users = value
@property
def has_advanced_settings(self):
"""True if the conference type has advanced settings."""
return self._has_advanced_settings
@has_advanced_settings.setter
def has_advanced_settings(self, value):
"""Setter for has_advanced_settings property."""
self.logger.warn(
"Setting values on has_advanced_settings will NOT update the remote Canvas instance."
)
self._has_advanced_settings = value
@property
def long_running(self):
"""If true the conference is long running and has no expected end time."""
return self._long_running
@long_running.setter
def long_running(self, value):
"""Setter for long_running property."""
self.logger.warn(
"Setting values on long_running will NOT update the remote Canvas instance."
)
self._long_running = value
@property
def user_settings(self):
"""A collection of settings specific to the conference type."""
return self._user_settings
@user_settings.setter
def user_settings(self, value):
"""Setter for user_settings property."""
self.logger.warn(
"Setting values on user_settings will NOT update the remote Canvas instance."
)
self._user_settings = value
@property
def recordings(self):
"""A List of recordings for the conference."""
return self._recordings
@recordings.setter
def recordings(self, value):
"""Setter for recordings property."""
self.logger.warn(
"Setting values on recordings will NOT update the remote Canvas instance."
)
self._recordings = value
@property
def url(self):
"""URL for the conference, may be null if the conference type doesn't set it."""
return self._url
@url.setter
def url(self, value):
"""Setter for url property."""
self.logger.warn(
"Setting values on url will NOT update the remote Canvas instance."
)
self._url = value
@property
def join_url(self):
"""URL to join the conference, may be null if the conference type doesn't set it."""
return self._join_url
@join_url.setter
def join_url(self, value):
"""Setter for join_url property."""
self.logger.warn(
"Setting values on join_url will NOT update the remote Canvas instance."
)
self._join_url = value
@property
def context_type(self):
"""The type of this conference's context, typically 'Course' or 'Group'."""
return self._context_type
@context_type.setter
def context_type(self, value):
"""Setter for context_type property."""
self.logger.warn(
"Setting values on context_type will NOT update the remote Canvas instance."
)
self._context_type = value
@property
def context_id(self):
"""The ID of this conference's context."""
return self._context_id
@context_id.setter
def context_id(self, value):
"""Setter for context_id property."""
self.logger.warn(
"Setting values on context_id will NOT update the remote Canvas instance."
)
self._context_id = value
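# Hedged usage sketch (not part of the generated client). The constructor
# arguments are assumed to be the Canvas instance URL and an API token, as
# accepted by BaseCanvasAPI in .base; check that module for the real
# signature before relying on this.
if __name__ == '__main__':
    api = ConferencesAPI('https://canvas.example.edu/api', 'my-api-token')
    live = api.list_conferences_for_current_user(state='live')
    # per the docstrings above, the conferences live under the "conferences" key
    print(live)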
|
|
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import codecs
import glob
import json
import os
import shutil
import stat
import sys
import tempfile
import zipfile
from botocore.compat import six
from cement.utils.misc import minimal_logger
from six import StringIO
from yaml import load, dump, safe_dump
from yaml.parser import ParserError
from yaml.scanner import ScannerError
import pathspec
try:
import configparser
except ImportError:
import ConfigParser as configparser
from ..core import io
from ..objects.exceptions import NotInitializedError, InvalidSyntaxError, \
NotFoundError
LOG = minimal_logger(__name__)
def get_aws_home():
sep = os.path.sep
p = '~' + sep + '.aws' + sep
return os.path.expanduser(p)
def get_ssh_folder():
sep = os.path.sep
p = '~' + sep + '.ssh' + sep
p = os.path.expanduser(p)
if not os.path.exists(p):
os.makedirs(p)
return p
beanstalk_directory = '.elasticbeanstalk' + os.path.sep
global_config_file = beanstalk_directory + 'config.global.yml'
local_config_file = beanstalk_directory + 'config.yml'
aws_config_folder = get_aws_home()
aws_config_location = aws_config_folder + 'config'
aws_access_key = 'aws_access_key_id'
aws_secret_key = 'aws_secret_access_key'
region_key = 'region'
default_section = 'default'
ebcli_section = 'profile eb-cli'
app_version_folder = beanstalk_directory + 'app_versions'
logs_folder = beanstalk_directory + 'logs' + os.path.sep
_marker = object()
def _get_option(config, section, key, default):
try:
return config.get(section, key)
except (configparser.NoSectionError, configparser.NoOptionError):
return default
def is_git_directory_present():
return os.path.isdir('.git')
def clean_up():
# remove dir
cwd = os.getcwd()
try:
_traverse_to_project_root()
if os.path.isdir(beanstalk_directory):
shutil.rmtree(beanstalk_directory)
finally:
os.chdir(cwd)
def _set_not_none(config, section, option, value):
if value:
config.set(section, option, value)
def get_war_file_location():
cwd = os.getcwd()
try:
_traverse_to_project_root()
lst = glob.glob('build/libs/*.war')
try:
return os.path.join(os.getcwd(), lst[0])
except IndexError:
raise NotFoundError('Can not find .war artifact in build' +
os.path.sep + 'libs' + os.path.sep)
finally:
os.chdir(cwd)
def old_eb_config_present():
return os.path.isfile(beanstalk_directory + 'config')
def config_file_present():
return os.path.isfile(local_config_file)
def project_file_path(filename):
return os.path.join(get_project_root(), filename)
def project_file_exists(filename):
return file_exists(project_file_path(filename))
def get_values_from_old_eb():
old_config_file = beanstalk_directory + 'config'
config = configparser.ConfigParser()
config.read(old_config_file)
app_name = _get_option(config, 'global', 'ApplicationName', None)
cred_file = _get_option(config, 'global', 'AwsCredentialFile', None)
default_env = _get_option(config, 'global', 'EnvironmentName', None)
solution_stack_name = _get_option(config, 'global', 'SolutionStack', None)
region = _get_option(config, 'global', 'Region', None)
access_id, secret_key = read_old_credentials(cred_file)
return {'app_name': app_name,
'access_id': access_id,
'secret_key': secret_key,
'default_env': default_env,
'platform': solution_stack_name,
'region': region,
}
def read_old_credentials(file_location):
if file_location is None:
return None, None
config_str = '[default]\n' + open(file_location, 'r').read()
config_fp = StringIO(config_str)
config = configparser.ConfigParser()
config.readfp(config_fp)
access_id = _get_option(config, 'default', 'AWSAccessKeyId', None)
secret_key = _get_option(config, 'default', 'AWSSecretKey', None)
return access_id, secret_key
def save_to_aws_config(access_key, secret_key):
config = configparser.ConfigParser()
if not os.path.isdir(aws_config_folder):
os.makedirs(aws_config_folder)
config.read(aws_config_location)
if ebcli_section not in config.sections():
config.add_section(ebcli_section)
_set_not_none(config, ebcli_section, aws_access_key, access_key)
_set_not_none(config, ebcli_section, aws_secret_key, secret_key)
with open(aws_config_location, 'w') as f:
config.write(f)
set_user_only_permissions(aws_config_location)
def set_user_only_permissions(location):
"""
Sets permissions so that only the owning user can read/write (chmod 600).
Can be a folder or a file.
:param location: Full path to either a folder or a file
"""
if os.path.isdir(location):
for root, dirs, files in os.walk(location):
for d in dirs:
_set_user_only_permissions_file(os.path.join(root, d), ex=True)
for f in files:
_set_user_only_permissions_file(os.path.join(root, f))
else:
_set_user_only_permissions_file(location)
def _set_user_only_permissions_file(location, ex=False):
"""
:param ex: Boolean: add executable permission
"""
permission = stat.S_IRUSR | stat.S_IWUSR
if ex:
permission |= stat.S_IXUSR
os.chmod(location, permission)
def set_all_unrestricted_permissions(location):
"""
Set permissions so that user, group, and others all have read,
write and execute permissions (chmod 777).
:param location: Full path to either a folder or a file
"""
os.chmod(location, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
def get_current_directory_name():
dirname, filename = os.path.split(os.getcwd())
if sys.version_info[0] < 3:
filename = filename.decode('utf8')
return filename
def get_application_name(default=_marker):
result = get_config_setting('global', 'application_name')
if result is not None:
return result
# get_config_setting should throw error if directory is not set up
LOG.debug('Directory found, but no config or app name exists')
if default is _marker:
raise NotInitializedError
return default
def touch_config_folder():
if not os.path.isdir(beanstalk_directory):
os.makedirs(beanstalk_directory)
def create_config_file(app_name, region, solution_stack):
"""
We want to make sure we do not override the file if it already exists,
but we do want to fill in all missing pieces
:param app_name: name of the application
:return: VOID: no return value
"""
LOG.debug('Creating config file at ' + os.getcwd())
if not os.path.isdir(beanstalk_directory):
os.makedirs(beanstalk_directory)
# add to global without writing over any settings if they exist
write_config_setting('global', 'application_name', app_name)
write_config_setting('global', 'default_region', region)
write_config_setting('global', 'default_platform', solution_stack)
def _traverse_to_project_root():
cwd = os.getcwd()
if not os.path.isdir(beanstalk_directory):
LOG.debug('beanstalk directory not found in ' + cwd +
' -Going up a level')
os.chdir(os.path.pardir) # Go up one directory
if cwd == os.getcwd(): # We can't move any further
LOG.debug('Still at the same directory ' + cwd)
raise NotInitializedError('EB is not yet initialized')
_traverse_to_project_root()
else:
LOG.debug('Project root found at: ' + cwd)
def get_project_root():
cwd = os.getcwd()
try:
_traverse_to_project_root()
return os.getcwd()
finally:
os.chdir(cwd)
def get_zip_location(file_name):
cwd = os.getcwd()
try:
_traverse_to_project_root()
if not os.path.isdir(app_version_folder):
# create it
os.makedirs(app_version_folder)
return os.path.abspath(app_version_folder) + os.path.sep + file_name
finally:
os.chdir(cwd)
def get_logs_location(folder_name):
cwd = os.getcwd()
try:
_traverse_to_project_root()
if not os.path.isdir(logs_folder):
# create it
os.makedirs(logs_folder)
return os.path.abspath(os.path.join(logs_folder, folder_name))
finally:
os.chdir(cwd)
def program_is_installed(program):
return False if os_which(program) is None else True
def os_which(program):
path = os.getenv('PATH')
for p in path.split(os.path.pathsep):
p = os.path.join(p, program)
if sys.platform.startswith('win'):
# Add .exe for windows
p += '.exe'
if os.path.exists(p) and os.access(p, os.X_OK):
return p
def delete_file(location):
if os.path.exists(location):
os.remove(location)
def delete_directory(location):
if os.path.isdir(location):
shutil.rmtree(location)
def delete_app_versions():
cwd = os.getcwd()
try:
_traverse_to_project_root()
delete_directory(app_version_folder)
finally:
os.chdir(cwd)
def zip_up_folder(directory, location, ignore_list=None):
cwd = os.getcwd()
try:
os.chdir(directory)
io.log_info('Zipping up folder at location: ' + str(os.getcwd()))
zipf = zipfile.ZipFile(location, 'w', zipfile.ZIP_DEFLATED)
_zipdir('./', zipf, ignore_list=ignore_list)
zipf.close()
LOG.debug('File size: ' + str(os.path.getsize(location)))
finally:
os.chdir(cwd)
def zip_up_project(location, ignore_list=None):
cwd = os.getcwd()
try:
_traverse_to_project_root()
zip_up_folder('./', location, ignore_list=ignore_list)
finally:
os.chdir(cwd)
def _zipdir(path, zipf, ignore_list=None):
if ignore_list is None:
ignore_list = ['.gitignore']
ignore_list = ['./' + i for i in ignore_list]
zipped_roots = []
for root, dirs, files in os.walk(path):
if '.elasticbeanstalk' in root:
io.log_info(' -skipping: {}'.format(root))
continue
for f in files:
cur_file = os.path.join(root, f)
if cur_file.endswith('~') or cur_file in ignore_list:
# Ignore editor backup files (like file.txt~)
# Ignore anything in the .ebignore file
io.log_info(' -skipping: {}'.format(cur_file))
else:
if root not in zipped_roots:
# Windows requires us to index the folders.
io.log_info(' +adding: {}/'.format(root))
zipf.write(root)
zipped_roots.append(root)
io.log_info(' +adding: {}'.format(cur_file))
zipf.write(cur_file)
def unzip_folder(file_location, directory):
if not os.path.isdir(directory):
os.makedirs(directory)
zip = zipfile.ZipFile(file_location, 'r')
for cur_file in zip.namelist():
if not cur_file.endswith('/'):
root, name = os.path.split(cur_file)
path = os.path.normpath(os.path.join(directory, root))
if not os.path.isdir(path):
os.makedirs(path)
open(os.path.join(path, name), 'wb').write(zip.read(cur_file))
def save_to_file(data, location, filename):
if not os.path.isdir(location):
os.makedirs(location)
file_location = os.path.join(location, filename)
data_file = open(file_location, 'wb')
data_file.write(data)
data_file.close()
return file_location
def delete_env_file(env_name):
cwd = os.getcwd()
file_name = beanstalk_directory + env_name
try:
_traverse_to_project_root()
for file_ext in ['.ebe.yml', '.env.yml']:
path = file_name + file_ext
delete_file(path)
finally:
os.chdir(cwd)
def get_editor():
editor = None
try:
editor = get_config_setting('global', 'editor')
except NotInitializedError:
pass
if not editor:
editor = os.getenv('EDITOR')
if not editor:
platform = sys.platform
windows = platform.startswith('win')
if windows:
editor = None
else:
editor = 'nano'
return editor
def save_env_file(env):
cwd = os.getcwd()
env_name = env['EnvironmentName']
# ..yml extension helps editors enable syntax highlighting
file_name = env_name + '.env.yml'
file_name = beanstalk_directory + file_name
try:
_traverse_to_project_root()
file_name = os.path.abspath(file_name)
with codecs.open(file_name, 'w', encoding='utf8') as f:
f.write(safe_dump(env, default_flow_style=False,
line_break=os.linesep))
except NotInitializedError:
_, file_name = tempfile.mkstemp('.env.yml')
with codecs.open(file_name, 'w', encoding='utf8') as f:
f.write(safe_dump(env, default_flow_style=False,
line_break=os.linesep))
finally:
os.chdir(cwd)
return file_name
def get_environment_from_path(path):
env = None
try:
if os.path.exists(path):
with codecs.open(path, 'r', encoding='utf8') as f:
env = load(f)
except (ScannerError, ParserError):
raise InvalidSyntaxError('The environment file contains '
'invalid syntax.')
return env
def get_environment_from_file(env_name):
cwd = os.getcwd()
file_name = beanstalk_directory + env_name
env = None
try:
_traverse_to_project_root()
file_ext = '.env.yml'
path = file_name + file_ext
if os.path.exists(path):
with codecs.open(path, 'r', encoding='utf8') as f:
env = load(f)
except (ScannerError, ParserError):
raise InvalidSyntaxError('The environment file contains '
'invalid syntax.')
finally:
os.chdir(cwd)
return env
def write_config_setting(section, key_name, value):
cwd = os.getcwd() # save working directory
try:
_traverse_to_project_root()
config = _get_yaml_dict(local_config_file)
if not config:
config = {}
config.setdefault(section, {})[key_name] = value
with codecs.open(local_config_file, 'w', encoding='utf8') as f:
f.write(safe_dump(config, default_flow_style=False,
line_break=os.linesep))
finally:
os.chdir(cwd) # go back to working directory
def get_config_setting(section, key_name, default=_marker):
# get setting from global if it exists
cwd = os.getcwd() # save working directory
try:
_traverse_to_project_root()
config_global = _get_yaml_dict(global_config_file)
config_local = _get_yaml_dict(local_config_file)
# Grab value, local gets priority
try:
value = config_global[section][key_name]
except KeyError:
value = None
try:
if config_local:
value = config_local[section][key_name]
except KeyError:
pass # Revert to global value
if value is None and default != _marker:
return default
except NotInitializedError:
if default == _marker:
raise
else:
return default
finally:
os.chdir(cwd) # move back to working directory
return value
def get_json_dict(fullpath):
"""
Read json file at fullpath and deserialize as dict.
:param fullpath: str: path to the json file
:return: dict
"""
return json.loads(read_from_text_file(fullpath))
def write_json_dict(json_data, fullpath):
def date_handler(obj):
return obj.isoformat() if hasattr(obj, 'isoformat') else obj
data = json.dumps(json_data, sort_keys=True, indent=4,
default=date_handler)
write_to_text_file(data, fullpath)
def _get_yaml_dict(filename):
try:
with codecs.open(filename, 'r', encoding='utf8') as f:
return load(f)
except IOError:
return {}
def file_exists(full_path):
return os.path.isfile(full_path)
def eb_file_exists(location):
cwd = os.getcwd()
try:
_traverse_to_project_root()
path = beanstalk_directory + location
return os.path.isfile(path)
finally:
os.chdir(cwd)
def directory_empty(location):
return not os.listdir(location)
def get_ebignore_list():
EB_IGNORE_FILE_NAME = '.ebignore'
location = get_project_file_full_location(EB_IGNORE_FILE_NAME)
if not os.path.isfile(location):
return None
'''
This library will parse the ignore file, compare it to the current files
and give us a list of files to ignore
'''
# Patch iter_tree to not throw recursion error on non recursive links
from pathspec import util
def iter_tree(root):
"""
Walks the specified root path for all files.
*root* (``str``) is the root directory to search for files.
Raises ``RecursionError`` if recursion is detected.
Returns an ``Iterable`` yielding each file path (``str``) relative to
*root*.
.. _`recursion`: http://docs.python.org/2/library/os.html#os.walk
"""
# Keep track of files encountered. Map real path to relative path.
memo = {}
root = os.path.abspath(root)
for parent, _dirs, files in os.walk(root, followlinks=True):
# Get parent path relative to root path.
parent = os.path.relpath(parent, root)
# Check for recursion.
real = os.path.realpath(parent)
if real in memo:
abspath = os.path.abspath(parent)
if real != abspath and real in abspath:
# if real is a parent of current parent
raise util.RecursionError(real_path=real, first_path=memo[real], second_path=parent)
else:
# not recursion, just a sideways link
continue
memo[real] = parent
# Yield files.
for path in files:
if parent != '.':
path = os.path.join(parent, path)
yield path
util.iter_tree = iter_tree
with open(location, 'r') as f:
spec = pathspec.PathSpec.from_lines('gitignore', f)
ignore_list = [f for f in spec.match_tree(get_project_root())]
ignore_list.append(EB_IGNORE_FILE_NAME)
return ignore_list
def make_eb_dir(location):
cwd = os.getcwd()
try:
_traverse_to_project_root()
path = beanstalk_directory + location
if not os.path.isdir(path):
os.makedirs(path)
finally:
os.chdir(cwd)
def write_to_eb_data_file(location, data):
cwd = os.getcwd()
try:
_traverse_to_project_root()
path = beanstalk_directory + location
write_to_data_file(path, data)
finally:
os.chdir(cwd)
def read_from_eb_data_file(location):
cwd = os.getcwd()
try:
_traverse_to_project_root()
path = beanstalk_directory + location
return read_from_data_file(path)
finally:
os.chdir(cwd)
def write_to_data_file(location, data):
with codecs.open(location, 'wb', encoding=None) as f:
f.write(data)
def read_from_data_file(location):
with codecs.open(location, 'rb', encoding=None) as f:
return f.read()
def read_from_text_file(location):
with codecs.open(location, 'rt', encoding=None) as f:
return f.read()
def write_to_text_file(data, location):
with codecs.open(location, 'wt', encoding=None) as f:
f.write(data)
def append_to_text_file(location, data):
with codecs.open(location, 'at', encoding=None) as f:
f.write(data)
def readlines_from_text_file(location):
with codecs.open(location, 'rt', encoding=None) as f:
return f.readlines()
def get_project_file_full_location(location):
cwd = os.getcwd()
try:
_traverse_to_project_root()
full_path = os.path.abspath(location)
return full_path
finally:
os.chdir(cwd)
def get_eb_file_full_location(location):
return get_project_file_full_location(beanstalk_directory + location)
def get_home():
return os.path.expanduser('~')
def get_filename_without_extension(file_location):
filename = os.path.basename(file_location)
extension = 'fake'
while extension != '':
# Split multiple extensions
filename, extension = os.path.splitext(filename)
return filename
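# Hedged usage sketch (not part of the module): assumes the current working
# directory is inside a project that already has a .elasticbeanstalk folder;
# the setting values below are placeholders.
if __name__ == '__main__':
    write_config_setting('global', 'application_name', 'my-app')
    print(get_config_setting('global', 'application_name', default=None))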
|
|
import os
import codecs
import logging
import json
from collections import namedtuple
from django.utils.datastructures import MultiValueDict as MultiDict
from django.conf import settings
from django.utils.http import urlencode
from django.core.urlresolvers import reverse
import dateutil.parser
from time import mktime, strptime
import datetime
import elasticsearch
from unipath import Path
#from sheer.utility import find_in_search_path
from .filters import filter_dsl_from_multidict
from .middleware import get_request
ALLOWED_SEARCH_PARAMS = ('doc_type',
'analyze_wildcard', 'analyzer', 'default_operator', 'df',
'explain', 'fields', 'indices_boost', 'lenient',
'allow_no_indices', 'expand_wildcards', 'ignore_unavailable',
'lowercase_expanded_terms', 'from_', 'preference', 'q', 'routing',
'scroll', 'search_type', 'size', 'sort', 'source', 'stats',
'suggest_field', 'suggest_mode', 'suggest_size', 'suggest_text', 'timeout',
'version')
FakeQuery = namedtuple('FakeQuery',['es','es_index'])
def mapping_for_type(typename, es, es_index):
return es.indices.get_mapping(index=es_index, doc_type=typename)
def field_or_source_value(fieldname, hit_dict):
if 'fields' in hit_dict and fieldname in hit_dict['fields']:
return hit_dict['fields'][fieldname]
if '_source' in hit_dict and fieldname in hit_dict['_source']:
return hit_dict['_source'][fieldname]
def datatype_for_fieldname_in_mapping(fieldname, hit_type, mapping_dict, es, es_index):
try:
return mapping_dict[es_index]["mappings"][hit_type]["properties"][fieldname]["type"]
except KeyError:
return None
def coerced_value(value, datatype):
if datatype is None or value is None:
return value
TYPE_MAP = {'string': unicode,
'date': dateutil.parser.parse,
'dict': dict,
'float': float,
'long': float,
'boolean': bool}
coercer = TYPE_MAP[datatype]
if type(value) == list:
if value and type(value[0]) == list:
return [[coercer(y) for y in v] for v in value]
else:
return [coercer(v) for v in value] or ""
else:
return coercer(value)
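# For example, assuming the mapping reports a 'date' field, coerced_value
# turns the raw Elasticsearch string into a datetime via dateutil:
#
#     coerced_value('2015-03-01T00:00:00', 'date')
#     # -> datetime.datetime(2015, 3, 1, 0, 0)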
class QueryHit(object):
def __init__(self, hit_dict, es, es_index):
self.hit_dict = hit_dict
self.type = hit_dict['_type']
self.es = es
self.es_index = es_index
self.mapping = mapping_for_type(self.type, es=es, es_index=es_index)
def __str__(self):
return str(self.hit_dict.get('_source'))
def __repr__(self):
return self.__str__()
@property
def permalink(self):
import sheerlike
if self.type in sheerlike.PERMALINK_REGISTRY:
pattern_name = sheerlike.PERMALINK_REGISTRY[self.type]
return reverse(pattern_name,kwargs=dict(doc_id=self._id))
else:
raise NotImplementedError("Please use django's reverse url system,"
"or register a permalink for %s" % self.type)
def __getattr__(self, attrname):
value = field_or_source_value(attrname, self.hit_dict)
datatype = datatype_for_fieldname_in_mapping(
attrname, self.type, self.mapping, self.es, self.es_index)
return coerced_value(value, datatype)
def json_compatible(self):
hit_dict = self.hit_dict
fields = hit_dict.get('fields') or hit_dict.get('_source', {}).keys()
return dict((field, getattr(self, field)) for field in fields)
class QueryResults(object):
def __init__(self, query, result_dict, pagenum=1):
self.result_dict = result_dict
self.total = int(result_dict['hits']['total'])
self.query = query
# confusing: using the word 'query' to mean different things
# above, it's the Query object
# below, it's Elasticsearch query DSL
if 'query' in result_dict:
self.size = int(result_dict['query'].get('size', '10'))
self.from_ = int(result_dict['query'].get('from', 1))
self.pages = self.total / self.size + \
int(self.total % self.size > 0)
else:
self.size, self.from_, self.pages = 10, 1, 1
self.current_page = pagenum
def __iter__(self):
if 'hits' in self.result_dict and 'hits' in self.result_dict['hits']:
for hit in self.result_dict['hits']['hits']:
query_hit = QueryHit(hit, self.query.es, self.query.es_index)
yield query_hit
def aggregations(self, fieldname):
if "aggregations" in self.result_dict and \
fieldname in self.result_dict['aggregations']:
return self.result_dict['aggregations'][fieldname]['buckets']
def json_compatible(self):
response_data = {}
response_data['total'] = self.result_dict['hits']['total']
if self.size:
response_data['size'] = self.size
if self.from_:
response_data['from'] = self.from_
if self.pages:
response_data['pages'] = self.pages
response_data['results'] = [
hit.json_compatible() for hit in self.__iter__()]
return response_data
def url_for_page(self, pagenum):
request = get_request()
current_args = request.GET
args_dict = MultiDict(current_args)
if pagenum != 1:
args_dict['page'] = pagenum
elif 'page' in args_dict:
del args_dict['page']
encoded = urlencode(args_dict, doseq=True)
if encoded:
url = "".join([request.path, "?", urlencode(args_dict, doseq=True)])
return url
else:
return request.path
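    # Usage sketch (illustrative comment, not part of the original class): for a
    # request to /newsroom/?filter_category=Op-Ed, this yields something like
    #   results.url_for_page(2) -> "/newsroom/?filter_category=Op-Ed&page=2"
    #   results.url_for_page(1) -> "/newsroom/?filter_category=Op-Ed"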
class Query(object):
def __init__(self, filename,es, es_index, json_safe=False):
# TODO: make the no filename case work
self.es_index = es_index
self.es = es
self.filename = filename
self.__results = None
self.json_safe = json_safe
def search(self, aggregations=None, use_url_arguments=True, size=10, **kwargs):
query_file = json.loads(file(self.filename).read())
query_dict = query_file['query']
        # These dict() constructors split the kwargs passed in from the template
        # into filter arguments and arguments placed directly into the query body.
        # The dict() constructor syntax works on Python 2.6, 2.7 and 3.x; on 2.7
        # a dict comprehension with iteritems() would also work, and on 3.x a
        # dict comprehension with items() (which replaces iteritems()).
filter_args = dict((key, value) for (key, value) in kwargs.items()
if key.startswith('filter_'))
non_filter_args = dict((key, value) for (key, value) in kwargs.items()
if not key.startswith('filter_'))
query_dict.update(non_filter_args)
pagenum = 1
request = get_request()
# Add in filters from the template.
new_multidict = MultiDict()
# First add the url arguments if requested
if use_url_arguments:
new_multidict = MultiDict(request.GET.copy())
# Next add the arguments from the search() function used in the
# template
for key, value in filter_args.items():
new_multidict.update({key: value})
filters = filter_dsl_from_multidict(new_multidict)
args_flat = request.GET.copy()
query_body = {}
if aggregations:
aggs_dsl = {}
if type(aggregations) is str:
aggregations = [aggregations] # so we can treat it as a list
for fieldname in aggregations:
aggs_dsl[fieldname] = {'terms':
{'field': fieldname, 'size': 10000}}
query_body['aggs'] = aggs_dsl
else:
if 'page' in args_flat:
args_flat['from_'] = int(
query_dict.get('size', '10')) * (int(args_flat['page']) - 1)
pagenum = int(args_flat['page'])
args_flat_filtered = dict(
[(k, v) for k, v in args_flat.items() if v])
query_dict.update(args_flat_filtered)
query_body['query'] = {'filtered': {'filter': {}}}
if filters:
query_body['query']['filtered']['filter'][
'and'] = [f for f in filters]
if 'filters' in query_file:
if 'and' not in query_body['query']['filtered']['filter']:
query_body['query']['filtered']['filter']['and'] = []
for json_filter in query_file['filters']:
query_body['query']['filtered'][
'filter']['and'].append(json_filter)
final_query_dict = dict((k, v)
for (k, v) in query_dict.items() if k in ALLOWED_SEARCH_PARAMS)
final_query_dict['index'] = self.es_index
final_query_dict['body'] = query_body
response = self.es.search(**final_query_dict)
response['query'] = query_dict
return QueryResults(self,response, pagenum)
def possible_values_for(self, field, **kwargs):
results = self.search(aggregations=[field], **kwargs)
return results.aggregations(field)
class QueryFinder(object):
def __init__(self):
self.es = elasticsearch.Elasticsearch(settings.SHEER_ELASTICSEARCH_SERVER)
self.es_index = settings.SHEER_ELASTICSEARCH_INDEX
self.searchpath = [Path(site).child('_queries') for site in settings.SHEER_SITES]
def __getattr__(self, name):
for dir in self.searchpath:
query_filename = name + ".json"
query_file_path = os.path.join(dir, query_filename)
if os.path.exists(query_file_path):
query = Query(query_file_path, self.es, self.es_index)
return query
class QueryJsonEncoder(json.JSONEncoder):
query_classes = [QueryResults, QueryHit]
def default(self, obj):
if type(obj) in (datetime.datetime, datetime.date):
return obj.isoformat()
if type(obj) in self.query_classes:
return obj.json_compatible()
return json.JSONEncoder.default(self, obj)
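# A minimal usage sketch (assumed helper, not part of the original module):
# QueryResults and QueryHit objects can be serialized for JSON responses by
# passing the encoder class to json.dumps.
def _example_json_response(results):
    """Illustrative only: encode QueryResults/QueryHit objects to a JSON string."""
    return json.dumps(results, cls=QueryJsonEncoder)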
def more_like_this(hit, **kwargs):
es = elasticsearch.Elasticsearch(settings.SHEER_ELASTICSEARCH_SERVER)
es_index = settings.SHEER_ELASTICSEARCH_INDEX
doctype, docid = hit.type, hit._id
raw_results = es.mlt(
index=es_index, doc_type=doctype, id=docid, **kwargs)
# this is bad and I should feel bad
# (I do)
fake_query = FakeQuery(es,es_index)
return QueryResults(fake_query,raw_results)
def get_document(doctype, docid):
es = elasticsearch.Elasticsearch(settings.SHEER_ELASTICSEARCH_SERVER)
es_index = settings.SHEER_ELASTICSEARCH_INDEX
raw_results = es.get(index=es_index, doc_type=doctype, id=docid)
return QueryHit(raw_results, es, es_index)
|
|
"""Support for Motion Blinds sensors."""
import logging
from motionblinds import BlindType
from homeassistant.const import (
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_SIGNAL_STRENGTH,
PERCENTAGE,
SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import DOMAIN, KEY_COORDINATOR, KEY_GATEWAY
_LOGGER = logging.getLogger(__name__)
ATTR_BATTERY_VOLTAGE = "battery_voltage"
TYPE_BLIND = "blind"
TYPE_GATEWAY = "gateway"
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Perform the setup for Motion Blinds."""
entities = []
motion_gateway = hass.data[DOMAIN][config_entry.entry_id][KEY_GATEWAY]
coordinator = hass.data[DOMAIN][config_entry.entry_id][KEY_COORDINATOR]
for blind in motion_gateway.device_list.values():
entities.append(MotionSignalStrengthSensor(coordinator, blind, TYPE_BLIND))
if blind.type == BlindType.TopDownBottomUp:
entities.append(MotionTDBUBatterySensor(coordinator, blind, "Bottom"))
entities.append(MotionTDBUBatterySensor(coordinator, blind, "Top"))
elif blind.battery_voltage > 0:
# Only add battery powered blinds
entities.append(MotionBatterySensor(coordinator, blind))
entities.append(
MotionSignalStrengthSensor(coordinator, motion_gateway, TYPE_GATEWAY)
)
async_add_entities(entities)
class MotionBatterySensor(CoordinatorEntity, Entity):
"""
Representation of a Motion Battery Sensor.
Updates are done by the cover platform.
"""
def __init__(self, coordinator, blind):
"""Initialize the Motion Battery Sensor."""
super().__init__(coordinator)
self._blind = blind
@property
def unique_id(self):
"""Return the unique id of the blind."""
return f"{self._blind.mac}-battery"
@property
def device_info(self):
"""Return the device info of the blind."""
return {"identifiers": {(DOMAIN, self._blind.mac)}}
@property
def name(self):
"""Return the name of the blind battery sensor."""
return f"{self._blind.blind_type}-battery-{self._blind.mac[12:]}"
@property
def available(self):
"""Return True if entity is available."""
return self._blind.available
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return PERCENTAGE
@property
def device_class(self):
"""Return the device class of this entity."""
return DEVICE_CLASS_BATTERY
@property
def state(self):
"""Return the state of the sensor."""
return self._blind.battery_level
@property
def device_state_attributes(self):
"""Return device specific state attributes."""
return {ATTR_BATTERY_VOLTAGE: self._blind.battery_voltage}
async def async_added_to_hass(self):
"""Subscribe to multicast pushes."""
self._blind.Register_callback(self.unique_id, self.schedule_update_ha_state)
await super().async_added_to_hass()
async def async_will_remove_from_hass(self):
"""Unsubscribe when removed."""
self._blind.Remove_callback(self.unique_id)
await super().async_will_remove_from_hass()
class MotionTDBUBatterySensor(MotionBatterySensor):
"""
Representation of a Motion Battery Sensor for a Top Down Bottom Up blind.
Updates are done by the cover platform.
"""
def __init__(self, coordinator, blind, motor):
"""Initialize the Motion Battery Sensor."""
super().__init__(coordinator, blind)
self._motor = motor
@property
def unique_id(self):
"""Return the unique id of the blind."""
return f"{self._blind.mac}-{self._motor}-battery"
@property
def name(self):
"""Return the name of the blind battery sensor."""
return f"{self._blind.blind_type}-{self._motor}-battery-{self._blind.mac[12:]}"
@property
def state(self):
"""Return the state of the sensor."""
if self._blind.battery_level is None:
return None
return self._blind.battery_level[self._motor[0]]
@property
def device_state_attributes(self):
"""Return device specific state attributes."""
attributes = {}
if self._blind.battery_voltage is not None:
attributes[ATTR_BATTERY_VOLTAGE] = self._blind.battery_voltage[
self._motor[0]
]
return attributes
class MotionSignalStrengthSensor(CoordinatorEntity, Entity):
"""Representation of a Motion Signal Strength Sensor."""
def __init__(self, coordinator, device, device_type):
"""Initialize the Motion Signal Strength Sensor."""
super().__init__(coordinator)
self._device = device
self._device_type = device_type
@property
def unique_id(self):
"""Return the unique id of the blind."""
return f"{self._device.mac}-RSSI"
@property
def device_info(self):
"""Return the device info of the blind."""
return {"identifiers": {(DOMAIN, self._device.mac)}}
@property
def name(self):
"""Return the name of the blind signal strength sensor."""
if self._device_type == TYPE_GATEWAY:
return "Motion gateway signal strength"
return f"{self._device.blind_type} signal strength - {self._device.mac[12:]}"
@property
def available(self):
"""Return True if entity is available."""
return self._device.available
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return SIGNAL_STRENGTH_DECIBELS_MILLIWATT
@property
def device_class(self):
"""Return the device class of this entity."""
return DEVICE_CLASS_SIGNAL_STRENGTH
@property
def entity_registry_enabled_default(self):
"""Return if the entity should be enabled when first added to the entity registry."""
return False
@property
def state(self):
"""Return the state of the sensor."""
return self._device.RSSI
async def async_added_to_hass(self):
"""Subscribe to multicast pushes."""
self._device.Register_callback(self.unique_id, self.schedule_update_ha_state)
await super().async_added_to_hass()
async def async_will_remove_from_hass(self):
"""Unsubscribe when removed."""
self._device.Remove_callback(self.unique_id)
await super().async_will_remove_from_hass()
|
|
"""Plotting of motion fields and other visualization tools."""
from matplotlib.pylab import cm, figure
from numpy import arange, meshgrid, nan, size
from scipy.ndimage import gaussian_filter
try:
from skimage.measure import find_contours
skimage_imported = True
except ImportError:
skimage_imported = False
def plot_contour_overlay(I1, I2, contour_level, minval, maxval):
"""Plot two consecutive images so that a contour of the first image is
plotted on top of the second one.
Parameters
----------
I1 : array-like
The first image whose contour is plotted.
I2 : array-like
The second image that is plotted. Must have the same shape as I1.
contour_level : float
The level value of the contour to be plotted.
minval : float
Minimum image value to plot. Pixels in I2 below this threshold are not
plotted.
maxval : float
Maximum image value to plot. Values above maxval are set to maxval.
Returns
-------
out : matplotlib.figure.Figure
Handle of the plotted figure.
"""
if I1.shape != I2.shape:
raise ValueError("I1 and I2 must have the same shape")
if not skimage_imported:
        raise ImportError("scikit-image is required by plot_contour_overlay "
                          "(skimage.measure.find_contours)")
contours = find_contours(gaussian_filter(I1, 1), contour_level)
fig = figure()
ax = fig.gca()
I2 = I2.copy()
I2[I2 < minval] = nan
I2[I2 > maxval] = maxval
im = ax.imshow(I2, vmin=minval, vmax=maxval, cmap=cm.gray)
for ct in contours:
ax.plot(ct[:, 1], ct[:, 0], linewidth=2.0, linestyle="-", color='r')
fig.colorbar(im)
ax.set_xlim(0, size(I1, 1))
ax.set_ylim(size(I1, 0), 0)
ax.set_xticks([])
ax.set_yticks([])
return fig
def plot_motion_quiver(V, stride=10):
"""Draw a quiver plot from a motion field.
Parameters
----------
    V : array-like
        The motion field: a three-dimensional array with the u- and
        v-components of motion in its last dimension.
stride : int
The gap (number of pixels) between neighbouring motion vectors.
Returns
-------
out : matplotlib.figure.Figure
Handle of the plotted figure.
"""
fig = figure()
ax = fig.gca()
V = V.copy()
V[:, :, 1] = -V[:, :, 1]
X,Y = meshgrid(arange(size(V, 1)), arange(size(V, 0))[::-1])
ax.quiver(X[::stride, ::stride], Y[::stride, ::stride], V[::stride, ::stride, 0],
V[::stride, ::stride, 1], units="xy", scale=1.0)
ax.set_xlim(0, size(V, 1))
ax.set_ylim(0, size(V, 0))
ax.set_xticks([])
ax.set_yticks([])
return fig
def plot_motion_field_components(V, sel_comp=["U", "V"], minval=-10.0,
maxval=10.0, plot_title=None):
"""Plot individual components of a motion field as colormapped images.
Parameters
----------
    V : array-like
        The motion field: a three-dimensional array with the u- and
        v-components of motion in its last dimension.
sel_comp : list
List of selected components. The available options are 'U'=horizontal
component, 'V'=vertical component.
minval : float
Minimum value to plot.
maxval : float
Maximum value to plot.
plot_title : str
Title of the plot.
Returns
-------
out : list
Handles of the plotted figures (of type matplotlib.figure.Figure).
"""
figs = []
for sc in sel_comp:
fig = figure()
ax = fig.gca()
if sc == "U":
ci = 0
else:
ci = 1
im = ax.imshow(V[:, :, ci], vmin=minval, vmax=maxval, cmap=cm.jet)
ax.set_xticks([])
ax.set_yticks([])
cb = fig.colorbar(im)
if sc == "U":
cb.set_label("motion U-component")
else:
cb.set_label("motion V-component")
        if plot_title is not None:
ax.set_title(plot_title)
figs.append(fig)
return figs
def plot_motion_field_overlay(I, V, minval, maxval, stride=10,
colorbar_label=None, plot_title=None):
"""Plot motion quiver on top of an image.
Parameters
----------
I : array-like
The image on which the quiver is to be plotted.
    V : array-like
        The motion field (a three-dimensional array). Its first two dimensions
        must have the same shape as I.
minval : float
Minimum image value that is plotted.
maxval : float
Maximum image value. Values above maxval are set to maxval.
stride : int
The gap (number of pixels) between neighbouring motion vectors.
colorbar_label : str
Label of the colorbar.
plot_title : str
Title of the plot.
Returns
-------
out : matplotlib.figure.Figure
Handle of the plotted figure.
"""
if len(V.shape) != 3:
raise ValueError("V must be a three-dimensional array")
if I.shape != V.shape[0:2]:
raise ValueError("I and V must have the same shape")
fig = figure()
ax = fig.gca()
I = I.copy()
I[I < minval] = nan
I[I > maxval] = maxval
X,Y = meshgrid(arange(size(I, 1)), -arange(size(I, 0)))
im = ax.imshow(I, extent=(0, size(I, 1), -size(I, 0)+1, 0), vmin=minval,
vmax=maxval, cmap=cm.jet)
ax.quiver(X[::stride, ::stride], Y[::stride, ::stride],
V[::stride, ::stride, 0], -V[::stride, ::stride, 1],
color='r', units="xy", scale=1.0)
cb = fig.colorbar(im)
    if colorbar_label is not None:
cb.set_label(colorbar_label)
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlim(0, size(I, 1))
ax.set_ylim(-size(I, 0), 0)
    if plot_title is not None:
ax.set_title(plot_title)
return fig
def plot_motion_field_quality(V, qc, minval, maxval, plot_title=None):
"""Plot quality maps associated with the given motion field.
Parameters
----------
V : array-like
The motion field V(x,y,q), where x and y denote spatial coordinates and
q>=3 denotes the quality channel (the first two are u and v-components of
motion).
    qc : int
        Index of the quality channel to plot.
    minval : float
        Minimum value to plot.
    maxval : float
        Maximum value to plot.
    plot_title : str
        Title of the plot.
Returns
-------
out : matplotlib.figure.Figure
Handle of the plotted figure.
"""
if len(V.shape) != 3:
raise ValueError("V must be a three-dimensional array")
if V.shape[2] <= 2:
raise ValueError("V does not contain any quality channels")
if 2+qc > V.shape[2]:
raise ValueError("invalid quality channel index")
fig = figure()
ax = fig.gca()
im = ax.imshow(V[:, :, 2+qc], vmin=minval, vmax=maxval, cmap=cm.jet)
ax.set_xticks([])
ax.set_yticks([])
cb = fig.colorbar(im)
cb.set_label("motion quality channel %d" % (qc+1))
    if plot_title is not None:
ax.set_title(plot_title)
return fig
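# A minimal, self-contained demo (assumed, not part of the original module)
# showing how the helpers above are typically combined. The field size and the
# uniform motion vector are arbitrary choices made for illustration only.
def _demo_plots():
    """Illustrative only: plot a random smoothed image with a uniform motion field."""
    import numpy as np
    I1 = gaussian_filter(np.random.rand(100, 100), 3)
    I2 = gaussian_filter(np.random.rand(100, 100), 3)
    V = np.ones((100, 100, 2))  # uniform motion field with u = v = 1
    figs = [plot_motion_quiver(V, stride=10),
            plot_motion_field_overlay(I1, V, 0.0, 1.0, stride=10)]
    if skimage_imported:
        figs.append(plot_contour_overlay(I1, I2, 0.5, 0.0, 1.0))
    return figs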
|
|
from collections.abc import Mapping
import os
import numpy as np
import pytest
import openmc
import openmc.exceptions as exc
import openmc.lib
from tests import cdtemp
@pytest.fixture(scope='module')
def pincell_model():
"""Set up a model to test with and delete files when done"""
openmc.reset_auto_ids()
pincell = openmc.examples.pwr_pin_cell()
pincell.settings.verbosity = 1
# Add a tally
filter1 = openmc.MaterialFilter(pincell.materials)
filter2 = openmc.EnergyFilter([0.0, 1.0, 1.0e3, 20.0e6])
mat_tally = openmc.Tally()
mat_tally.filters = [filter1, filter2]
mat_tally.nuclides = ['U235', 'U238']
mat_tally.scores = ['total', 'elastic', '(n,gamma)']
pincell.tallies.append(mat_tally)
# Add an expansion tally
zernike_tally = openmc.Tally()
filter3 = openmc.ZernikeFilter(5, r=.63)
cells = pincell.geometry.root_universe.cells
filter4 = openmc.CellFilter(list(cells.values()))
zernike_tally.filters = [filter3, filter4]
zernike_tally.scores = ['fission']
pincell.tallies.append(zernike_tally)
# Add an energy function tally
energyfunc_tally = openmc.Tally()
energyfunc_filter = openmc.EnergyFunctionFilter(
[0.0, 20e6], [0.0, 20e6])
energyfunc_tally.scores = ['fission']
energyfunc_tally.filters = [energyfunc_filter]
pincell.tallies.append(energyfunc_tally)
# Write XML files in tmpdir
with cdtemp():
pincell.export_to_xml()
yield
@pytest.fixture(scope='module')
def uo2_trigger_model():
"""Set up a simple UO2 model with k-eff trigger"""
model = openmc.model.Model()
m = openmc.Material(name='UO2')
m.add_nuclide('U235', 1.0)
m.add_nuclide('O16', 2.0)
m.set_density('g/cm3', 10.0)
model.materials.append(m)
cyl = openmc.ZCylinder(r=1.0, boundary_type='vacuum')
c = openmc.Cell(fill=m, region=-cyl)
model.geometry.root_universe = openmc.Universe(cells=[c])
model.settings.batches = 10
model.settings.inactive = 5
model.settings.particles = 100
model.settings.source = openmc.Source(space=openmc.stats.Box(
[-0.5, -0.5, -1], [0.5, 0.5, 1], only_fissionable=True))
model.settings.verbosity = 1
model.settings.keff_trigger = {'type': 'std_dev', 'threshold': 0.001}
model.settings.trigger_active = True
model.settings.trigger_max_batches = 10
model.settings.trigger_batch_interval = 1
# Write XML files in tmpdir
with cdtemp():
model.export_to_xml()
yield
@pytest.fixture(scope='module')
def lib_init(pincell_model, mpi_intracomm):
openmc.lib.init(intracomm=mpi_intracomm)
yield
openmc.lib.finalize()
@pytest.fixture(scope='module')
def lib_simulation_init(lib_init):
openmc.lib.simulation_init()
yield
@pytest.fixture(scope='module')
def lib_run(lib_simulation_init):
openmc.lib.run()
def test_cell_mapping(lib_init):
cells = openmc.lib.cells
assert isinstance(cells, Mapping)
assert len(cells) == 3
for cell_id, cell in cells.items():
assert isinstance(cell, openmc.lib.Cell)
assert cell_id == cell.id
def test_cell(lib_init):
cell = openmc.lib.cells[1]
assert isinstance(cell.fill, openmc.lib.Material)
cell.fill = openmc.lib.materials[1]
assert str(cell) == 'Cell[0]'
assert cell.name == "Fuel"
cell.name = "Not fuel"
assert cell.name == "Not fuel"
def test_cell_temperature(lib_init):
cell = openmc.lib.cells[1]
cell.set_temperature(100.0, 0)
assert cell.get_temperature(0) == 100.0
cell.set_temperature(200)
assert cell.get_temperature() == 200.0
def test_new_cell(lib_init):
with pytest.raises(exc.AllocationError):
openmc.lib.Cell(1)
new_cell = openmc.lib.Cell()
new_cell_with_id = openmc.lib.Cell(10)
assert len(openmc.lib.cells) == 5
def test_material_mapping(lib_init):
mats = openmc.lib.materials
assert isinstance(mats, Mapping)
assert len(mats) == 3
for mat_id, mat in mats.items():
assert isinstance(mat, openmc.lib.Material)
assert mat_id == mat.id
def test_material(lib_init):
m = openmc.lib.materials[3]
assert m.nuclides == ['H1', 'O16', 'B10', 'B11']
old_dens = m.densities
test_dens = [1.0e-1, 2.0e-1, 2.5e-1, 1.0e-3]
m.set_densities(m.nuclides, test_dens)
assert m.densities == pytest.approx(test_dens)
assert m.volume is None
m.volume = 10.0
assert m.volume == 10.0
with pytest.raises(exc.OpenMCError):
m.set_density(1.0, 'goblins')
rho = 2.25e-2
m.set_density(rho)
assert sum(m.densities) == pytest.approx(rho)
m.set_density(0.1, 'g/cm3')
assert m.density == pytest.approx(0.1)
assert m.name == "Hot borated water"
m.name = "Not hot borated water"
assert m.name == "Not hot borated water"
def test_material_add_nuclide(lib_init):
m = openmc.lib.materials[3]
m.add_nuclide('Xe135', 1e-12)
assert m.nuclides[-1] == 'Xe135'
assert m.densities[-1] == 1e-12
def test_new_material(lib_init):
with pytest.raises(exc.AllocationError):
openmc.lib.Material(1)
new_mat = openmc.lib.Material()
new_mat_with_id = openmc.lib.Material(10)
assert len(openmc.lib.materials) == 5
def test_nuclide_mapping(lib_init):
nucs = openmc.lib.nuclides
assert isinstance(nucs, Mapping)
assert len(nucs) == 13
for name, nuc in nucs.items():
assert isinstance(nuc, openmc.lib.Nuclide)
assert name == nuc.name
def test_settings(lib_init):
settings = openmc.lib.settings
assert settings.inactive == 5
assert settings.generations_per_batch == 1
assert settings.particles == 100
assert settings.seed == 1
settings.seed = 11
def test_tally_mapping(lib_init):
tallies = openmc.lib.tallies
assert isinstance(tallies, Mapping)
assert len(tallies) == 3
for tally_id, tally in tallies.items():
assert isinstance(tally, openmc.lib.Tally)
assert tally_id == tally.id
def test_energy_function_filter(lib_init):
"""Test special __new__ and __init__ for EnergyFunctionFilter"""
efunc = openmc.lib.EnergyFunctionFilter([0.0, 1.0], [0.0, 2.0])
assert len(efunc.energy) == 2
assert (efunc.energy == [0.0, 1.0]).all()
assert len(efunc.y) == 2
assert (efunc.y == [0.0, 2.0]).all()
def test_tally(lib_init):
t = openmc.lib.tallies[1]
assert t.type == 'volume'
assert len(t.filters) == 2
assert isinstance(t.filters[0], openmc.lib.MaterialFilter)
assert isinstance(t.filters[1], openmc.lib.EnergyFilter)
# Create new filter and replace existing
with pytest.raises(exc.AllocationError):
openmc.lib.MaterialFilter(uid=1)
mats = openmc.lib.materials
f = openmc.lib.MaterialFilter([mats[2], mats[1]])
assert f.bins[0] == mats[2]
assert f.bins[1] == mats[1]
t.filters = [f]
assert t.filters == [f]
assert t.nuclides == ['U235', 'U238']
with pytest.raises(exc.DataError):
t.nuclides = ['Zr2']
t.nuclides = ['U234', 'Zr90']
assert t.nuclides == ['U234', 'Zr90']
assert t.scores == ['total', '(n,elastic)', '(n,gamma)']
new_scores = ['scatter', 'fission', 'nu-fission', '(n,2n)']
t.scores = new_scores
assert t.scores == new_scores
t2 = openmc.lib.tallies[2]
assert len(t2.filters) == 2
assert isinstance(t2.filters[0], openmc.lib.ZernikeFilter)
assert isinstance(t2.filters[1], openmc.lib.CellFilter)
assert len(t2.filters[1].bins) == 3
assert t2.filters[0].order == 5
t3 = openmc.lib.tallies[3]
assert len(t3.filters) == 1
t3_f = t3.filters[0]
assert isinstance(t3_f, openmc.lib.EnergyFunctionFilter)
assert len(t3_f.energy) == 2
assert len(t3_f.y) == 2
t3_f.set_data([0.0, 1.0, 2.0], [0.0, 1.0, 4.0])
assert len(t3_f.energy) == 3
assert len(t3_f.y) == 3
def test_new_tally(lib_init):
with pytest.raises(exc.AllocationError):
openmc.lib.Material(1)
new_tally = openmc.lib.Tally()
new_tally.scores = ['flux']
new_tally_with_id = openmc.lib.Tally(10)
new_tally_with_id.scores = ['flux']
assert len(openmc.lib.tallies) == 5
def test_tally_activate(lib_simulation_init):
t = openmc.lib.tallies[1]
assert not t.active
t.active = True
assert t.active
def test_tally_writable(lib_simulation_init):
t = openmc.lib.tallies[1]
assert t.writable
t.writable = False
assert not t.writable
# Revert tally to writable state for lib_run fixtures
t.writable = True
def test_tally_results(lib_run):
t = openmc.lib.tallies[1]
    assert t.num_realizations == 10  # t was made active in test_tally_activate
assert np.all(t.mean >= 0)
nonzero = (t.mean > 0.0)
assert np.all(t.std_dev[nonzero] >= 0)
assert np.all(t.ci_width()[nonzero] >= 1.95*t.std_dev[nonzero])
t2 = openmc.lib.tallies[2]
n = 5
assert t2.mean.size == (n + 1) * (n + 2) // 2 * 3 # Number of Zernike coeffs * 3 cells
def test_global_tallies(lib_run):
assert openmc.lib.num_realizations() == 5
gt = openmc.lib.global_tallies()
for mean, std_dev in gt:
assert mean >= 0
def test_statepoint(lib_run):
openmc.lib.statepoint_write('test_sp.h5')
assert os.path.exists('test_sp.h5')
def test_source_bank(lib_run):
source = openmc.lib.source_bank()
assert np.all(source['E'] > 0.0)
assert np.all(source['wgt'] == 1.0)
assert np.allclose(np.linalg.norm(source['u'], axis=1), 1.0)
def test_by_batch(lib_run):
openmc.lib.hard_reset()
# Running next batch before simulation is initialized should raise an
# exception
with pytest.raises(exc.AllocationError):
openmc.lib.next_batch()
openmc.lib.simulation_init()
try:
for _ in openmc.lib.iter_batches():
# Make sure we can get k-effective during inactive/active batches
mean, std_dev = openmc.lib.keff()
assert 0.0 < mean < 2.5
assert std_dev > 0.0
assert openmc.lib.num_realizations() == 5
for i in range(3):
openmc.lib.next_batch()
assert openmc.lib.num_realizations() == 8
finally:
openmc.lib.simulation_finalize()
def test_set_n_batches(lib_run):
# Run simulation_init so that current_batch reset to 0
openmc.lib.hard_reset()
openmc.lib.simulation_init()
settings = openmc.lib.settings
assert settings.get_batches() == 10
# Setting n_batches less than n_inactive should raise error
with pytest.raises(exc.InvalidArgumentError):
settings.set_batches(3)
# n_batches should stay the same
assert settings.get_batches() == 10
for i in range(7):
openmc.lib.next_batch()
# Setting n_batches less than current_batch should raise error
with pytest.raises(exc.InvalidArgumentError):
settings.set_batches(6)
# n_batches should stay the same
assert settings.get_batches() == 10
# Change n_batches from 10 to 20
settings.set_batches(20)
for _ in openmc.lib.iter_batches():
pass
openmc.lib.simulation_finalize()
# n_active should have been overwritten from 5 to 15
assert openmc.lib.num_realizations() == 15
# Ensure statepoint created at new value of n_batches
assert os.path.exists('statepoint.20.h5')
def test_reset(lib_run):
# Init and run 10 batches.
openmc.lib.hard_reset()
openmc.lib.simulation_init()
try:
for i in range(10):
openmc.lib.next_batch()
# Make sure there are 5 realizations for the 5 active batches.
assert openmc.lib.num_realizations() == 5
assert openmc.lib.tallies[2].num_realizations == 5
_, keff_sd1 = openmc.lib.keff()
tally_sd1 = openmc.lib.tallies[2].std_dev[0]
# Reset and run 3 more batches. Check the number of realizations.
openmc.lib.reset()
for i in range(3):
openmc.lib.next_batch()
assert openmc.lib.num_realizations() == 3
assert openmc.lib.tallies[2].num_realizations == 3
# Check the tally std devs to make sure results were cleared.
_, keff_sd2 = openmc.lib.keff()
tally_sd2 = openmc.lib.tallies[2].std_dev[0]
assert keff_sd2 > keff_sd1
assert tally_sd2 > tally_sd1
finally:
openmc.lib.simulation_finalize()
def test_reproduce_keff(lib_init):
# Get k-effective after run
openmc.lib.hard_reset()
openmc.lib.run()
keff0 = openmc.lib.keff()
    # Reset, run again, and get k-effective again. They should match.
openmc.lib.hard_reset()
openmc.lib.run()
keff1 = openmc.lib.keff()
assert keff0 == pytest.approx(keff1)
def test_find_cell(lib_init):
cell, instance = openmc.lib.find_cell((0., 0., 0.))
assert cell is openmc.lib.cells[1]
cell, instance = openmc.lib.find_cell((0.4, 0., 0.))
assert cell is openmc.lib.cells[2]
with pytest.raises(exc.GeometryError):
openmc.lib.find_cell((100., 100., 100.))
def test_find_material(lib_init):
mat = openmc.lib.find_material((0., 0., 0.))
assert mat is openmc.lib.materials[1]
mat = openmc.lib.find_material((0.4, 0., 0.))
assert mat is openmc.lib.materials[2]
def test_regular_mesh(lib_init):
mesh = openmc.lib.RegularMesh()
mesh.dimension = (2, 3, 4)
assert mesh.dimension == (2, 3, 4)
with pytest.raises(exc.AllocationError):
mesh2 = openmc.lib.RegularMesh(mesh.id)
# Make sure each combination of parameters works
ll = (0., 0., 0.)
ur = (10., 10., 10.)
width = (1., 1., 1.)
mesh.set_parameters(lower_left=ll, upper_right=ur)
assert mesh.lower_left == pytest.approx(ll)
assert mesh.upper_right == pytest.approx(ur)
mesh.set_parameters(lower_left=ll, width=width)
assert mesh.lower_left == pytest.approx(ll)
assert mesh.width == pytest.approx(width)
mesh.set_parameters(upper_right=ur, width=width)
assert mesh.upper_right == pytest.approx(ur)
assert mesh.width == pytest.approx(width)
meshes = openmc.lib.meshes
assert isinstance(meshes, Mapping)
assert len(meshes) == 1
for mesh_id, mesh in meshes.items():
assert isinstance(mesh, openmc.lib.RegularMesh)
assert mesh_id == mesh.id
mf = openmc.lib.MeshFilter(mesh)
assert mf.mesh == mesh
msf = openmc.lib.MeshSurfaceFilter(mesh)
assert msf.mesh == mesh
def test_rectilinear_mesh(lib_init):
mesh = openmc.lib.RectilinearMesh()
x_grid = [-10., 0., 10.]
y_grid = [0., 10., 20.]
z_grid = [10., 20., 30.]
mesh.set_grid(x_grid, y_grid, z_grid)
assert np.all(mesh.lower_left == (-10., 0., 10.))
assert np.all(mesh.upper_right == (10., 20., 30.))
assert np.all(mesh.dimension == (2, 2, 2))
for i, diff_x in enumerate(np.diff(x_grid)):
for j, diff_y in enumerate(np.diff(y_grid)):
for k, diff_z in enumerate(np.diff(z_grid)):
assert np.all(mesh.width[i, j, k, :] == (10, 10, 10))
with pytest.raises(exc.AllocationError):
mesh2 = openmc.lib.RectilinearMesh(mesh.id)
meshes = openmc.lib.meshes
assert isinstance(meshes, Mapping)
assert len(meshes) == 2
mesh = meshes[mesh.id]
assert isinstance(mesh, openmc.lib.RectilinearMesh)
mf = openmc.lib.MeshFilter(mesh)
assert mf.mesh == mesh
msf = openmc.lib.MeshSurfaceFilter(mesh)
assert msf.mesh == mesh
def test_restart(lib_init, mpi_intracomm):
# Finalize and re-init to make internal state consistent with XML.
openmc.lib.hard_reset()
openmc.lib.finalize()
openmc.lib.init(intracomm=mpi_intracomm)
openmc.lib.simulation_init()
# Run for 7 batches then write a statepoint.
for i in range(7):
openmc.lib.next_batch()
openmc.lib.statepoint_write('restart_test.h5', True)
# Run 3 more batches and copy the keff.
for i in range(3):
openmc.lib.next_batch()
keff0 = openmc.lib.keff()
    # Restart the simulation from the statepoint and run the 3 remaining active batches.
openmc.lib.simulation_finalize()
openmc.lib.hard_reset()
openmc.lib.finalize()
openmc.lib.init(args=('-r', 'restart_test.h5'))
openmc.lib.simulation_init()
for i in range(3):
openmc.lib.next_batch()
keff1 = openmc.lib.keff()
openmc.lib.simulation_finalize()
# Compare the keff values.
assert keff0 == pytest.approx(keff1)
def test_load_nuclide(lib_init):
# load multiple nuclides
openmc.lib.load_nuclide('H3')
assert 'H3' in openmc.lib.nuclides
openmc.lib.load_nuclide('Pu239')
assert 'Pu239' in openmc.lib.nuclides
# load non-existent nuclide
with pytest.raises(exc.DataError):
openmc.lib.load_nuclide('Pu3')
def test_id_map(lib_init):
expected_ids = np.array([[(3, 3), (2, 2), (3, 3)],
[(2, 2), (1, 1), (2, 2)],
[(3, 3), (2, 2), (3, 3)]], dtype='int32')
# create a plot object
s = openmc.lib.plot._PlotBase()
s.width = 1.26
s.height = 1.26
s.v_res = 3
s.h_res = 3
s.origin = (0.0, 0.0, 0.0)
s.basis = 'xy'
s.level = -1
ids = openmc.lib.plot.id_map(s)
assert np.array_equal(expected_ids, ids)
def test_property_map(lib_init):
expected_properties = np.array(
[[(293.6, 0.740582), (293.6, 6.55), (293.6, 0.740582)],
[ (293.6, 6.55), (293.6, 10.29769), (293.6, 6.55)],
[(293.6, 0.740582), (293.6, 6.55), (293.6, 0.740582)]], dtype='float')
# create a plot object
s = openmc.lib.plot._PlotBase()
s.width = 1.26
s.height = 1.26
s.v_res = 3
s.h_res = 3
s.origin = (0.0, 0.0, 0.0)
s.basis = 'xy'
s.level = -1
properties = openmc.lib.plot.property_map(s)
assert np.allclose(expected_properties, properties, atol=1e-04)
def test_position(lib_init):
pos = openmc.lib.plot._Position(1.0, 2.0, 3.0)
assert tuple(pos) == (1.0, 2.0, 3.0)
pos[0] = 1.3
pos[1] = 2.3
pos[2] = 3.3
assert tuple(pos) == (1.3, 2.3, 3.3)
def test_global_bounding_box(lib_init):
expected_llc = (-0.63, -0.63, -np.inf)
expected_urc = (0.63, 0.63, np.inf)
llc, urc = openmc.lib.global_bounding_box()
assert tuple(llc) == expected_llc
assert tuple(urc) == expected_urc
def test_trigger_set_n_batches(uo2_trigger_model, mpi_intracomm):
openmc.lib.finalize()
openmc.lib.init(intracomm=mpi_intracomm)
openmc.lib.simulation_init()
settings = openmc.lib.settings
# Change n_batches to 12 and n_max_batches to 20
settings.set_batches(12, set_max_batches=False, add_sp_batch=False)
settings.set_batches(20, set_max_batches=True, add_sp_batch=True)
assert settings.get_batches(get_max_batches=False) == 12
assert settings.get_batches(get_max_batches=True) == 20
for _ in openmc.lib.iter_batches():
pass
openmc.lib.simulation_finalize()
# n_active should have been overwritten from 5 to 15
assert openmc.lib.num_realizations() == 15
# Ensure statepoint was created only at batch 20 when calling set_batches
assert not os.path.exists('statepoint.12.h5')
assert os.path.exists('statepoint.20.h5')
|
|
# written for Python 3.6.1
#-*- coding: utf-8 -*-
from urllib.request import urlopen
import json
import string
import re
from bs4 import BeautifulSoup
import logging
import time
FILE_PATH = "./boxofficemojo.com/movie_data.txt"
LOG_PATH = "./boxofficemojo.com/scraping.log"
logging.basicConfig(filename=LOG_PATH,level=logging.DEBUG)
Keys = ["Name", "URL", "Genre","Runtime", "Rating", "MovieRanking"
, "PercentageofTotalGross", "WidestRelease", "CloseDate", "InRelease", "TotalGross"
, "Distributor", "Budget", "Domestic_Gross", "Domestic_Percentage"
, "Foreign_Gross", "Foreign_Percentage", "Worldwide_Gross", "OpeningWeekend"
, "Countryclicktoviewweekendbreakdown", "Dist", "ReleaseDate"
, "OpeningWknd", "ofTotal", "TotalGross", "AsOf"]
def add_empty_data(arrData, count):
for i in range(0,count):
arrData.append(" ")
return arrData
def remove_special_chars(dictData):
newDict= {}
for key in dictData:
new_key= re.sub(r'\W+', '', key)
newDict[new_key] = dictData[key]
return newDict
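# Usage sketch (illustrative comment, not part of the original script): keys are
# stripped to word characters so they line up with the header names in Keys, e.g.
#   remove_special_chars({'Movie Ranking': '#5'}) -> {'MovieRanking': '#5'}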
def save_to_json(filePath, dictData, countriesData=None):
    dictData = remove_special_chars(dictData)
    if countriesData:
        countriesData = remove_special_chars(countriesData)
        merged = dict(dictData)
        merged.update(countriesData)
        dictData = merged
    with open(filePath, "a") as outfile:
        json.dump(dictData, outfile, ensure_ascii=False)
def write_header(filePath):
# Write a header
text_file = open(filePath, "ab")
for header in Keys:
text_file.write((header + u"|").encode('utf-8'))
text_file.write("\n".encode('utf-8'))
text_file.close()
def save_to_file(filePath, dictData, countriesData=None):
dictData = remove_special_chars(dictData)
if countriesData:
countriesData = remove_special_chars(countriesData)
if countriesData:
merged = dict(dictData)
merged.update(countriesData)
dictData = merged
Arranged= []
add_empty_data(Arranged, 50)
text_file = open(filePath, "ab")
for key, value in dictData.items():
for i ,k in enumerate(Keys):
if key == k:
Arranged[i]= value
for data in Arranged:
text_file.write((data + u"|").encode('utf-8'))
text_file.write("\n".encode('utf-8'))
text_file.close()
def get_total_lifetime_grosses(link, arrData):
url = "http://www.boxofficemojo.com"+ link
page = urlopen(url)
soup = BeautifulSoup(page, "lxml")
# Assume that domestic info is from USA
arrData['Countryclicktoviewweekendbreakdown']= "USA"
#print(main_tbl)
tables = soup.find_all('table', attrs={'border': '0' , 'cellspacing':'0', 'cellpadding':'0' , 'width':'100%'})
#print( len(tables))
#td_count = 9
if len(tables) == 4:
#print(tables[3]) # Total lifetime grosses
mp_boxes= tables[3].find_all("div", {"class", "mp_box_tab"})
a= len(mp_boxes)
for box in mp_boxes:
if(box.text == "Total Lifetime Grosses"):
div_content= box.findNext('div')
trs = div_content.find_all('tr')
for tr in trs:
tds = tr.find_all('td')
if len(tds) == 3:
if tds[0].text.strip() == "Domestic:":
arrData["Total Gross"] = tds[1].text.strip()
arrData["% ofTotal"] = tds[2].text.strip()
arrData[tds[0].text.strip()+"_Gross"] = tds[1].text.strip()
arrData[tds[0].text.strip()+"_Percentage"] = tds[2].text.strip()
if(box.text == "Domestic Summary"):
div_content = box.findNext('div')
DS_tables = div_content.find_all('table', attrs = { 'border': '0' , 'cellspacing':'0', 'cellpadding':'0'})
for DS_table in DS_tables:
DS_trs = DS_table.find_all('tr')
for DS_tr in DS_trs:
DS_tr_title = DS_tr.td.text.strip()
if(DS_tr_title == "Opening\xa0Weekend:") or (DS_tr_title == "Opening Weekend:"):
DS_tr_content = DS_tr.td.findNext('td')
if DS_tr_content:
arrData["Opening Weekend"] = DS_tr_content.text.strip()
arrData["OpeningWknd"] = DS_tr_content.text.strip()
elif "(#" in DS_tr_title:
arrData['Movie Ranking'] = DS_tr_title
elif "%\xa0of\xa0Total\xa0Gross" in DS_tr_title or "% of Total Gross" in DS_tr_title:
DS_tr_content = DS_tr.td.findNext('td')
if DS_tr_content:
arrData['Percentage of Total Gross'] = DS_tr_content.text.strip()
elif DS_tr_title == "Widest\xa0Release:" or DS_tr_title == "Widest Release:":
DS_tr_content = DS_tr.td.findNext('td')
if DS_tr_content:
arrData['Widest Release'] = DS_tr_content.text.strip() # 14.
elif DS_tr_title == "Close\xa0Date:" or DS_tr_title == "Close Date:":
DS_tr_content = DS_tr.td.findNext('td')
if DS_tr_content:
arrData['Close Date'] = DS_tr_content.text.strip() # 15.
elif DS_tr_title == "In\xa0Release:" or DS_tr_title == "In Release:":
DS_tr_content = DS_tr.td.findNext('td')
if DS_tr_content:
arrData['In Release'] = DS_tr_content.text.strip() # 15.
if(box.text == "The Players"):
#print(box.findNext('div'))
pass
return arrData
def get_movie_foreign(link, arrData):
try:
eachCountry = {}
ColumnHeaders= []
url = "http://www.boxofficemojo.com"+ link + "&page=intl"
page = urlopen(url)
soup = BeautifulSoup(page, "lxml")
contents = soup.find('table', attrs={'border': '3' , 'cellspacing':'0', 'cellpadding':'5', 'align':'center', 'style':'margin-top: 5px;'})
if len(contents) == 1:
#print(contents)
intl_table = contents.tr.td.table
if intl_table:
trs = intl_table.find_all("tr")
                if len(trs) == 3:
                    # table holds only header rows: no per-country data
                    pass
else:
for row,tr in enumerate(trs):
if row == 0:
tds= tr.find_all("td") # get each header's text
for td in tds:
header= td.text.strip()
if "/" in header:
divided_header = header.split('/')
ColumnHeaders.append(divided_header[0])
ColumnHeaders.append(divided_header[1])
else:
ColumnHeaders.append(td.text.strip())
                        if(row < 3):  # don't save unnecessary data
continue
tds= tr.find_all("td")
for column, td in enumerate(tds):
# 11. Country, 12.Dist, 13. Release Date, 14.OW, 15.% of Total, 16.Total gross, 17. as of
eachCountry[ColumnHeaders[column]] = td.text.strip()
save_to_file(FILE_PATH, arrData, eachCountry)
#save_to_json(FILE_PATH, arrData, eachCountry)
eachCountry.clear()
return arrData
except Exception as e:
logging.exception(e)
return arrData
def get_movie_detail(movies_list, link, arrData):
if link not in movies_list:
movies_list.append(link)
url = "http://www.boxofficemojo.com"+ link # 1. URL
page = urlopen(url)
soup = BeautifulSoup(page, "lxml")
contents= soup.find('table', attrs={'border': '0' , 'cellspacing':'1', 'cellpadding':'4' , 'bgcolor':'#dcdcdc', 'width':'95%'})
tabledata = contents.find_all("td")
name_table = soup.find('table', attrs={'border': '0' , 'cellspacing':'0', 'cellpadding':'0' , 'width':'100%', 'style':'padding-top: 5px;'})
name = name_table.font.b.getText() # 0. Name
# 2. Distributor, 3. Release Date, 4. Genre, 5. Runtime, 6. Rating, 7. Budget, 8. TotalGross
arrData['Name'] = name
arrData['URL'] = url
if len(tabledata) == 6:
Distributor = tabledata[0].b.getText()
ReleaseDate = tabledata[1].b.getText()
Genre = tabledata[2].b.getText()
Runtime = tabledata[3].b.getText()
Rating = tabledata[4].b.getText()
Budget = tabledata[5].b.getText()
arrData['Distributor'] = Distributor
arrData['ReleaseDate'] = ReleaseDate
arrData['Genre'] = Genre
arrData['Runtime'] = Runtime
arrData['Rating'] = Rating
arrData['Budget'] = Budget
#arrData.extend([name , url , Distributor, ReleaseDate,Genre ,Runtime , Rating,Budget])
#add_empty_data(arrData, 1) # match gap for missing column
elif len(tabledata) == 7:
TotalGross = tabledata[0].b.getText()
Distributor = tabledata[1].b.getText()
ReleaseDate = tabledata[2].b.getText()
Genre = tabledata[3].b.getText()
Runtime = tabledata[4].b.getText()
Rating = tabledata[5].b.getText()
Budget = tabledata[6].b.getText()
arrData['TotalGross'] = TotalGross
arrData['Distributor'] = Distributor
arrData['ReleaseDate'] = ReleaseDate
arrData['Genre'] = Genre
arrData['Runtime'] = Runtime
arrData['Rating'] = Rating
arrData['Budget'] = Budget
#arrData.extend([ name , url , Distributor, ReleaseDate,Genre ,Runtime , Rating,Budget ,TotalGross])
#print (result)
#print contents2[0]
return arrData
def get_all_movies():
    # Letters used to index the movie listings, including "NUM" for
    # movies that start with a number or special character
index = ["NUM"] + list(string.ascii_uppercase)
# List of movie urls
movies_list = []
# dict data
arrData = {}
startTime = time.time()
lapTime= 0.0
    # To jump directly to a specific letter/page, set JumpTo and JumpToPage
    # (set them to None to disable skipping)
JumpTo = 'S'
IsJumpTarget = False
JumpToPage = 8
write_header(FILE_PATH)
logging.debug("running...start at : " + str(time.time()))
# Loop through the pages for each letter
for letter_idx, letter in enumerate(index):
if JumpTo:
indexOfTargetLetter = index.index(JumpTo)
if letter_idx < indexOfTargetLetter:
logging.debug("skip this letter")
IsJumpTarget= False
continue
elif letter_idx == indexOfTargetLetter:
IsJumpTarget= True
url = ("http://www.boxofficemojo.com/movies/alphabetical.htm?letter=" + letter)
page1 = urlopen(url)
soup1 = BeautifulSoup(page1, "lxml")
navi = soup1.find('div', attrs={"class" : "alpha-nav-holder"})
bs= navi.font.find_all('b')
count_bs= len(bs)
logging.debug("pages count : " + str(count_bs))
if letter == "NUM":
count_bs = 1
# Loop through the pages within each letter
for num in range(1, count_bs+1):
logging.debug("begin to scrap letter : " + letter + ", page : " + str(num))
if JumpToPage:
                if num < JumpToPage and IsJumpTarget:  # only skip pages within the jump-target letter
logging.debug("skip this page")
continue
url = ("http://www.boxofficemojo.com/movies/alphabetical.htm?"
"letter=" + letter + "&page=" + str(num))
try:
page = urlopen(url)
soup = BeautifulSoup(page, "lxml")
rows = soup.find(id="body").find("table").find("table").find_all(
"table")[1].find_all("tr")
# skip index row
if len(rows) > 1:
counter = 1
for row in rows:
trackingStartTime= time.time()
# skip index row
if counter > 1:
link = row.td.font.a['href']
arrData = get_movie_detail(movies_list, link, arrData)
arrData = get_movie_foreign(link, arrData)
arrData = get_total_lifetime_grosses(link, arrData)
save_to_file(FILE_PATH, arrData)
arrData.clear()
lapTime= time.time() - trackingStartTime
logging.debug("each movie's lapTime : " + str(lapTime))
counter += 1
except Exception as e:
logging.exception(e)
    TotalElapsedTime = (time.time() - startTime)
    logging.debug('done.' + str(TotalElapsedTime))
get_all_movies()
|
|
#!/usr/bin/env python
# Siconos is a program dedicated to modeling, simulation and control
# of non smooth dynamical systems.
#
# Copyright 2016 INRIA.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
from siconos.kernel import NewtonEulerDS, NewtonImpactNSL,\
NewtonEulerR, NewtonEulerFrom1DLocalFrameR, Interaction,\
MoreauJeanOSI, TimeDiscretisation, LCP, TimeStepping,\
changeFrameAbsToBody,changeFrameBodyToAbs,\
rotationVectorFromQuaternion, quaternionFromRotationVector,\
rotateAbsToBody,\
SiconosVector, NonSmoothDynamicalSystem
import numpy as np
import math
t0 = 0.0 # start time
h = 0.001 # time step
N= 10000
T = h*N
theta = 0.5 # theta scheme
class UnstableRotation(NewtonEulerDS):
def __init__(self,x, v):
I = np.zeros((3, 3))
I[0, 0] = 5.0
I[1, 1] = 10.0
I[2, 2] = 1.0
m=1.0
NewtonEulerDS.__init__(self,x, v, m, I)
# Allocation of _MExt
self.setMExtPtr(SiconosVector(3))
# specify that MExt is expressed in the inertial frame.
self.setIsMextExpressedInInertialFrame(True)
def computeMExt(self, time, mExt=None):
td = 2.0 - h
if mExt is None :
mExt = self._mExt
if isinstance(mExt,SiconosVector):
mExt.zero()
if (0 <= time < td):
mExt.setValue(0, 20.0)
elif (td <= time <= td + h):
mExt.setValue(1, 1.0 / (5.0 * h))
else:
mExt[:]=0
if (0 <= time < td):
mExt[0] =20.
elif (td <= time <= td + h):
mExt[1]= 1.0 / (5.0 * h)
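    # Illustrative note (added comment, not part of the original script): the
    # external moment spins the body up with a constant torque of 20 about the
    # x-axis (expressed in the inertial frame) until t = td = 2 - h, then applies
    # a brief perturbation of 1/(5*h) about the y-axis for a single time step,
    # presumably to excite the classic intermediate-axis instability
    # (I = diag(5, 10, 1), so x is the intermediate axis).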
#
# dynamical system
#
x = [0, 0, 0, 1.0, 0, 0, 0] # initial configuration
v = [0, 0, 0, 0, 0, 0] # initial velocity
unstableRotation = UnstableRotation(x, v)
class HeavyTop(NewtonEulerDS):
def __init__(self,x, v):
I = np.zeros((3, 3))
I[0, 0] = 5.0
I[1, 1] = 5.0
I[2, 2] = 1.0
m=1.0
NewtonEulerDS.__init__(self,x, v, m, I)
self._Mg=20
self._l=1.0
self.setComputeJacobianMIntqByFD(True)
# Allocation of _mInt
self._mInt = SiconosVector(3)
def centermass(self,q):
r= np.zeros(3)
E3 = SiconosVector(3)
E3.zero()
E3.setValue(2,1.0)
rotateAbsToBody(q,E3)
r[0] = E3.getValue(0)
r[1] = E3.getValue(1)
r[2] = E3.getValue(2)
return r
def computeMInt(self, time, q, v, mInt=None):
if mInt is None :
mInt = self._mInt
if isinstance(mInt,SiconosVector):
r = self.centermass(q)
m = self._Mg*self._l*np.cross(r,[0,0,1.0])
mInt.setValue(0,m[0])
mInt.setValue(1,m[1])
mInt.setValue(2,m[2])
changeFrameAbsToBody(q,mInt)
#print("mInt========")
mInt.display()
else:
r = self.centermass(q)
m = self._Mg*self._l*np.cross(r,[0,0,1.0])
m_sv = SiconosVector(m)
changeFrameAbsToBody(q,m_sv)
m_sv.display()
mInt[0] = m_sv.getValue(0)
mInt[1] = m_sv.getValue(1)
mInt[2] = m_sv.getValue(2)
print("mInt", mInt)
rotationVector_init= SiconosVector(3)
rotationVector_init.zero()
rotationVector_init.setValue(0,0.3)
x=SiconosVector(7)
quaternionFromRotationVector(rotationVector_init,x)
#x = [0, 0, 0, 1.0, 0, 0, 0] # initial configuration
v = [0, 0, 0, 0, 0, 50] # initial velocity
heavytop = HeavyTop(x, v)
ds = unstableRotation
#ds = heavytop
ds.display()
# test swig director
# ds.computeMInt(1,x,v)
# ds._mInt.display()
# m=SiconosVector(3)
# ds.computeMInt(1,x,v,m)
# m.display()
# m=np.zeros(3)
# ds.computeMInt(1,x,v,m)
# print m
# raw_input()
# Non-Smooth Dynamical System
#
nsds = NonSmoothDynamicalSystem(t0, T)
# add the dynamical system to the non smooth dynamical system
nsds.insertDynamicalSystem(ds)
#
# Simulation
#
# (1) OneStepIntegrators
OSI = MoreauJeanOSI(theta)
# (2) Time discretisation --
t = TimeDiscretisation(t0, h)
# (3) one step non smooth problem
osnspb = LCP()
# (4) Simulation setup with (1) (2) (3)
s = TimeStepping(nsds, t, OSI, osnspb)
#s.setDisplayNewtonConvergence(True)
s.setNewtonTolerance(1e-10)
#s.setNewtonMaxIteration(1)
# end of model definition
#
# computation
#
# Get the values to be plotted
# ->saved in a matrix dataPlot
dataPlot = np.empty((N+1, 26))
#
# numpy pointers on dense Siconos vectors
#
q = ds.q()
v = ds.twist()
p = ds.p(1)
#
# initial data
#
k=0
dataPlot[k, 1] = q[0]
dataPlot[k, 2] = q[1]
dataPlot[k, 3] = q[2]
dataPlot[k, 4] = q[3]
dataPlot[k, 5] = q[4]
dataPlot[k, 6] = q[5]
dataPlot[k, 7] = q[6]
dataPlot[k, 8] = v[0]
dataPlot[k, 9] = v[1]
dataPlot[k, 10] = v[2]
dataPlot[k, 11] = v[3]
dataPlot[k, 12] = v[4]
dataPlot[k, 13] = v[5]
omega = v[3:6]
print("omega", omega)
angular_momentum = np.dot(ds.inertia(),omega)
am= SiconosVector(angular_momentum)
changeFrameBodyToAbs(q,am)
dataPlot[k, 14] = am.getValue(0)
dataPlot[k, 15] = am.getValue(1)
dataPlot[k, 16] = am.getValue(2)
dataPlot[k, 17] = am.norm2()
rotationVector = SiconosVector(3)
rotationVectorFromQuaternion(q[3],q[4],q[5],q[6], rotationVector)
dataPlot[k, 18] = rotationVector.getValue(0)
dataPlot[k, 19] = rotationVector.getValue(1)
dataPlot[k, 20] = rotationVector.getValue(2)
dataPlot[k, 22] = h* omega[0]
dataPlot[k, 23] = h* omega[1]
dataPlot[k, 24] = h* omega[2]
dataPlot[k, 25] = np.linalg.norm(h*omega)
k = 1
# time loop
while(s.hasNextEvent() and k < N):
print(' ' )
print (
'------- k = ',
k,
'-----------------------------------------')
print(' ' )
s.computeOneStep()
dataPlot[k, 0] = s.nextTime()
dataPlot[k, 1] = q[0]
dataPlot[k, 2] = q[1]
dataPlot[k, 3] = q[2]
dataPlot[k, 4] = q[3]
dataPlot[k, 5] = q[4]
dataPlot[k, 6] = q[5]
dataPlot[k, 7] = q[6]
dataPlot[k, 8] = v[0]
dataPlot[k, 9] = v[1]
dataPlot[k, 10] = v[2]
dataPlot[k, 11] = v[3]
dataPlot[k, 12] = v[4]
dataPlot[k, 13] = v[5]
omega = v[3:6]
angular_momentum = np.dot(ds.inertia(),omega)
am= SiconosVector(angular_momentum)
changeFrameBodyToAbs(q,am)
a = np.zeros(1)
a[0] = am.getValue(0)
#a[1] = am.getValue(1)
# print "omega", omega
# print "angular_momentum", angular_momentum,
# print "q=", q
# print " norm(a[1:2])", np.linalg.norm(a)
#raw_input()
dataPlot[k, 14] = am.getValue(0)
dataPlot[k, 15] = am.getValue(1)
dataPlot[k, 16] = am.getValue(2)
dataPlot[k, 17] = am.norm2()
rotationVector = SiconosVector(3)
rotationVectorFromQuaternion(q[3],q[4],q[5],q[6], rotationVector)
dataPlot[k, 18] = rotationVector.getValue(0)
dataPlot[k, 19] = rotationVector.getValue(1)
dataPlot[k, 20] = rotationVector.getValue(2)
dataPlot[k, 22] = h* omega[0]
dataPlot[k, 23] = h* omega[1]
dataPlot[k, 24] = h* omega[2]
dataPlot[k, 25] = np.linalg.norm(h*omega)
k = k + 1
s.nextStep()
dataPlot=np.resize(dataPlot,(k-2,26))
np.savetxt("result-py.dat", dataPlot)
#
# comparison with the reference file
#
from siconos.kernel import SimpleMatrix, getMatrix
#
# plots
#
from matplotlib.pyplot import subplot, title, plot, grid, show, figure
figure(num='Moreau Jean Siconos', figsize=(12, 12))
subplot(321)
title('angular velocities Omega')
plot(dataPlot[:, 0], dataPlot[:, 11])
plot(dataPlot[:, 0], dataPlot[:, 12])
#plot(dataPlot[:, 0], dataPlot[:, 13])
subplot(322)
title('rotation vector')
plot(dataPlot[:, 0], dataPlot[:, 18])
plot(dataPlot[:, 0], dataPlot[:, 19])
plot(dataPlot[:, 0], dataPlot[:, 20])
subplot(323)
title('Theta (h Omega)')
plot(dataPlot[:, 0], dataPlot[:, 22])
plot(dataPlot[:, 0], dataPlot[:, 23])
plot(dataPlot[:, 0], dataPlot[:, 24])
subplot(325)
title('norm of Theta')
plot(dataPlot[:, 0], dataPlot[:, 25])
subplot(324)
title('angular momentum (pi[0])')
plot(dataPlot[:, 0], dataPlot[:, 14])
#plot(dataPlot[:, 0], dataPlot[:, 15])
#plot(dataPlot[:, 0], dataPlot[:, 16])
subplot(326)
title('norm of angular momentum pi')
plot(dataPlot[:, 0], dataPlot[:, 17])
grid()
show()
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import unittest
from datetime import datetime
from itsdangerous import URLSafeSerializer
from parameterized import parameterized
from airflow import DAG
from airflow.api_connexion.exceptions import EXCEPTIONS_LINK_MAP
from airflow.configuration import conf
from airflow.models import DagBag, DagModel
from airflow.models.serialized_dag import SerializedDagModel
from airflow.operators.dummy_operator import DummyOperator
from airflow.security import permissions
from airflow.utils.session import provide_session
from airflow.www import app
from tests.test_utils.api_connexion_utils import assert_401, create_user, delete_user
from tests.test_utils.config import conf_vars
from tests.test_utils.db import clear_db_dags, clear_db_runs, clear_db_serialized_dags
SERIALIZER = URLSafeSerializer(conf.get('webserver', 'secret_key'))
FILE_TOKEN = SERIALIZER.dumps(__file__)
class TestDagEndpoint(unittest.TestCase):
dag_id = "test_dag"
task_id = "op1"
dag2_id = "test_dag2"
@staticmethod
def clean_db():
clear_db_runs()
clear_db_dags()
clear_db_serialized_dags()
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
with conf_vars({("api", "auth_backend"): "tests.test_utils.remote_user_api_auth_backend"}):
cls.app = app.create_app(testing=True) # type:ignore
create_user(
cls.app, # type: ignore
username="test",
role_name="Test",
permissions=[
(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_DAG),
],
)
create_user(cls.app, username="test_no_permissions", role_name="TestNoPermissions") # type: ignore
create_user(
cls.app, username="test_granular_permissions", role_name="TestGranularDag" # type: ignore
)
cls.app.appbuilder.sm.sync_perm_for_dag( # type: ignore # pylint: disable=no-member
"TEST_DAG_1",
access_control={'TestGranularDag': [permissions.ACTION_CAN_EDIT, permissions.ACTION_CAN_READ]},
)
with DAG(cls.dag_id, start_date=datetime(2020, 6, 15), doc_md="details") as dag:
DummyOperator(task_id=cls.task_id)
with DAG(cls.dag2_id, start_date=datetime(2020, 6, 15)) as dag2: # no doc_md
DummyOperator(task_id=cls.task_id)
cls.dag = dag # type:ignore
cls.dag2 = dag2 # type: ignore
dag_bag = DagBag(os.devnull, include_examples=False)
dag_bag.dags = {dag.dag_id: dag, dag2.dag_id: dag2}
cls.app.dag_bag = dag_bag # type:ignore
@classmethod
def tearDownClass(cls) -> None:
delete_user(cls.app, username="test") # type: ignore
delete_user(cls.app, username="test_no_permissions") # type: ignore
delete_user(cls.app, username="test_granular_permissions") # type: ignore
def setUp(self) -> None:
self.clean_db()
self.client = self.app.test_client() # type:ignore
def tearDown(self) -> None:
self.clean_db()
@provide_session
def _create_dag_models(self, count, session=None):
for num in range(1, count + 1):
dag_model = DagModel(
dag_id=f"TEST_DAG_{num}",
fileloc=f"/tmp/dag_{num}.py",
schedule_interval="2 2 * * *",
)
session.add(dag_model)
class TestGetDag(TestDagEndpoint):
@conf_vars({("webserver", "secret_key"): "mysecret"})
def test_should_respond_200(self):
self._create_dag_models(1)
response = self.client.get("/api/v1/dags/TEST_DAG_1", environ_overrides={'REMOTE_USER': "test"})
assert response.status_code == 200
self.assertEqual(
{
"dag_id": "TEST_DAG_1",
"description": None,
"fileloc": "/tmp/dag_1.py",
"file_token": 'Ii90bXAvZGFnXzEucHki.EnmIdPaUPo26lHQClbWMbDFD1Pk',
"is_paused": False,
"is_subdag": False,
"owners": [],
"root_dag_id": None,
"schedule_interval": {"__type": "CronExpression", "value": "2 2 * * *"},
"tags": [],
},
response.json,
)
@conf_vars({("webserver", "secret_key"): "mysecret"})
@provide_session
def test_should_respond_200_with_schedule_interval_none(self, session=None):
dag_model = DagModel(
dag_id="TEST_DAG_1",
fileloc="/tmp/dag_1.py",
schedule_interval=None,
)
session.add(dag_model)
session.commit()
response = self.client.get("/api/v1/dags/TEST_DAG_1", environ_overrides={'REMOTE_USER': "test"})
assert response.status_code == 200
self.assertEqual(
{
"dag_id": "TEST_DAG_1",
"description": None,
"fileloc": "/tmp/dag_1.py",
"file_token": 'Ii90bXAvZGFnXzEucHki.EnmIdPaUPo26lHQClbWMbDFD1Pk',
"is_paused": False,
"is_subdag": False,
"owners": [],
"root_dag_id": None,
"schedule_interval": None,
"tags": [],
},
response.json,
)
def test_should_respond_200_with_granular_dag_access(self):
self._create_dag_models(1)
response = self.client.get(
"/api/v1/dags/TEST_DAG_1", environ_overrides={'REMOTE_USER': "test_granular_permissions"}
)
assert response.status_code == 200
def test_should_respond_404(self):
response = self.client.get("/api/v1/dags/INVALID_DAG", environ_overrides={'REMOTE_USER': "test"})
assert response.status_code == 404
def test_should_raises_401_unauthenticated(self):
self._create_dag_models(1)
response = self.client.get("/api/v1/dags/TEST_DAG_1")
assert_401(response)
def test_should_raise_403_forbidden(self):
response = self.client.get(
f"/api/v1/dags/{self.dag_id}/details", environ_overrides={'REMOTE_USER': "test_no_permissions"}
)
assert response.status_code == 403
def test_should_respond_403_with_granular_access_for_different_dag(self):
self._create_dag_models(3)
response = self.client.get(
"/api/v1/dags/TEST_DAG_2", environ_overrides={'REMOTE_USER': "test_granular_permissions"}
)
assert response.status_code == 403
class TestGetDagDetails(TestDagEndpoint):
def test_should_respond_200(self):
response = self.client.get(
f"/api/v1/dags/{self.dag_id}/details", environ_overrides={'REMOTE_USER': "test"}
)
assert response.status_code == 200
expected = {
"catchup": True,
"concurrency": 16,
"dag_id": "test_dag",
"dag_run_timeout": None,
"default_view": "tree",
"description": None,
"doc_md": "details",
"fileloc": __file__,
"file_token": FILE_TOKEN,
"is_paused": None,
"is_subdag": False,
"orientation": "LR",
"owners": [],
"schedule_interval": {
"__type": "TimeDelta",
"days": 1,
"microseconds": 0,
"seconds": 0,
},
"start_date": "2020-06-15T00:00:00+00:00",
"tags": None,
"timezone": "Timezone('UTC')",
}
assert response.json == expected
def test_should_respond_200_with_doc_md_none(self):
response = self.client.get(
f"/api/v1/dags/{self.dag2_id}/details", environ_overrides={'REMOTE_USER': "test"}
)
assert response.status_code == 200
expected = {
"catchup": True,
"concurrency": 16,
"dag_id": "test_dag2",
"dag_run_timeout": None,
"default_view": "tree",
"description": None,
"doc_md": None,
"fileloc": __file__,
"file_token": FILE_TOKEN,
"is_paused": None,
"is_subdag": False,
"orientation": "LR",
"owners": [],
"schedule_interval": {
"__type": "TimeDelta",
"days": 1,
"microseconds": 0,
"seconds": 0,
},
"start_date": "2020-06-15T00:00:00+00:00",
"tags": None,
"timezone": "Timezone('UTC')",
}
assert response.json == expected
def test_should_respond_200_serialized(self):
# Create empty app with empty dagbag to check if DAG is read from db
with conf_vars({("api", "auth_backend"): "tests.test_utils.remote_user_api_auth_backend"}):
app_serialized = app.create_app(testing=True)
dag_bag = DagBag(os.devnull, include_examples=False, read_dags_from_db=True)
app_serialized.dag_bag = dag_bag
client = app_serialized.test_client()
SerializedDagModel.write_dag(self.dag)
expected = {
"catchup": True,
"concurrency": 16,
"dag_id": "test_dag",
"dag_run_timeout": None,
"default_view": "tree",
"description": None,
"doc_md": "details",
"fileloc": __file__,
"file_token": FILE_TOKEN,
"is_paused": None,
"is_subdag": False,
"orientation": "LR",
"owners": [],
"schedule_interval": {
"__type": "TimeDelta",
"days": 1,
"microseconds": 0,
"seconds": 0,
},
"start_date": "2020-06-15T00:00:00+00:00",
"tags": None,
"timezone": "Timezone('UTC')",
}
response = client.get(
f"/api/v1/dags/{self.dag_id}/details", environ_overrides={'REMOTE_USER': "test"}
)
assert response.status_code == 200
assert response.json == expected
response = self.client.get(
f"/api/v1/dags/{self.dag_id}/details", environ_overrides={'REMOTE_USER': "test"}
)
assert response.status_code == 200
expected = {
'catchup': True,
'concurrency': 16,
'dag_id': 'test_dag',
'dag_run_timeout': None,
'default_view': 'tree',
'description': None,
'doc_md': 'details',
'fileloc': __file__,
"file_token": FILE_TOKEN,
'is_paused': None,
'is_subdag': False,
'orientation': 'LR',
'owners': [],
'schedule_interval': {'__type': 'TimeDelta', 'days': 1, 'microseconds': 0, 'seconds': 0},
'start_date': '2020-06-15T00:00:00+00:00',
'tags': None,
'timezone': "Timezone('UTC')",
}
assert response.json == expected
def test_should_raises_401_unauthenticated(self):
response = self.client.get(f"/api/v1/dags/{self.dag_id}/details")
assert_401(response)
def test_should_raise_404_when_dag_is_not_found(self):
response = self.client.get(
"/api/v1/dags/non_existing_dag_id/details", environ_overrides={'REMOTE_USER': "test"}
)
assert response.status_code == 404
self.assertEqual(
response.json,
{
'detail': 'The DAG with dag_id: non_existing_dag_id was not found',
'status': 404,
'title': 'DAG not found',
'type': EXCEPTIONS_LINK_MAP[404],
},
)
class TestGetDags(TestDagEndpoint):
def test_should_respond_200(self):
self._create_dag_models(2)
response = self.client.get("api/v1/dags", environ_overrides={'REMOTE_USER': "test"})
file_token = SERIALIZER.dumps("/tmp/dag_1.py")
file_token2 = SERIALIZER.dumps("/tmp/dag_2.py")
assert response.status_code == 200
self.assertEqual(
{
"dags": [
{
"dag_id": "TEST_DAG_1",
"description": None,
"fileloc": "/tmp/dag_1.py",
"file_token": file_token,
"is_paused": False,
"is_subdag": False,
"owners": [],
"root_dag_id": None,
"schedule_interval": {
"__type": "CronExpression",
"value": "2 2 * * *",
},
"tags": [],
},
{
"dag_id": "TEST_DAG_2",
"description": None,
"fileloc": "/tmp/dag_2.py",
"file_token": file_token2,
"is_paused": False,
"is_subdag": False,
"owners": [],
"root_dag_id": None,
"schedule_interval": {
"__type": "CronExpression",
"value": "2 2 * * *",
},
"tags": [],
},
],
"total_entries": 2,
},
response.json,
)
def test_should_respond_200_with_granular_dag_access(self):
self._create_dag_models(3)
response = self.client.get(
"/api/v1/dags", environ_overrides={'REMOTE_USER': "test_granular_permissions"}
)
assert response.status_code == 200
assert len(response.json['dags']) == 1
assert response.json['dags'][0]['dag_id'] == 'TEST_DAG_1'
@parameterized.expand(
[
("api/v1/dags?limit=1", ["TEST_DAG_1"]),
("api/v1/dags?limit=2", ["TEST_DAG_1", "TEST_DAG_10"]),
(
"api/v1/dags?offset=5",
["TEST_DAG_5", "TEST_DAG_6", "TEST_DAG_7", "TEST_DAG_8", "TEST_DAG_9"],
),
(
"api/v1/dags?offset=0",
[
"TEST_DAG_1",
"TEST_DAG_10",
"TEST_DAG_2",
"TEST_DAG_3",
"TEST_DAG_4",
"TEST_DAG_5",
"TEST_DAG_6",
"TEST_DAG_7",
"TEST_DAG_8",
"TEST_DAG_9",
],
),
("api/v1/dags?limit=1&offset=5", ["TEST_DAG_5"]),
("api/v1/dags?limit=1&offset=1", ["TEST_DAG_10"]),
("api/v1/dags?limit=2&offset=2", ["TEST_DAG_2", "TEST_DAG_3"]),
]
)
def test_should_respond_200_and_handle_pagination(self, url, expected_dag_ids):
self._create_dag_models(10)
response = self.client.get(url, environ_overrides={'REMOTE_USER': "test"})
assert response.status_code == 200
dag_ids = [dag["dag_id"] for dag in response.json["dags"]]
self.assertEqual(expected_dag_ids, dag_ids)
self.assertEqual(10, response.json["total_entries"])
def test_should_respond_200_default_limit(self):
self._create_dag_models(101)
response = self.client.get("api/v1/dags", environ_overrides={'REMOTE_USER': "test"})
assert response.status_code == 200
self.assertEqual(100, len(response.json["dags"]))
self.assertEqual(101, response.json["total_entries"])
def test_should_raises_401_unauthenticated(self):
response = self.client.get("api/v1/dags")
assert_401(response)
def test_should_respond_403_unauthorized(self):
self._create_dag_models(1)
response = self.client.get("api/v1/dags", environ_overrides={'REMOTE_USER': "test_no_permissions"})
assert response.status_code == 403
class TestPatchDag(TestDagEndpoint):
file_token = SERIALIZER.dumps("/tmp/dag_1.py")
def test_should_respond_200_on_patch_is_paused(self):
dag_model = self._create_dag_model()
response = self.client.patch(
f"/api/v1/dags/{dag_model.dag_id}",
json={
"is_paused": False,
},
environ_overrides={'REMOTE_USER': "test"},
)
self.assertEqual(response.status_code, 200)
expected_response = {
"dag_id": "TEST_DAG_1",
"description": None,
"fileloc": "/tmp/dag_1.py",
"file_token": self.file_token,
"is_paused": False,
"is_subdag": False,
"owners": [],
"root_dag_id": None,
"schedule_interval": {
"__type": "CronExpression",
"value": "2 2 * * *",
},
"tags": [],
}
self.assertEqual(response.json, expected_response)
def test_should_respond_200_on_patch_with_granular_dag_access(self):
self._create_dag_models(1)
response = self.client.patch(
"/api/v1/dags/TEST_DAG_1",
json={
"is_paused": False,
},
environ_overrides={'REMOTE_USER': "test_granular_permissions"},
)
assert response.status_code == 200
def test_should_respond_400_on_invalid_request(self):
patch_body = {
"is_paused": True,
"schedule_interval": {
"__type": "CronExpression",
"value": "1 1 * * *",
},
}
dag_model = self._create_dag_model()
response = self.client.patch(f"/api/v1/dags/{dag_model.dag_id}", json=patch_body)
self.assertEqual(response.status_code, 400)
self.assertEqual(
response.json,
{
'detail': "Property is read-only - 'schedule_interval'",
'status': 400,
'title': 'Bad Request',
'type': EXCEPTIONS_LINK_MAP[400],
},
)
def test_should_respond_404(self):
response = self.client.get("/api/v1/dags/INVALID_DAG", environ_overrides={'REMOTE_USER': "test"})
self.assertEqual(response.status_code, 404)
@provide_session
def _create_dag_model(self, session=None):
dag_model = DagModel(
dag_id="TEST_DAG_1", fileloc="/tmp/dag_1.py", schedule_interval="2 2 * * *", is_paused=True
)
session.add(dag_model)
return dag_model
def test_should_raises_401_unauthenticated(self):
dag_model = self._create_dag_model()
response = self.client.patch(
f"/api/v1/dags/{dag_model.dag_id}",
json={
"is_paused": False,
},
)
assert_401(response)
def test_should_respond_200_with_update_mask(self):
dag_model = self._create_dag_model()
payload = {
"is_paused": False,
}
response = self.client.patch(
f"/api/v1/dags/{dag_model.dag_id}?update_mask=is_paused",
json=payload,
environ_overrides={'REMOTE_USER': "test"},
)
self.assertEqual(response.status_code, 200)
expected_response = {
"dag_id": "TEST_DAG_1",
"description": None,
"fileloc": "/tmp/dag_1.py",
"file_token": self.file_token,
"is_paused": False,
"is_subdag": False,
"owners": [],
"root_dag_id": None,
"schedule_interval": {
"__type": "CronExpression",
"value": "2 2 * * *",
},
"tags": [],
}
self.assertEqual(response.json, expected_response)
@parameterized.expand(
[
(
{
"is_paused": True,
},
"update_mask=description",
"Only `is_paused` field can be updated through the REST API",
),
(
{
"is_paused": True,
},
"update_mask=schedule_interval, description",
"Only `is_paused` field can be updated through the REST API",
),
]
)
def test_should_respond_400_for_invalid_fields_in_update_mask(self, payload, update_mask, error_message):
dag_model = self._create_dag_model()
response = self.client.patch(
f"/api/v1/dags/{dag_model.dag_id}?{update_mask}",
json=payload,
environ_overrides={'REMOTE_USER': "test"},
)
self.assertEqual(response.status_code, 400)
self.assertEqual(response.json['detail'], error_message)
def test_should_respond_403_unauthorized(self):
dag_model = self._create_dag_model()
response = self.client.patch(
f"/api/v1/dags/{dag_model.dag_id}",
json={
"is_paused": False,
},
environ_overrides={'REMOTE_USER': "test_no_permissions"},
)
assert response.status_code == 403
|
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import logging
import urlparse
from tempest_lib.common.utils import data_utils
from tempest_lib import decorators
from tempest_lib import exceptions as lib_exc
import testtools
from tempest.api.compute import base
from tempest.common.utils.linux import remote_client
from tempest import config
from tempest import test
CONF = config.CONF
LOG = logging.getLogger(__name__)
class ServerActionsTestJSON(base.BaseV2ComputeTest):
run_ssh = CONF.compute.run_ssh
def setUp(self):
# NOTE(afazekas): Normally we use the same server with all test cases,
# but if it has an issue, we build a new one
super(ServerActionsTestJSON, self).setUp()
# Check if the server is in a clean state after test
try:
self.client.wait_for_server_status(self.server_id, 'ACTIVE')
except Exception:
# Rebuild server if something happened to it during a test
self.__class__.server_id = self.rebuild_server(self.server_id)
def tearDown(self):
server = self.client.get_server(self.server_id)
self.assertEqual(self.image_ref, server['image']['id'])
self.server_check_teardown()
super(ServerActionsTestJSON, self).tearDown()
@classmethod
def setup_clients(cls):
super(ServerActionsTestJSON, cls).setup_clients()
cls.client = cls.servers_client
@classmethod
def resource_setup(cls):
cls.prepare_instance_network()
super(ServerActionsTestJSON, cls).resource_setup()
cls.server_id = cls.rebuild_server(None)
@test.idempotent_id('6158df09-4b82-4ab3-af6d-29cf36af858d')
@testtools.skipUnless(CONF.compute_feature_enabled.change_password,
'Change password not available.')
@test.attr(type='gate')
def test_change_server_password(self):
# The server's password should be set to the provided password
new_password = 'Newpass1234'
self.client.change_password(self.server_id, new_password)
self.client.wait_for_server_status(self.server_id, 'ACTIVE')
if self.run_ssh:
# Verify that the user can authenticate with the new password
server = self.client.get_server(self.server_id)
linux_client = remote_client.RemoteClient(server, self.ssh_user,
new_password)
linux_client.validate_authentication()
def _test_reboot_server(self, reboot_type):
if self.run_ssh:
# Get the time the server was last rebooted.
server = self.client.get_server(self.server_id)
linux_client = remote_client.RemoteClient(server, self.ssh_user,
self.password)
boot_time = linux_client.get_boot_time()
self.client.reboot(self.server_id, reboot_type)
self.client.wait_for_server_status(self.server_id, 'ACTIVE')
if self.run_ssh:
# Log in and verify the boot time has changed
linux_client = remote_client.RemoteClient(server, self.ssh_user,
self.password)
new_boot_time = linux_client.get_boot_time()
self.assertTrue(new_boot_time > boot_time,
'%s > %s' % (new_boot_time, boot_time))
@test.attr(type='smoke')
@test.idempotent_id('2cb1baf6-ac8d-4429-bf0d-ba8a0ba53e32')
def test_reboot_server_hard(self):
# The server should be power cycled
self._test_reboot_server('HARD')
@decorators.skip_because(bug="1014647")
@test.attr(type='smoke')
@test.idempotent_id('4640e3ef-a5df-482e-95a1-ceeeb0faa84d')
def test_reboot_server_soft(self):
# The server should be signaled to reboot gracefully
self._test_reboot_server('SOFT')
@test.attr(type='smoke')
@test.idempotent_id('aaa6cdf3-55a7-461a-add9-1c8596b9a07c')
def test_rebuild_server(self):
# The server should be rebuilt using the provided image and data
meta = {'rebuild': 'server'}
new_name = data_utils.rand_name('server')
file_contents = 'Test server rebuild.'
personality = [{'path': 'rebuild.txt',
'contents': base64.b64encode(file_contents)}]
password = 'rebuildPassw0rd'
rebuilt_server = self.client.rebuild(self.server_id,
self.image_ref_alt,
name=new_name,
metadata=meta,
personality=personality,
adminPass=password)
# Verify the properties in the initial response are correct
self.assertEqual(self.server_id, rebuilt_server['id'])
rebuilt_image_id = rebuilt_server['image']['id']
self.assertTrue(self.image_ref_alt.endswith(rebuilt_image_id))
self.assertEqual(self.flavor_ref, rebuilt_server['flavor']['id'])
# Verify the server properties after the rebuild completes
self.client.wait_for_server_status(rebuilt_server['id'], 'ACTIVE')
server = self.client.get_server(rebuilt_server['id'])
rebuilt_image_id = server['image']['id']
self.assertTrue(self.image_ref_alt.endswith(rebuilt_image_id))
self.assertEqual(new_name, server['name'])
if self.run_ssh:
# Verify that the user can authenticate with the provided password
linux_client = remote_client.RemoteClient(server, self.ssh_user,
password)
linux_client.validate_authentication()
if self.image_ref_alt != self.image_ref:
self.client.rebuild(self.server_id, self.image_ref)
@test.attr(type='gate')
@test.idempotent_id('30449a88-5aff-4f9b-9866-6ee9b17f906d')
def test_rebuild_server_in_stop_state(self):
# The server in stop state should be rebuilt using the provided
# image and remain in SHUTOFF state
server = self.client.get_server(self.server_id)
old_image = server['image']['id']
new_image = self.image_ref_alt \
if old_image == self.image_ref else self.image_ref
self.client.stop(self.server_id)
self.client.wait_for_server_status(self.server_id, 'SHUTOFF')
rebuilt_server = self.client.rebuild(self.server_id, new_image)
# Verify the properties in the initial response are correct
self.assertEqual(self.server_id, rebuilt_server['id'])
rebuilt_image_id = rebuilt_server['image']['id']
self.assertEqual(new_image, rebuilt_image_id)
self.assertEqual(self.flavor_ref, rebuilt_server['flavor']['id'])
# Verify the server properties after the rebuild completes
self.client.wait_for_server_status(rebuilt_server['id'], 'SHUTOFF')
server = self.client.get_server(rebuilt_server['id'])
rebuilt_image_id = server['image']['id']
self.assertEqual(new_image, rebuilt_image_id)
# Restore to the original image (The tearDown will test it again)
if self.image_ref_alt != self.image_ref:
self.client.rebuild(self.server_id, old_image)
self.client.wait_for_server_status(self.server_id, 'SHUTOFF')
self.client.start(self.server_id)
def _detect_server_image_flavor(self, server_id):
# Detects the current server image flavor ref.
server = self.client.get_server(server_id)
current_flavor = server['flavor']['id']
new_flavor_ref = self.flavor_ref_alt \
if current_flavor == self.flavor_ref else self.flavor_ref
return current_flavor, new_flavor_ref
def _test_resize_server_confirm(self, stop=False):
# The server's RAM and disk space should be modified to that of
# the provided flavor
previous_flavor_ref, new_flavor_ref = \
self._detect_server_image_flavor(self.server_id)
if stop:
self.servers_client.stop(self.server_id)
self.servers_client.wait_for_server_status(self.server_id,
'SHUTOFF')
self.client.resize(self.server_id, new_flavor_ref)
self.client.wait_for_server_status(self.server_id, 'VERIFY_RESIZE')
self.client.confirm_resize(self.server_id)
expected_status = 'SHUTOFF' if stop else 'ACTIVE'
self.client.wait_for_server_status(self.server_id, expected_status)
server = self.client.get_server(self.server_id)
self.assertEqual(new_flavor_ref, server['flavor']['id'])
if stop:
# NOTE(mriedem): tearDown requires the server to be started.
self.client.start(self.server_id)
@test.idempotent_id('1499262a-9328-4eda-9068-db1ac57498d2')
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
'Resize not available.')
@test.attr(type='smoke')
def test_resize_server_confirm(self):
self._test_resize_server_confirm(stop=False)
@test.idempotent_id('138b131d-66df-48c9-a171-64f45eb92962')
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
'Resize not available.')
@test.attr(type='smoke')
def test_resize_server_confirm_from_stopped(self):
self._test_resize_server_confirm(stop=True)
@test.idempotent_id('c03aab19-adb1-44f5-917d-c419577e9e68')
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
'Resize not available.')
@test.attr(type='gate')
def test_resize_server_revert(self):
# The server's RAM and disk space should return to their original
# values after a resize is reverted
previous_flavor_ref, new_flavor_ref = \
self._detect_server_image_flavor(self.server_id)
self.client.resize(self.server_id, new_flavor_ref)
self.client.wait_for_server_status(self.server_id, 'VERIFY_RESIZE')
self.client.revert_resize(self.server_id)
self.client.wait_for_server_status(self.server_id, 'ACTIVE')
server = self.client.get_server(self.server_id)
self.assertEqual(previous_flavor_ref, server['flavor']['id'])
@test.idempotent_id('b963d4f1-94b3-4c40-9e97-7b583f46e470')
@testtools.skipUnless(CONF.compute_feature_enabled.snapshot,
'Snapshotting not available, backup not possible.')
@test.attr(type='gate')
@test.services('image')
def test_create_backup(self):
# Positive test: create backup successfully and rotate backups correctly
# create the first and the second backup
backup1 = data_utils.rand_name('backup-1')
resp = self.servers_client.create_backup(self.server_id,
'daily',
2,
backup1).response
oldest_backup_exist = True
# the oldest one should be deleted automatically in this test
def _clean_oldest_backup(oldest_backup):
if oldest_backup_exist:
try:
self.os.image_client.delete_image(oldest_backup)
except lib_exc.NotFound:
pass
else:
LOG.warning("Deletion of oldest backup %s should not have "
"been successful as it should have been "
"deleted during rotation." % oldest_backup)
image1_id = data_utils.parse_image_id(resp['location'])
self.addCleanup(_clean_oldest_backup, image1_id)
self.os.image_client.wait_for_image_status(image1_id, 'active')
backup2 = data_utils.rand_name('backup-2')
self.servers_client.wait_for_server_status(self.server_id, 'ACTIVE')
resp = self.servers_client.create_backup(self.server_id,
'daily',
2,
backup2).response
image2_id = data_utils.parse_image_id(resp['location'])
self.addCleanup(self.os.image_client.delete_image, image2_id)
self.os.image_client.wait_for_image_status(image2_id, 'active')
# verify they have been created
properties = {
'image_type': 'backup',
'backup_type': "daily",
'instance_uuid': self.server_id,
}
image_list = self.os.image_client.image_list_detail(
properties,
status='active',
sort_key='created_at',
sort_dir='asc')
self.assertEqual(2, len(image_list))
self.assertEqual((backup1, backup2),
(image_list[0]['name'], image_list[1]['name']))
# create the third one; since the rotation count is 2,
# the first backup will be deleted
backup3 = data_utils.rand_name('backup-3')
self.servers_client.wait_for_server_status(self.server_id, 'ACTIVE')
resp = self.servers_client.create_backup(self.server_id,
'daily',
2,
backup3).response
image3_id = data_utils.parse_image_id(resp['location'])
self.addCleanup(self.os.image_client.delete_image, image3_id)
# the first backup should be deleted
self.servers_client.wait_for_server_status(self.server_id, 'ACTIVE')
self.os.image_client.wait_for_resource_deletion(image1_id)
oldest_backup_exist = False
image_list = self.os.image_client.image_list_detail(
properties,
status='active',
sort_key='created_at',
sort_dir='asc')
self.assertEqual(2, len(image_list),
'Unexpected number of images for '
'v2:test_create_backup; was the oldest backup not '
'yet deleted? Image list: %s' %
[image['name'] for image in image_list])
self.assertEqual((backup2, backup3),
(image_list[0]['name'], image_list[1]['name']))
def _get_output(self):
output = self.servers_client.get_console_output(
self.server_id, 10).data
self.assertTrue(output, "Console output was empty.")
lines = len(output.split('\n'))
self.assertEqual(lines, 10)
@test.idempotent_id('4b8867e6-fffa-4d54-b1d1-6fdda57be2f3')
@testtools.skipUnless(CONF.compute_feature_enabled.console_output,
'Console output not supported.')
@test.attr(type='gate')
def test_get_console_output(self):
# Positive test: should be able to GET the console output
# for a given server_id and number of lines
# This reboot is necessary to produce some console log after
# creating an instance backup. When an instance backup is taken, the
# console log file is truncated and we cannot get any console log
# through the "console-log" API.
# The detail is https://bugs.launchpad.net/nova/+bug/1251920
self.servers_client.reboot(self.server_id, 'HARD')
self.servers_client.wait_for_server_status(self.server_id, 'ACTIVE')
self.wait_for(self._get_output)
@test.idempotent_id('89104062-69d8-4b19-a71b-f47b7af093d7')
@testtools.skipUnless(CONF.compute_feature_enabled.console_output,
'Console output not supported.')
@test.attr(type='gate')
def test_get_console_output_with_unlimited_size(self):
server = self.create_test_server(wait_until='ACTIVE')
def _check_full_length_console_log():
output = self.servers_client.get_console_output(server['id'],
None).data
self.assertTrue(output, "Console output was empty.")
lines = len(output.split('\n'))
# NOTE: This test tries to get the full-length console log, and the
# length should be bigger than the 10 lines checked in test_get_console_output.
self.assertTrue(lines > 10, "Cannot get enough console log length."
" (lines: %s)" % lines)
self.wait_for(_check_full_length_console_log)
@test.idempotent_id('5b65d4e7-4ecd-437c-83c0-d6b79d927568')
@testtools.skipUnless(CONF.compute_feature_enabled.console_output,
'Console output not supported.')
@test.attr(type='gate')
def test_get_console_output_server_id_in_shutoff_status(self):
# Positive test: should be able to GET the console output
# for a given server_id in SHUTOFF status
# NOTE: SHUTOFF is an irregular status. To avoid test instability,
# a dedicated server is created only for this test instead of reusing
# the server that was created in resource_setup.
server = self.create_test_server(wait_until='ACTIVE')
temp_server_id = server['id']
self.servers_client.stop(temp_server_id)
self.servers_client.wait_for_server_status(temp_server_id, 'SHUTOFF')
self.wait_for(self._get_output)
@test.idempotent_id('bd61a9fd-062f-4670-972b-2d6c3e3b9e73')
@testtools.skipUnless(CONF.compute_feature_enabled.pause,
'Pause is not available.')
@test.attr(type='gate')
def test_pause_unpause_server(self):
self.client.pause_server(self.server_id)
self.client.wait_for_server_status(self.server_id, 'PAUSED')
self.client.unpause_server(self.server_id)
self.client.wait_for_server_status(self.server_id, 'ACTIVE')
@test.idempotent_id('0d8ee21e-b749-462d-83da-b85b41c86c7f')
@testtools.skipUnless(CONF.compute_feature_enabled.suspend,
'Suspend is not available.')
@test.attr(type='gate')
def test_suspend_resume_server(self):
self.client.suspend_server(self.server_id)
self.client.wait_for_server_status(self.server_id, 'SUSPENDED')
self.client.resume_server(self.server_id)
self.client.wait_for_server_status(self.server_id, 'ACTIVE')
@test.idempotent_id('77eba8e0-036e-4635-944b-f7a8f3b78dc9')
@testtools.skipUnless(CONF.compute_feature_enabled.shelve,
'Shelve is not available.')
@test.attr(type='gate')
def test_shelve_unshelve_server(self):
self.client.shelve_server(self.server_id)
offload_time = CONF.compute.shelved_offload_time
if offload_time >= 0:
self.client.wait_for_server_status(self.server_id,
'SHELVED_OFFLOADED',
extra_timeout=offload_time)
else:
self.client.wait_for_server_status(self.server_id,
'SHELVED')
self.client.shelve_offload_server(self.server_id)
self.client.wait_for_server_status(self.server_id,
'SHELVED_OFFLOADED')
server = self.client.get_server(self.server_id)
image_name = server['name'] + '-shelved'
params = {'name': image_name}
images = self.images_client.list_images(params)
self.assertEqual(1, len(images))
self.assertEqual(image_name, images[0]['name'])
self.client.unshelve_server(self.server_id)
self.client.wait_for_server_status(self.server_id, 'ACTIVE')
@test.attr(type='gate')
@test.idempotent_id('af8eafd4-38a7-4a4b-bdbc-75145a580560')
def test_stop_start_server(self):
self.servers_client.stop(self.server_id)
self.servers_client.wait_for_server_status(self.server_id, 'SHUTOFF')
self.servers_client.start(self.server_id)
self.servers_client.wait_for_server_status(self.server_id, 'ACTIVE')
@test.attr(type='gate')
@test.idempotent_id('80a8094c-211e-440a-ab88-9e59d556c7ee')
def test_lock_unlock_server(self):
# Lock the server, try to stop it (which should fail), then unlock it and stop it again
self.servers_client.lock_server(self.server_id)
server = self.servers_client.get_server(self.server_id)
self.assertEqual(server['status'], 'ACTIVE')
# Locked server is not allowed to be stopped by non-admin user
self.assertRaises(lib_exc.Conflict,
self.servers_client.stop, self.server_id)
self.servers_client.unlock_server(self.server_id)
self.servers_client.stop(self.server_id)
self.servers_client.wait_for_server_status(self.server_id, 'SHUTOFF')
self.servers_client.start(self.server_id)
self.servers_client.wait_for_server_status(self.server_id, 'ACTIVE')
def _validate_url(self, url):
valid_scheme = ['http', 'https']
parsed_url = urlparse.urlparse(url)
self.assertIsNotNone(parsed_url.port)
self.assertIsNotNone(parsed_url.hostname)
self.assertIn(parsed_url.scheme, valid_scheme)
@test.idempotent_id('c6bc11bf-592e-4015-9319-1c98dc64daf5')
@testtools.skipUnless(CONF.compute_feature_enabled.vnc_console,
'VNC Console feature is disabled.')
@test.attr(type='gate')
def test_get_vnc_console(self):
# Get the VNC console of type 'novnc' and 'xvpvnc'
console_types = ['novnc', 'xvpvnc']
for console_type in console_types:
body = self.servers_client.get_vnc_console(self.server_id,
console_type)
self.assertEqual(console_type, body['type'])
self.assertNotEqual('', body['url'])
self._validate_url(body['url'])
|
|
# -*- coding: utf-8 -*-
# Copyright 2015 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
felix.fetcd
~~~~~~~~~~~~
Etcd polling functions.
"""
from collections import defaultdict
import functools
import random
from socket import timeout as SocketTimeout
import httplib
import json
import logging
from etcd import (EtcdException, EtcdClusterIdChanged, EtcdKeyNotFound,
EtcdEventIndexCleared)
import etcd
import gevent
import sys
from gevent.event import Event
from urllib3 import Timeout
import urllib3.exceptions
from urllib3.exceptions import ReadTimeoutError, ConnectTimeoutError
from calico import common
from calico.common import ValidationFailed, validate_ip_addr, canonicalise_ip
from calico.datamodel_v1 import (VERSION_DIR, READY_KEY, CONFIG_DIR,
RULES_KEY_RE, TAGS_KEY_RE, ENDPOINT_KEY_RE,
dir_for_per_host_config,
PROFILE_DIR, HOST_DIR, EndpointId, POLICY_DIR,
HOST_IP_KEY_RE, IPAM_V4_CIDR_KEY_RE)
from calico.etcdutils import PathDispatcher
from calico.felix.actor import Actor, actor_message
from calico.felix.futils import intern_dict, intern_list, logging_exceptions
_log = logging.getLogger(__name__)
RETRY_DELAY = 5
# Etcd paths that we care about for use with the PathDispatcher class.
# We use angle-brackets to name parameters that we want to capture.
PER_PROFILE_DIR = PROFILE_DIR + "/<profile_id>"
TAGS_KEY = PER_PROFILE_DIR + "/tags"
RULES_KEY = PER_PROFILE_DIR + "/rules"
PER_HOST_DIR = HOST_DIR + "/<hostname>"
HOST_IP_KEY = PER_HOST_DIR + "/bird_ip"
WORKLOAD_DIR = PER_HOST_DIR + "/workload"
PER_ORCH_DIR = WORKLOAD_DIR + "/<orchestrator>"
PER_WORKLOAD_DIR = PER_ORCH_DIR + "/<workload_id>"
ENDPOINT_DIR = PER_WORKLOAD_DIR + "/endpoint"
PER_ENDPOINT_KEY = ENDPOINT_DIR + "/<endpoint_id>"
IPAM_DIR = VERSION_DIR + "/ipam"
IPAM_V4_DIR = IPAM_DIR + "/v4"
POOL_V4_DIR = IPAM_V4_DIR + "/pool"
CIDR_V4_KEY = POOL_V4_DIR + "/<pool_id>"
RESYNC_KEYS = [
VERSION_DIR,
POLICY_DIR,
PROFILE_DIR,
CONFIG_DIR,
HOST_DIR,
IPAM_DIR,
IPAM_V4_DIR,
POOL_V4_DIR,
]
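# Illustrative note (not part of the original module): the PathDispatcher matches
# concrete etcd keys against the patterns above and passes each <captured> segment
# to the registered handler as a keyword argument. Assuming HOST_DIR expands to
# something like ".../host", a set of ".../host/host1/bird_ip" would invoke the
# HOST_IP_KEY on_set handler below with hostname="host1".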
class EtcdAPI(Actor):
"""
Our API to etcd.
Since the python-etcd API is blocking, we defer API watches to
a worker greenlet and communicate with it via Events.
As and when we add status reporting, to avoid needing to interrupt
in-progress polls, I expect we'll want a second worker greenlet that
manages an "upstream" connection to etcd.
"""
def __init__(self, config, hosts_ipset):
super(EtcdAPI, self).__init__()
self._config = config
# Start up the main etcd-watching greenlet. It will wait for an
# event from us before doing anything.
self._watcher = _EtcdWatcher(config, hosts_ipset)
self._watcher.link(self._on_worker_died)
self._watcher.start()
# Start up a greenlet to trigger periodic resyncs.
self._resync_greenlet = gevent.spawn(self._periodically_resync)
self._resync_greenlet.link_exception(self._on_worker_died)
@logging_exceptions
def _periodically_resync(self):
"""
Greenlet: if enabled, periodically triggers a resync from etcd.
"""
_log.info("Started periodic resync thread, waiting for config.")
self._watcher.configured.wait()
interval = self._config.RESYNC_INTERVAL
_log.info("Config loaded, resync interval %s.", interval)
if interval == 0:
_log.info("Interval is 0, periodic resync disabled.")
return
while True:
# Jitter by 20% of interval.
jitter = random.random() * 0.2 * interval
sleep_time = interval + jitter
_log.debug("After jitter, next periodic resync will be in %.1f "
"seconds.", sleep_time)
gevent.sleep(sleep_time)
self.force_resync(reason="periodic resync", async=True)
@actor_message()
def load_config(self):
"""
Loads our config from etcd, should only be called once.
:return: an event which is triggered when the config has been loaded.
"""
self._watcher.load_config.set()
return self._watcher.configured
@actor_message()
def start_watch(self, splitter):
"""
Starts watching etcd for changes. Implicitly loads the config
if it hasn't been loaded yet.
"""
self._watcher.load_config.set()
self._watcher.splitter = splitter
self._watcher.begin_polling.set()
@actor_message()
def force_resync(self, reason="unknown"):
"""
Force a resync from etcd after the current poll completes.
:param str reason: Optional reason to log out.
"""
_log.info("Forcing a resync from etcd. Reason: %s.", reason)
self._watcher.resync_after_current_poll = True
def _on_worker_died(self, watch_greenlet):
"""
Greenlet: spawned by the gevent Hub if the etcd watch loop ever
stops, kills the process.
"""
_log.critical("Worker greenlet died: %s; exiting.", watch_greenlet)
sys.exit(1)
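# Rough usage sketch (assumptions, not from the original source): the EtcdAPI actor
# is created with the Felix config and a hosts ipset, then driven via its actor
# messages, e.g.:
#
#   etcd_api = EtcdAPI(config, hosts_ipset)
#   configured = etcd_api.load_config(async=False)   # blocks until config is read
#   etcd_api.start_watch(update_splitter, async=True)
#
# The exact calling convention (the async keyword injected by @actor_message) comes
# from calico.felix.actor and is assumed here for illustration only.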
class _EtcdWatcher(gevent.Greenlet):
"""
Greenlet that watches the etcd data model for changes.
(1) Waits for the load_config event to be triggered.
(2) Connects to etcd and waits for the Ready flag to be set,
indicating the data model is consistent.
(3) Loads the config from etcd and passes it to the config object.
(4) Waits for the begin_polling Event to be triggered.
(5) Loads a complete snapshot from etcd and passes it to the
UpdateSplitter.
(6) Watches etcd for changes, sending them incrementally to the
UpdateSplitter.
(On etcd error) starts again from step (5)
This greenlet is expected to be managed by the EtcdAPI Actor.
"""
def __init__(self, config, hosts_ipset):
super(_EtcdWatcher, self).__init__()
self.config = config
self.hosts_ipset = hosts_ipset
# Events triggered by the EtcdAPI Actor to tell us to load the config
# and start polling. These are one-way flags.
self.load_config = Event()
self.begin_polling = Event()
# Flag used to trigger a resync. This is modified from other
# greenlets, which is safe in Python.
self.resync_after_current_poll = False
# Event that we trigger once the config is loaded.
self.configured = Event()
# Etcd client, initialised lazily.
self.client = None
self.my_config_dir = dir_for_per_host_config(self.config.HOSTNAME)
# Polling state initialized at poll start time.
self.splitter = None
self.next_etcd_index = None
# Cache of known endpoints, used to resolve deletions of whole
# directory trees.
self.endpoint_ids_per_host = defaultdict(set)
# Next-hop IP addresses of our hosts, if populated in etcd.
self.ipv4_by_hostname = {}
# Program the dispatcher with the paths we care about. Since etcd
# gives us a single event for a recursive directory deletion, we have
# to handle deletes for lots of directories that we otherwise wouldn't
# care about.
self.dispatcher = PathDispatcher()
reg = self.dispatcher.register
# Top-level directories etc. If these go away, stop polling and
# resync.
for key in RESYNC_KEYS:
reg(key, on_del=self._resync)
reg(READY_KEY, on_set=self.on_ready_flag_set, on_del=self._resync)
# Profiles and their contents.
reg(PER_PROFILE_DIR, on_del=self.on_profile_delete)
reg(TAGS_KEY, on_set=self.on_tags_set, on_del=self.on_tags_delete)
reg(RULES_KEY, on_set=self.on_rules_set, on_del=self.on_rules_delete)
# Hosts, workloads and endpoints.
reg(PER_HOST_DIR, on_del=self.on_host_delete)
reg(HOST_IP_KEY,
on_set=self.on_host_ip_set,
on_del=self.on_host_ip_delete)
reg(WORKLOAD_DIR, on_del=self.on_host_delete)
reg(PER_ORCH_DIR, on_del=self.on_orch_delete)
reg(PER_WORKLOAD_DIR, on_del=self.on_workload_delete)
reg(ENDPOINT_DIR, on_del=self.on_workload_delete)
reg(PER_ENDPOINT_KEY,
on_set=self.on_endpoint_set, on_del=self.on_endpoint_delete)
reg(CIDR_V4_KEY,
on_set=self.on_ipam_v4_pool_set,
on_del=self.on_ipam_v4_pool_delete)
@logging_exceptions
def _run(self):
"""
Greenlet main loop: loads the initial dump from etcd and then
monitors for changes and feeds them to the splitter.
"""
self.load_config.wait()
while True:
_log.info("Reconnecting and loading snapshot from etcd...")
self._reconnect(copy_cluster_id=False)
self._wait_for_ready()
while not self.configured.is_set():
self._load_config()
# Unblock anyone who's waiting on the config.
self.configured.set()
if not self.begin_polling.is_set():
_log.info("etcd worker about to wait for begin_polling event")
self.begin_polling.wait()
try:
# Load initial dump from etcd. First just get all the
# endpoints and profiles by id. The response contains a
# generation ID allowing us to then start polling for updates
# without missing any.
self._load_initial_dump()
while True:
# Wait for something to change.
response = self._wait_for_etcd_event()
self.dispatcher.handle_event(response)
except ResyncRequired:
_log.info("Polling aborted, doing resync.")
def _reconnect(self, copy_cluster_id=True):
etcd_addr = self.config.ETCD_ADDR
if ":" in etcd_addr:
host, port = etcd_addr.split(":")
port = int(port)
else:
host = etcd_addr
port = 4001
if self.client and copy_cluster_id:
old_cluster_id = self.client.expected_cluster_id
_log.info("(Re)connecting to etcd. Old etcd cluster ID was %s.",
old_cluster_id)
else:
_log.info("(Re)connecting to etcd. No previous cluster ID.")
old_cluster_id = None
self.client = etcd.Client(host=host, port=port,
expected_cluster_id=old_cluster_id)
def _wait_for_ready(self):
_log.info("Waiting for etcd to be ready...")
ready = False
while not ready:
try:
db_ready = self.client.read(READY_KEY,
timeout=10).value
except EtcdKeyNotFound:
_log.warn("Ready flag not present in etcd; felix will pause "
"updates until the orchestrator sets the flag.")
db_ready = "false"
except EtcdException as e:
# Note: we don't log the stack trace because it's too spammy and adds little.
_log.error("Failed to retrieve ready flag from etcd (%r). "
"Felix will not receive updates until the "
"connection to etcd is restored.", e)
db_ready = "false"
if db_ready == "true":
_log.info("etcd is ready.")
ready = True
else:
_log.info("etcd not ready. Will retry.")
gevent.sleep(RETRY_DELAY)
continue
def _load_config(self):
"""
Loads our start-of-day configuration from etcd. Does not return
until the config is successfully loaded.
"""
while True:
try:
global_cfg = self.client.read(CONFIG_DIR)
global_dict = _build_config_dict(global_cfg)
try:
host_cfg = self.client.read(self.my_config_dir)
host_dict = _build_config_dict(host_cfg)
except EtcdKeyNotFound:
# It is not an error for there to be no per-host
# config; default to empty.
_log.info("No configuration overrides for this node")
host_dict = {}
except (EtcdKeyNotFound, EtcdException) as e:
# Note: we don't log the stack trace because it's too
# spammy and adds little.
_log.error("Failed to read config. etcd may be down or "
"the data model may not be ready: %r. Will "
"retry.", e)
gevent.sleep(RETRY_DELAY)
else:
self.config.report_etcd_config(host_dict, global_dict)
return
def _load_initial_dump(self):
"""
Loads a snapshot from etcd and passes it to the update splitter.
:raises ResyncRequired: if the Ready flag is not set in the snapshot.
"""
initial_dump = self.client.read(VERSION_DIR, recursive=True)
_log.info("Loaded snapshot from etcd cluster %s, parsing it...",
self.client.expected_cluster_id)
rules_by_id = {}
tags_by_id = {}
endpoints_by_id = {}
ipv4_pools_by_id = {}
self.endpoint_ids_per_host.clear()
self.ipv4_by_hostname.clear()
still_ready = False
for child in initial_dump.children:
profile_id, rules = parse_if_rules(child)
if profile_id:
rules_by_id[profile_id] = rules
continue
profile_id, tags = parse_if_tags(child)
if profile_id:
tags_by_id[profile_id] = tags
continue
endpoint_id, endpoint = parse_if_endpoint(self.config, child)
if endpoint_id and endpoint:
endpoints_by_id[endpoint_id] = endpoint
self.endpoint_ids_per_host[endpoint_id.host].add(endpoint_id)
continue
pool_id, pool = parse_if_ipam_v4_pool(child)
if pool_id and pool:
ipv4_pools_by_id[pool_id] = pool
continue
if self.config.IP_IN_IP_ENABLED:
hostname, ip = parse_if_host_ip(child)
if hostname and ip:
self.ipv4_by_hostname[hostname] = ip
continue
# Double-check the flag hasn't changed since we read it before.
if child.key == READY_KEY:
if child.value == "true":
still_ready = True
else:
_log.warning("Aborting resync because ready flag was"
"unset since we read it.")
raise ResyncRequired()
if not still_ready:
_log.warn("Aborting resync; ready flag no longer present.")
raise ResyncRequired()
# Actually apply the snapshot. This does not return anything, but
# just sends the relevant messages to the relevant threads to make
# all the processing occur.
_log.info("Snapshot parsed, passing to update splitter")
self.splitter.apply_snapshot(rules_by_id,
tags_by_id,
endpoints_by_id,
ipv4_pools_by_id,
async=True)
if self.config.IP_IN_IP_ENABLED:
# We only support IPv4 for host tracking right now so there's not
# much point in going via the splitter.
# FIXME Support IP-in-IP for IPv6.
_log.info("Sending (%d) host IPs to ipset.",
len(self.ipv4_by_hostname))
self.hosts_ipset.replace_members(self.ipv4_by_hostname.values(),
async=True)
# The etcd_index is the high-water-mark for the snapshot, record that
# we want to poll starting at the next index.
self.next_etcd_index = initial_dump.etcd_index + 1
def _wait_for_etcd_event(self):
"""
Polls etcd until something changes.
Retries on read timeouts and other non-fatal errors.
:returns: The etcd response object for the change.
:raises ResyncRequired: If we get out of sync with etcd or hit
a fatal error.
"""
response = None
while not response:
if self.resync_after_current_poll:
_log.debug("Told to resync, aborting poll.")
self.resync_after_current_poll = False
raise ResyncRequired()
try:
_log.debug("About to wait for etcd update %s",
self.next_etcd_index)
response = self.client.read(VERSION_DIR,
wait=True,
waitIndex=self.next_etcd_index,
recursive=True,
timeout=Timeout(connect=10,
read=90),
check_cluster_uuid=True)
_log.debug("etcd response: %r", response)
except (ReadTimeoutError, SocketTimeout) as e:
# This is expected when we're doing a poll and nothing
# happened. socket timeout doesn't seem to be caught by
# urllib3 1.7.1. Simply reconnect.
_log.debug("Read from etcd timed out (%r), retrying.", e)
# Force a reconnect to ensure urllib3 doesn't recycle the
# connection. (We were seeing this with urllib3 1.7.1.)
self._reconnect()
except (ConnectTimeoutError,
urllib3.exceptions.HTTPError,
httplib.HTTPException) as e:
# We don't log out the stack trace here because it can spam the
# logs heavily if the requests keep failing. The errors are
# very descriptive anyway.
_log.warning("Low-level HTTP error, reconnecting to "
"etcd: %r.", e)
self._reconnect()
except (EtcdClusterIdChanged, EtcdEventIndexCleared) as e:
_log.warning("Out of sync with etcd (%r). Reconnecting "
"for full sync.", e)
raise ResyncRequired()
except EtcdException as e:
# Sadly, python-etcd doesn't have a dedicated exception
# for the "no more machines in cluster" error. Parse the
# message:
msg = (e.message or "unknown").lower()
# Limit our retry rate in case etcd is down.
gevent.sleep(1)
if "no more machines" in msg:
# This error comes from python-etcd when it can't
# connect to any servers. When we retry, it should
# reconnect.
# TODO: We should probably limit retries here and die
# That'd recover from errors caused by resource
# exhaustion/leaks.
_log.error("Connection to etcd failed, will retry.")
else:
# Assume any other errors are fatal to our poll and
# do a full resync.
_log.exception("Unknown etcd error %r; doing resync.",
e.message)
self._reconnect()
raise ResyncRequired()
except:
_log.exception("Unexpected exception during etcd poll")
raise
# Since we're polling on a subtree, we can't just increment
# the index, we have to look at the modifiedIndex to spot
# if we've skipped a lot of updates.
self.next_etcd_index = max(self.next_etcd_index,
response.modifiedIndex) + 1
return response
def _resync(self, response, **kwargs):
"""
Force a resync.
:raises ResyncRequired: always.
"""
raise ResyncRequired()
def on_ready_flag_set(self, response):
if response.value != "true":
raise ResyncRequired()
def on_endpoint_set(self, response, hostname, orchestrator,
workload_id, endpoint_id):
"""Handler for endpoint updates, passes the update to the splitter."""
combined_id = EndpointId(hostname, orchestrator, workload_id,
endpoint_id)
_log.debug("Endpoint %s updated", combined_id)
self.endpoint_ids_per_host[combined_id.host].add(combined_id)
endpoint = parse_endpoint(self.config, combined_id, response.value)
self.splitter.on_endpoint_update(combined_id, endpoint, async=True)
def on_endpoint_delete(self, response, hostname, orchestrator,
workload_id, endpoint_id):
"""Handler for endpoint deleted, passes the update to the splitter."""
combined_id = EndpointId(hostname, orchestrator, workload_id,
endpoint_id)
_log.debug("Endpoint %s deleted", combined_id)
self.endpoint_ids_per_host[combined_id.host].discard(combined_id)
if not self.endpoint_ids_per_host[combined_id.host]:
del self.endpoint_ids_per_host[combined_id.host]
self.splitter.on_endpoint_update(combined_id, None, async=True)
def on_rules_set(self, response, profile_id):
"""Handler for rules updates, passes the update to the splitter."""
_log.debug("Rules for %s set", profile_id)
rules = parse_rules(profile_id, response.value)
profile_id = intern(profile_id.encode("utf8"))
self.splitter.on_rules_update(profile_id, rules, async=True)
def on_rules_delete(self, response, profile_id):
"""Handler for rules deletes, passes the update to the splitter."""
_log.debug("Rules for %s deleted", profile_id)
self.splitter.on_rules_update(profile_id, None, async=True)
def on_tags_set(self, response, profile_id):
"""Handler for tags updates, passes the update to the splitter."""
_log.debug("Tags for %s set", profile_id)
rules = parse_tags(profile_id, response.value)
profile_id = intern(profile_id.encode("utf8"))
self.splitter.on_tags_update(profile_id, rules, async=True)
def on_tags_delete(self, response, profile_id):
"""Handler for tags deletes, passes the update to the splitter."""
_log.debug("Tags for %s deleted", profile_id)
self.splitter.on_tags_update(profile_id, None, async=True)
def on_profile_delete(self, response, profile_id):
"""
Handler for a whole profile deletion
Fakes a tag and rules delete.
"""
# Fake deletes for the rules and tags.
_log.debug("Whole profile %s deleted", profile_id)
self.splitter.on_rules_update(profile_id, None, async=True)
self.splitter.on_tags_update(profile_id, None, async=True)
def on_host_delete(self, response, hostname):
"""
Handler for deletion of a whole host directory.
Deletes all the contained endpoints.
"""
ids_on_that_host = self.endpoint_ids_per_host.pop(hostname, set())
_log.info("Host %s deleted, removing %d endpoints",
hostname, len(ids_on_that_host))
for endpoint_id in ids_on_that_host:
self.splitter.on_endpoint_update(endpoint_id, None, async=True)
self.on_host_ip_delete(response, hostname)
def on_host_ip_set(self, response, hostname):
if not self.config.IP_IN_IP_ENABLED:
_log.debug("Ignoring update to %s because IP-in-IP is disabled",
response.key)
return
ip = parse_host_ip(hostname, response.value)
if ip:
self.ipv4_by_hostname[hostname] = ip
else:
_log.warning("Invalid IP for hostname %s: %s, treating as "
"deletion", hostname, response.value)
self.ipv4_by_hostname.pop(hostname, None)
self.hosts_ipset.replace_members(self.ipv4_by_hostname.values(),
async=True)
def on_host_ip_delete(self, response, hostname):
if not self.config.IP_IN_IP_ENABLED:
_log.debug("Ignoring update to %s because IP-in-IP is disabled",
response.key)
return
if self.ipv4_by_hostname.pop(hostname, None):
self.hosts_ipset.replace_members(self.ipv4_by_hostname.values(),
async=True)
def on_ipam_v4_pool_set(self, response, pool_id):
pool = parse_ipam_pool(pool_id, response.value)
self.splitter.on_ipam_pool_update(pool_id, pool, async=True)
def on_ipam_v4_pool_delete(self, response, pool_id):
self.splitter.on_ipam_pool_update(pool_id, None, async=True)
def on_orch_delete(self, response, hostname, orchestrator):
"""
Handler for deletion of a whole host orchestrator directory.
Deletes all the contained endpoints.
"""
_log.info("Orchestrator dir %s/%s deleted, removing contained hosts",
hostname, orchestrator)
orchestrator = intern(orchestrator.encode("utf8"))
for endpoint_id in list(self.endpoint_ids_per_host[hostname]):
if endpoint_id.orchestrator == orchestrator:
self.splitter.on_endpoint_update(endpoint_id, None, async=True)
self.endpoint_ids_per_host[hostname].discard(endpoint_id)
if not self.endpoint_ids_per_host[hostname]:
del self.endpoint_ids_per_host[hostname]
def on_workload_delete(self, response, hostname, orchestrator,
workload_id):
"""
Handler for deletion of a whole workload directory.
Deletes all the contained endpoints.
"""
_log.debug("Workload dir %s/%s/%s deleted, removing endpoints",
hostname, orchestrator, workload_id)
orchestrator = intern(orchestrator.encode("utf8"))
workload_id = intern(workload_id.encode("utf8"))
for endpoint_id in list(self.endpoint_ids_per_host[hostname]):
if (endpoint_id.orchestrator == orchestrator and
endpoint_id.workload == workload_id):
self.splitter.on_endpoint_update(endpoint_id, None, async=True)
self.endpoint_ids_per_host[hostname].discard(endpoint_id)
if not self.endpoint_ids_per_host[hostname]:
del self.endpoint_ids_per_host[hostname]
def _build_config_dict(cfg_node):
"""
Updates the config dict provided from the given etcd node, which
should point at a config directory.
"""
config_dict = {}
for child in cfg_node.children:
key = child.key.rsplit("/").pop()
value = str(child.value)
config_dict[key] = value
return config_dict
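# Illustration (hypothetical key names): an etcd config directory holding
# ".../config/Foo" = "bar" and ".../config/LogLevel" = "INFO" would yield
# {"Foo": "bar", "LogLevel": "INFO"} -- the dict key is the final path segment
# and every value is coerced to str.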
# Intern JSON keys as we load them to reduce occupancy.
FIELDS_TO_INTERN = set([
# Endpoint dicts. It doesn't seem worth interning items like the MAC
# address or TAP name, which are rarely (if ever) shared.
"profile_id",
"profile_ids",
"state",
"ipv4_gateway",
"ipv6_gateway",
# Rules dicts.
"protocol",
"src_tag",
"dst_tag",
"action",
])
json_decoder = json.JSONDecoder(
object_hook=functools.partial(intern_dict,
fields_to_intern=FIELDS_TO_INTERN)
)
def parse_if_endpoint(config, etcd_node):
m = ENDPOINT_KEY_RE.match(etcd_node.key)
if m:
# Got an endpoint.
host = m.group("hostname")
orch = m.group("orchestrator")
workload_id = m.group("workload_id")
endpoint_id = m.group("endpoint_id")
combined_id = EndpointId(host, orch, workload_id, endpoint_id)
if etcd_node.action == "delete":
_log.debug("Found deleted endpoint %s", endpoint_id)
endpoint = None
else:
endpoint = parse_endpoint(config, combined_id, etcd_node.value)
# EndpointId does the interning for us.
return combined_id, endpoint
return None, None
def parse_endpoint(config, combined_id, raw_json):
endpoint = safe_decode_json(raw_json,
log_tag="endpoint %s" % combined_id.endpoint)
try:
common.validate_endpoint(config, combined_id, endpoint)
except ValidationFailed as e:
_log.warning("Validation failed for endpoint %s, treating as "
"missing: %s", combined_id, e.message)
endpoint = None
else:
_log.debug("Validated endpoint : %s", endpoint)
return endpoint
def parse_if_rules(etcd_node):
m = RULES_KEY_RE.match(etcd_node.key)
if m:
# Got some rules.
profile_id = m.group("profile_id")
if etcd_node.action == "delete":
rules = None
else:
rules = parse_rules(profile_id, etcd_node.value)
return intern(profile_id.encode("utf8")), rules
return None, None
def parse_rules(profile_id, raw_json):
rules = safe_decode_json(raw_json, log_tag="rules %s" % profile_id)
try:
common.validate_rules(profile_id, rules)
except ValidationFailed as e:
_log.exception("Validation failed for profile %s rules: %s; %r",
profile_id, rules, e)
return None
else:
return rules
def parse_if_tags(etcd_node):
m = TAGS_KEY_RE.match(etcd_node.key)
if m:
# Got some tags.
profile_id = m.group("profile_id")
if etcd_node.action == "delete":
tags = None
else:
tags = parse_tags(profile_id, etcd_node.value)
return intern(profile_id.encode("utf8")), tags
return None, None
def parse_tags(profile_id, raw_json):
tags = safe_decode_json(raw_json, log_tag="tags %s" % profile_id)
try:
common.validate_tags(profile_id, tags)
except ValidationFailed:
_log.exception("Validation failed for profile %s tags : %s",
profile_id, tags)
return None
else:
# The tags aren't in a top-level object so we need to manually
# intern them here.
return intern_list(tags)
def parse_if_host_ip(etcd_node):
m = HOST_IP_KEY_RE.match(etcd_node.key)
if m:
# Got a host IP key.
hostname = m.group("hostname")
if etcd_node.action == "delete":
ip = None
else:
ip = parse_host_ip(hostname, etcd_node.value)
return hostname, ip
return None, None
def parse_host_ip(hostname, raw_value):
if raw_value is None or validate_ip_addr(raw_value):
return canonicalise_ip(raw_value, None)
else:
_log.debug("%s has invalid IP: %r", hostname, raw_value)
return None
def parse_if_ipam_v4_pool(etcd_node):
m = IPAM_V4_CIDR_KEY_RE.match(etcd_node.key)
if m:
# Got an IPAM v4 pool.
pool_id = m.group("encoded_cidr")
if etcd_node.action == "delete":
pool = None
else:
pool = parse_ipam_pool(pool_id, etcd_node.value)
return pool_id, pool
return None, None
def parse_ipam_pool(pool_id, raw_json):
pool = safe_decode_json(raw_json, log_tag="ipam pool %s" % pool_id)
try:
common.validate_ipam_pool(pool_id, pool, 4)
except ValidationFailed as e:
_log.exception("Validation failed for ipam pool %s: %s; %r",
pool_id, pool, e)
return None
else:
return pool
def safe_decode_json(raw_json, log_tag=None):
try:
return json_decoder.decode(raw_json)
except (TypeError, ValueError):
_log.warning("Failed to decode JSON for %s: %r. Returning None.",
log_tag, raw_json)
return None
class ResyncRequired(Exception):
pass
|
|
"""Base async HTTP client implementation."""
import sys
from http.client import responses
from vine import Thenable, maybe_promise, promise
from kombu.exceptions import HttpError
from kombu.utils.compat import coro
from kombu.utils.encoding import bytes_to_str
from kombu.utils.functional import maybe_list, memoize
__all__ = ('Headers', 'Response', 'Request')
PYPY = hasattr(sys, 'pypy_version_info')
@memoize(maxsize=1000)
def normalize_header(key):
return '-'.join(p.capitalize() for p in key.split('-'))
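# For example, normalize_header('content-type') returns 'Content-Type' and
# normalize_header('x-request-id') returns 'X-Request-Id'; results are memoized
# for up to 1000 distinct keys.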
class Headers(dict):
"""Represents a mapping of HTTP headers."""
# TODO: This is just a regular dict and will not perform normalization
# when looking up keys etc.
#: Set when all of the headers have been read.
complete = False
#: Internal attribute used to keep track of continuation lines.
_prev_key = None
@Thenable.register
class Request:
"""A HTTP Request.
Arguments:
url (str): The URL to request.
method (str): The HTTP method to use (defaults to ``GET``).
Keyword Arguments:
headers (Dict, ~kombu.asynchronous.http.Headers): Optional headers for
this request
body (str): Optional body for this request.
connect_timeout (float): Connection timeout in float seconds.
Default is 30.0.
request_timeout (float): Time in float seconds before the request times out.
Default is 30.0.
follow_redirects (bool): Specify if the client should follow redirects
Enabled by default.
max_redirects (int): Maximum number of redirects (default 6).
use_gzip (bool): Allow the server to use gzip compression.
Enabled by default.
validate_cert (bool): Set to true if the server certificate should be
verified when performing ``https://`` requests.
Enabled by default.
auth_username (str): Username for HTTP authentication.
auth_password (str): Password for HTTP authentication.
auth_mode (str): Type of HTTP authentication (``basic`` or ``digest``).
user_agent (str): Custom user agent for this request.
network_interface (str): Network interface to use for this request.
on_ready (Callable): Callback to be called when the response has been
received. Must accept single ``response`` argument.
on_stream (Callable): Optional callback to be called every time body
content has been read from the socket. If specified then the
response body and buffer attributes will not be available.
on_timeout (callable): Optional callback to be called if the request
times out.
on_header (Callable): Optional callback to be called for every header
line received from the server. The signature
is ``(headers, line)`` and note that if you want
``response.headers`` to be populated then your callback needs to
also call ``client.on_header(headers, line)``.
on_prepare (Callable): Optional callback that is implementation
specific (e.g. curl client will pass the ``curl`` instance to
this callback).
proxy_host (str): Optional proxy host. Note that a ``proxy_port`` must
also be provided or a :exc:`ValueError` will be raised.
proxy_username (str): Optional username to use when logging in
to the proxy.
proxy_password (str): Optional password to use when authenticating
with the proxy server.
ca_certs (str): Custom CA certificates file to use.
client_key (str): Optional filename for client SSL key.
client_cert (str): Optional filename for client SSL certificate.
"""
body = user_agent = network_interface = \
auth_username = auth_password = auth_mode = \
proxy_host = proxy_port = proxy_username = proxy_password = \
ca_certs = client_key = client_cert = None
connect_timeout = 30.0
request_timeout = 30.0
follow_redirects = True
max_redirects = 6
use_gzip = True
validate_cert = True
if not PYPY: # pragma: no cover
__slots__ = ('url', 'method', 'on_ready', 'on_timeout', 'on_stream',
'on_prepare', 'on_header', 'headers',
'__weakref__', '__dict__')
def __init__(self, url, method='GET', on_ready=None, on_timeout=None,
on_stream=None, on_prepare=None, on_header=None,
headers=None, **kwargs):
self.url = url
self.method = method or self.method
self.on_ready = maybe_promise(on_ready) or promise()
self.on_timeout = maybe_promise(on_timeout)
self.on_stream = maybe_promise(on_stream)
self.on_prepare = maybe_promise(on_prepare)
self.on_header = maybe_promise(on_header)
if kwargs:
for k, v in kwargs.items():
setattr(self, k, v)
if not isinstance(headers, Headers):
headers = Headers(headers or {})
self.headers = headers
def then(self, callback, errback=None):
self.on_ready.then(callback, errback)
def __repr__(self):
return '<Request: {0.method} {0.url} {0.body}>'.format(self)
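# Minimal usage sketch (not part of kombu): how a Request is built and how a
# callback is chained onto its on_ready promise.  The URL and header values
# are placeholders; a concrete client is still needed to actually perform it.
def _example_build_request():
    def on_done(response):
        print(response.code, response.effective_url)
    req = Request('http://localhost:8000/', method='GET',
                  headers={'Accept': 'application/json'})
    req.then(on_done)  # attaches on_done to req.on_ready
    return req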
class Response:
"""HTTP Response.
Arguments:
request (~kombu.asynchronous.http.Request): See :attr:`request`.
code (int): See :attr:`code`.
headers (~kombu.asynchronous.http.Headers): See :attr:`headers`.
buffer (bytes): See :attr:`buffer`
effective_url (str): See :attr:`effective_url`.
status (str): See :attr:`status`.
Attributes:
request (~kombu.asynchronous.http.Request): object used to
get this response.
code (int): HTTP response code (e.g. 200, 404, or 500).
headers (~kombu.asynchronous.http.Headers): HTTP headers
for this response.
buffer (bytes): Socket read buffer.
effective_url (str): The destination url for this request after
following redirects.
error (Exception): Error instance if the request resulted in
a HTTP error code.
status (str): Human equivalent of :attr:`code`,
e.g. ``OK``, ``Not Found``, or ``Internal Server Error``.
"""
if not PYPY: # pragma: no cover
__slots__ = ('request', 'code', 'headers', 'buffer', 'effective_url',
'error', 'status', '_body', '__weakref__')
def __init__(self, request, code, headers=None, buffer=None,
effective_url=None, error=None, status=None):
self.request = request
self.code = code
self.headers = headers if headers is not None else Headers()
self.buffer = buffer
self.effective_url = effective_url or request.url
self._body = None
self.status = status or responses.get(self.code, 'Unknown')
self.error = error
if self.error is None and (self.code < 200 or self.code > 299):
self.error = HttpError(self.code, self.status, self)
def raise_for_error(self):
"""Raise if the request resulted in an HTTP error code.
Raises:
:class:`~kombu.exceptions.HttpError`
"""
if self.error:
raise self.error
@property
def body(self):
"""The full contents of the response body.
Note:
Accessing this property will evaluate the buffer
and subsequent accesses will be cached.
"""
if self._body is None:
if self.buffer is not None:
self._body = self.buffer.getvalue()
return self._body
# these are for compatibility with Requests
@property
def status_code(self):
return self.code
@property
def content(self):
return self.body
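# Illustrative sketch of a Response as a client implementation might build it.
# The BytesIO object stands in for the socket read buffer; only attributes
# documented above are used, and the URL is a placeholder.
def _example_build_response():
    from io import BytesIO
    req = Request('http://localhost:8000/')
    res = Response(req, 200, buffer=BytesIO(b'{"ok": true}'))
    assert res.status == 'OK'
    assert res.body == b'{"ok": true}'   # evaluates and caches the buffer
    assert res.status_code == res.code   # requests-style alias
    res.raise_for_error()                # no-op for 2xx responses
    return res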
@coro
def header_parser(keyt=normalize_header):
while 1:
(line, headers) = yield
if line.startswith('HTTP/'):
continue
elif not line:
headers.complete = True
continue
elif line[0].isspace():
pkey = headers._prev_key
headers[pkey] = ' '.join([headers.get(pkey) or '', line.lstrip()])
else:
key, value = line.split(':', 1)
key = headers._prev_key = keyt(key)
headers[key] = value.strip()
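# Quick sketch of feeding raw header lines through the coroutine above.  The
# @coro decorator primes the generator, so send() can be called immediately;
# the status line is skipped and continuation lines are folded into the
# previous key.
def _example_parse_headers():
    headers = Headers()
    parser = header_parser()
    for line in ('HTTP/1.1 200 OK',
                 'Content-Type: text/plain',
                 'X-Custom: a',
                 ' continued-value',
                 ''):
        parser.send((line, headers))
    # headers is now {'Content-Type': 'text/plain',
    #                 'X-Custom': 'a continued-value'}
    # and headers.complete is True.
    return headers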
class BaseClient:
Headers = Headers
Request = Request
Response = Response
def __init__(self, hub, **kwargs):
self.hub = hub
self._header_parser = header_parser()
def perform(self, request, **kwargs):
for req in maybe_list(request) or []:
if not isinstance(req, self.Request):
req = self.Request(req, **kwargs)
self.add_request(req)
def add_request(self, request):
raise NotImplementedError('must implement add_request')
def close(self):
pass
def on_header(self, headers, line):
try:
self._header_parser.send((bytes_to_str(line), headers))
except StopIteration:
self._header_parser = header_parser()
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.close()
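# A minimal concrete client sketch (an assumption, not one of kombu's real
# clients): the only hook a subclass must provide is add_request().  This stub
# completes every request immediately with an empty 204 response instead of
# scheduling it on the hub.
class _ExampleDummyClient(BaseClient):
    def add_request(self, request):
        response = self.Response(request, 204)
        request.on_ready(response)   # fulfil the promise right away
        return request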
|
|
## step_stats_watcher.py
## Author: Daniel "Albinohat" Mercado
## This script parses through Stepmania's Stats.xml file and stores information in a text file to be displayed on a livestream.
## Standard Imports
import os, re, sys, threading, time
## Third-Party Imports
from bs4 import BeautifulSoup
## Global Variables - Lazy Mode
VERSION = "0.1.2b released 2015-01-30"
## Initialize a list to check that all the required attributes are present.
config_bools = [0, 0, 0, 0, 0]
bool_config = 0
bool_help = 0
bool_stdout = 0
bool_version = 0
bool_change = 0
bool_exit = 0
bool_show = 1
bool_diff = 1
stats_refresh = 0
diff_refresh = 0
display_name = ""
current_seconds = ""
current_time = ""
current_notes = ""
current_songs = ""
previous_seconds = ""
previous_notes = ""
previous_songs = ""
## WriteDiffThread - A thread which writes the diffs to the required files.
class WriteDiffThread(threading.Thread):
## __init__ - Initializes the attributes of the WriteDiffThread instance.
def __init__(self):
threading.Thread.__init__(self)
self.start()
## run - This method calls the writeDiff method.
def run(self):
writeDiff()
## writeDiff - Writes the differences in gameplay time, notes tapped and songs played to a text file.
def writeDiff():
## Reset the change bool and text.
change_text = "\n== Stats Change @ " + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + " =="
bool_change = 0
## This file's text will display stat changes.
try:
output_diff_file = open(output_diff_path, "w+")
except IOError:
print("\n Error: Unable to write to \"" + output_diff_path + ".\" Please ensure you have the rights to write there.")
sys.exit()
## Write a blank line to the file.
output_diff_file.write("\n")
## Only compare if the previous values aren't empty and if a change has occurred.
if (previous_seconds != "" and previous_notes != "" and previous_songs != ""):
new_seconds = current_seconds - previous_seconds
## Change
if (new_seconds > 0):
if (bool_stdout == 1):
change_text += "\n Gameplay Time: +" + (time.strftime("%H:%M:%S", time.gmtime(float(new_seconds))))
output_diff_file.write("+" + str(time.strftime("%H:%M:%S", time.gmtime(float(new_seconds)))) + "\n")
bool_change = 1
## No Change.
else:
output_diff_file.write("\n")
new_notes = current_notes - previous_notes
if (new_notes > 0):
if (bool_stdout == 1):
change_text += "\n Notes Tapped: +" + str('{0:,}'.format(new_notes))
output_diff_file.write("+" + str('{0:,}'.format(new_notes)) + "\n")
bool_change = 1
## No Change.
else:
output_diff_file.write("\n")
new_songs = current_songs - previous_songs
if (new_songs > 0):
if (bool_stdout == 1):
change_text += "\n Songs Played: +" + str(new_songs)
output_diff_file.write("+" + str(new_songs) + "\n")
bool_change = 1
## No Change.
else:
output_diff_file.write("\n")
if (bool_stdout == 1 and bool_change == 1):
print(change_text)
output_diff_file.close()
## Sleep then clear and close the files.
for i in range(int(diff_refresh) * 4):
if (bool_exit == 1):
return
time.sleep(0.25)
output_diff_file = open(output_diff_path, "w+")
output_diff_file.write(" \n \n \n ")
output_diff_file.close()
## writeStats - Writes the player stats to a text file.
def writeStats():
## Open the file displayed on stream, write to it and close it.
## Line 1 - Username
## Line 2 - Time Played
## Line 3 - Notes Tapped
## Line 4 - Songs Played
try:
output_stats_file = open(output_stats_path, "w+")
except IOError:
print("\n Error: Unable to write to \"" + output_stats_path + ".\" Please ensure you have the rights to write there.")
sys.exit()
output_stats_file.write(str(display_name) + "\n")
output_stats_file.write("Time: " + str(current_time) + "\n")
output_stats_file.write("Notes: " + str('{0:,}'.format(current_notes)) + "\n")
output_stats_file.write("Songs: " + str(current_songs) + "\n")
output_stats_file.close()
## Validate # of CLA
if (len(sys.argv) < 2 or len(sys.argv) > 4):
print("\n Invalid Syntax. Use -h for help.")
sys.exit()
else:
## Parse through the CLA, ignoring [0] since it is the filename.
## bool_help set to 1 will cause the script to exit.
for arg in sys.argv:
temp = arg.lower()
if (arg != sys.argv[0]):
if (temp == "-h" or temp == "--help"):
bool_help = 1
elif (temp == "-v" or temp == "--version"):
bool_version = 1
elif (temp == "-s" or temp == "--stdout"):
bool_stdout = 1
elif (temp == "--no-diff"):
bool_diff = 0
elif (re.match("--?\w+", temp)):
print("\n Invalid Syntax. Use -h for help.")
sys.exit()
else:
if (os.path.isfile(arg)):
## Attempt to open the configuration file.
try:
config_file = open(arg, "r+")
except IOError:
print("\n Error: Unable to open file: \"" + arg + "\"")
sys.exit()
bool_config = 1
## Parse the configuration file line by line.
for line in config_file:
line = line.strip()
## Ignore header line.
if (line == "[step_stats_watcher]"):
continue
## Massage paths to use '/'
line = re.sub(r'\\\\|\\', r'/', line)
## Parse the config file and assign the path variables.
key, value = line.split("=", 1)
if (key == "input_stats_path"):
input_stats_path = value
config_bools[0] = 1
elif (key == "output_stats_path"):
output_stats_path = value
config_bools[1] = 1
elif (key == "output_diff_path"):
output_diff_path = value
config_bools[2] = 1
elif (key == "stats_refresh"):
stats_refresh = value
config_bools[3] = 1
elif (key == "diff_refresh"):
diff_refresh = value
config_bools[4] = 1
else:
print("\n Invalid attribute \"" + key + "\". Exiting...\n")
sys.exit()
else:
print("\n Error. Unable to open file \"" + arg + "\"")
sys.exit()
## Print out the help dialog.
if (bool_help == 1):
print("\n Usage: " + sys.argv[0] + " [options] config_file\n")
print(" Options")
print(" -h | --help - Prints out this help.")
print(" -s | --stdout - Prints out stat changes to STDOUT.")
print(" -v | --version - Prints out the version you are using.")
print(" --no-diff - Changes in stats won't be updated in separate text files. Stat-only mode.")
print("\nconfig_file - The INI file containing the settings for the script.")
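## Example configuration file (illustrative only; paths are placeholders).
## All five attributes are required, keys and values are separated by '=',
## stats_refresh must be at least 10 and diff_refresh must be smaller than
## stats_refresh.
##
## [step_stats_watcher]
## input_stats_path=C:/Games/StepMania 5/Save/LocalProfiles/00000000/Stats.xml
## output_stats_path=C:/stream/step_stats.txt
## output_diff_path=C:/stream/step_diff.txt
## stats_refresh=30
## diff_refresh=10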
## Print out the version.
if (bool_version == 1):
## Put a line between help and version.
print("\n Version " + VERSION)
## Exit if either help or version was specified.
if (bool_help == 1 or bool_version == 1):
sys.exit()
## Exit if there was no config file specified.
if (bool_config == 0):
print("\n Invalid Syntax. Use -h for help.")
sys.exit()
## Exit if there are missing configuration entries.
for each in config_bools:
if (each == 0):
print("\n Invalid configuration. At least one required attribute is missing. See the Step Stats Watcher wiki for more information.")
sys.exit()
## Exit if the stats_refresh is smaller than 10 seconds.
if (float(stats_refresh) < 10):
print("\n Invalid configuration. stats_refresh must be at least 10.")
sys.exit()
## Exit if stats_refresh is smaller than diff_refresh.
if (float(stats_refresh) <= float(diff_refresh)):
print("\n Invalid configuration. diff_refresh must be smaller than stats_refresh.")
sys.exit()
print("\nStep Stats Watcher is running. Press CTRL+C to exit.")
bool_init_stats = 1
while(1):
try:
## Open the stats file and read in the contents and close it again.
input_stats_file = open(input_stats_path, "r")
stats_text = input_stats_file.read()
input_stats_file.close()
## Extract the stats of interest from the file.
soup = BeautifulSoup(stats_text, "xml")
display_name = str(soup.DisplayName.contents[0])
current_seconds = int(soup.TotalGameplaySeconds.contents[0])
current_time = time.strftime("%H:%M:%S", time.gmtime(float(current_seconds)))
current_notes = int(soup.TotalTapsAndHolds.contents[0]) + int(soup.TotalJumps.contents[0]) + int(soup.TotalHolds.contents[0]) + int(soup.TotalRolls.contents[0]) + int(soup.TotalMines.contents[0]) + int(soup.TotalHands.contents[0])
current_songs = int(soup.NumTotalSongsPlayed.contents[0])
## Note the initial stats.
if (bool_init_stats == 1):
start_display = display_name
start_seconds = current_seconds
start_notes = current_notes
start_songs = current_songs
bool_init_stats = 0
## Write the current stats to a text file.
writeStats()
## write the difference in stats to a text file if enabled.
if (bool_diff == 1):
WriteDiffThread()
## Sleep for stats_refresh seconds before polling the stats file again.
time.sleep(float(stats_refresh))
## Fill in the previous values to compare.
previous_seconds = current_seconds
previous_notes = current_notes
previous_songs = current_songs
except KeyboardInterrupt:
print("\nCTRL+C Detected. Exiting...")
if (bool_stdout == 1):
print("\n== Session Summary ==")
print(" Gameplay Time: " + str(time.strftime("%H:%M:%S", time.gmtime(float(current_seconds - start_seconds)))))
print(" Notes Tapped: " + str('{0:,}'.format(current_notes - start_notes)))
print(" Songs Played: " + str(current_songs - start_songs))
## Signal to the child thread to exit.
bool_exit = 1
## Exit main.
sys.exit()
|
|
'''
Created on Mar 8, 2015
@author: hijungshin
'''
from visualobjects import VisualObject
from video import Video
import sys
import os
import util
import numpy as np
import process_aligned_json as pjson
from sentence import Sentence
import cv2
import label
from sublinebreak import SublineBreaker
from moviepy.editor import *
import moviepy.video.fx.all as vfx
import videoclips
class Character:
def __init__(self, charobj):
self.obj = charobj
self.stroke = None
class Stroke:
def __init__(self, strokeobj, video):
self.obj = strokeobj
self.video = video
self.list_of_chars = []
self.stcgroup = None
class StcStroke:
def __init__(self, subline, list_of_strokes, stc_id, sentence, stcstrokedir):
self.subline = subline
self.list_of_strokes = list_of_strokes
for stroke in list_of_strokes:
stroke.stcgroup = self
self.stc_id = stc_id
self.stc = sentence
if sentence is not None:
sentence.stcstroke = self
self.obj = VisualObject.group([stroke.obj for stroke in self.list_of_strokes], stcstrokedir, imgname = "sentence%06i.png"%(stc_id) )
self.objdir = stcstrokedir
def obj_upto_inline(self,figdir):
linegroup = self.subline.linegroup
sub_id = self.subline.sub_line_id
list_of_objs = []
for i in range(0, sub_id): #all previous sublines
grayobj = linegroup.list_of_sublines[i].obj.copy()
grayobj.img = util.fg2gray(grayobj.img, 175)
list_of_objs.append(grayobj)
for stcstroke in self.subline.list_of_stcstrokes:
list_of_objs.append(stcstroke.obj)
if stcstroke == self:
break
obj = VisualObject.group(list_of_objs, figdir, "line%i_upto_sub%i_stc%i.png"%(linegroup.line_id, sub_id, self.stc_id))
return obj
def obj_inline(self,figdir):
linegroup = self.subline.linegroup
sub_id = self.subline.sub_line_id
list_of_objs = []
for i in range(0, sub_id): #all previous sublines
grayobj = linegroup.list_of_sublines[i].obj.copy()
grayobj.img = util.fg2gray(grayobj.img, 175)
list_of_objs.append(grayobj)
for stcstroke in self.subline.list_of_stcstrokes:
if stcstroke == self:
list_of_objs.append(stcstroke.obj)
break
else:
grayobj = stcstroke.obj.copy()
grayobj.img = util.fg2gray(grayobj.img, 175)
list_of_objs.append(grayobj)
obj = VisualObject.group(list_of_objs, figdir, "line%i_upto_sub%i_stc%i.png"%(linegroup.line_id, sub_id, self.stc_id))
return obj
def obj_inline_range(self,figdir, id1, id2):
linegroup = self.subline.linegroup
sub_id = self.subline.sub_line_id
list_of_objs = []
for i in range(0, sub_id): #all previous sublines
grayobj = linegroup.list_of_sublines[i].obj.copy()
grayobj.img = util.fg2gray(grayobj.img, 175)
list_of_objs.append(grayobj)
for j in range(0, len(self.subline.list_of_sentences)):
sentence = self.subline.list_of_sentences[j]
if sentence.stcstroke is None:
continue
stcstroke = sentence.stcstroke
if id1 <= j and j <= id2:
list_of_objs.append(stcstroke.obj)
else:
grayobj = stcstroke.obj.copy()
grayobj.img = util.fg2gray(grayobj.img, 175)
list_of_objs.append(grayobj)
if stcstroke == self:
break
obj = VisualObject.group(list_of_objs, figdir, "line%i_upto_sub%i_stc%i.png"%(linegroup.line_id, sub_id, self.stc_id))
return obj
class SubLine:
def __init__(self, list_of_strokes, line_id, sub_line_id, sublinedir):
self.list_of_strokes = list_of_strokes
self.line_id = line_id
self.sub_line_id = sub_line_id
self.list_of_sentences = []
self.list_of_video_sentences = []
self.list_of_stcstrokes = []
self.linegroup = None
self.obj_in_line = None
self.obj = VisualObject.group([stroke.obj for stroke in self.list_of_strokes], sublinedir, imgname = "line%06i_%06i.png" % (self.line_id, self.sub_line_id))
self.objdir = sublinedir
self.list_of_labels = []
self.list_of_subsublines = []
def add_label(self, pos):
n = len(self.list_of_labels)
lb = label.getlabels(len(self.list_of_labels), 1)
lb[0].changepos(pos)
self.list_of_labels.append(lb[0])
return '[Figure %i - %i (%s)]' % (self.line_id+1, self.sub_line_id+1, chr(ord('a') +n))
def link_stcstrokes(self, stcstrokedir):
"""Link each stroke in self.list_of_strokes to exactly one sentence in self.list_of_sentences, grouping them into StcStroke objects."""
n_stcs = len(self.list_of_sentences)
if (n_stcs == 0):
stcstroke = StcStroke(self, self.list_of_strokes, -1, None, stcstrokedir)
self.list_of_stcstrokes.append(stcstroke)
return
closest_stc_ids = []
for stroke in self.list_of_strokes:
min_dist = float("inf")
closest_stc_id = -1
for i in range(0, n_stcs):
stc = self.list_of_sentences[i]
dist = VisualObject.obj_stc_distance(stroke.obj, stc.list_of_words, stc.video)
if (dist < min_dist):
min_dist = dist
closest_stc_id = i
closest_stc_ids.append(closest_stc_id)
closest_stc_ids = np.array(closest_stc_ids)
for i in range(0, n_stcs):
stc = self.list_of_sentences[i]
stroke_ids = np.where(closest_stc_ids == i)[0]
if (len(stroke_ids) > 0):
stc_list_of_strokes = [self.list_of_strokes[x] for x in stroke_ids]
stcstroke = StcStroke(self, stc_list_of_strokes, stc.id, stc, stcstrokedir)
self.list_of_stcstrokes.append(stcstroke)
def link_linegroup(self, linegroup):
self.linegroup = linegroup
list_of_imgobjs = []
for subline in linegroup.list_of_sublines:
if subline == self:
for stroke in subline.list_of_strokes:
list_of_imgobjs.append(stroke.obj)
else:
for stroke in subline.list_of_strokes:
grayobj = stroke.obj.copy()
grayobj.img = util.fg2gray(stroke.obj.img, 200)
list_of_imgobjs.append(grayobj)
self.obj_in_line = VisualObject.group(list_of_imgobjs, self.objdir, imgname="inline%06i_%06i.png" % (self.line_id, self.sub_line_id))
def write_video(self, videodir, myvideo):
lineid = self.line_id
subid = self.sub_line_id
filename = "line%i_sub%i"%(lineid, subid)
imgobj_startt = self.obj.start_fid
imgobj_endt = self.obj.end_fid
if len(self.list_of_video_sentences) > 0:
stc_startt = self.list_of_video_sentences[0].start_fid
stc_endt = self.list_of_video_sentences[-1].end_fid
else:
stc_startt = float("inf")
stc_endt = -1
self.video_startt = myvideo.fid2sec(min(stc_startt, imgobj_startt))
self.video_endt = myvideo.fid2sec(max(stc_endt, imgobj_endt)) + 1.0
self.video_endt = min(myvideo.endt/1000.0, self.video_endt)
# print 'startt', self.video_startt, 'endt', self.video_endt
subclip = VideoFileClip(myvideo.filepath).subclip(self.video_startt, self.video_endt)
subclip = videoclips.colormask(subclip, self.obj.tlx, self.obj.tly, self.obj.brx, self.obj.bry)
clipsrc = videodir + "/" + filename + ".mp4"
subclip.write_videofile(clipsrc, codec='libx264', audio_codec='aac', temp_audiofile='temp-audio.m4a', remove_temp=True) # Many options...
# print 'tlx, tly, brx, bry', self.linegroup.obj.tlx, self.linegroup.obj.tly, self.linegroup.obj.brx, self.linegroup.obj.bry
subclip_crop = vfx.crop(subclip, self.linegroup.obj.tlx, self.linegroup.obj.tly, self.linegroup.obj.brx, self.linegroup.obj.bry)
cropsrc = videodir + "/" + filename +"_crop.mp4"
subclip_crop.write_videofile(cropsrc, codec='libx264', audio_codec='aac', temp_audiofile='temp-audio.m4a', remove_temp=True) # Many options...
class LineGroup:
def __init__(self, list_of_sublines, line_id, linedir):
self.list_of_sublines = list_of_sublines
self.line_id = line_id
self.linedir = linedir
list_of_objs = []
for i in range(0, len(list_of_sublines)):
subline = list_of_sublines[i]
subline.link_linegroup(self)
for stroke in subline.list_of_strokes:
list_of_objs.append(stroke.obj)
self.obj = VisualObject.group(list_of_objs, self.linedir, imgname="line%06i.png" % (line_id))
def obj_upto_subline(self, subline_id):
list_of_objs = []
for i in range(0, subline_id):
grayobj = self.list_of_sublines[i].obj.copy()
grayobj.img = util.fg2gray(grayobj.img, 175)
list_of_objs.append(grayobj)
list_of_objs.append(self.list_of_sublines[subline_id].obj)
return list_of_objs
def obj_highlight_subline(self, subline_id):
list_of_objs = []
for i in range(0, len(self.list_of_sublines)):
obj = self.list_of_sublines[i].obj.copy()
if i < subline_id:
obj.img = util.fg2gray(obj.img, 175)
elif i > subline_id:
obj.img = np.ones(obj.img.shape)*255
list_of_objs.append(obj)
return list_of_objs
def link_char_strokes(list_of_chars, list_of_strokes):
for char in list_of_chars:
charname = os.path.basename(char.obj.imgpath)
charname = os.path.splitext(charname)[0]
for stroke in list_of_strokes:
strokename = os.path.basename(stroke.obj.imgpath)
strokename = os.path.splitext(strokename)[0]
if strokename in charname:
stroke.list_of_chars.append(char)
char.stroke = stroke
break
def get_strokes(video, objdir):
list_of_strokeobjs = VisualObject.objs_from_file(video, objdir)
list_of_strokes = []
for obj in list_of_strokeobjs:
list_of_strokes.append(Stroke(obj, video))
return list_of_strokes
def get_chars(video, xcutdir):
list_of_charobjs = VisualObject.objs_from_file(video, xcutdir)
list_of_chars = []
for charobj in list_of_charobjs:
list_of_chars.append(Character(charobj))
return list_of_chars
def get_sublines(list_of_strokes, linetxt, list_of_sentences, sublinedir, stcstrokesdir):
line_ids = util.stringlist_from_txt(linetxt)
line_ids = util.strings2ints(line_ids)
n_lines = len(np.unique(np.array(line_ids)))
line_ids.append(-1)
list_of_sublines = []
sub_ids = [0 for x in range(0, n_lines)]
start_i = 0
for i in range(0, len(list_of_strokes)):
cur_lineid = line_ids[i]
next_id = line_ids[i + 1]
if (cur_lineid != next_id):
sub_lineid = sub_ids[cur_lineid]
subline = SubLine(list_of_strokes[start_i:i + 1], cur_lineid, sub_lineid, sublinedir)
sub_ids[cur_lineid] += 1
list_of_sublines.append(subline)
start_i = i + 1
link_stc_to_sublines(list_of_sentences, list_of_sublines)
link_stc_to_subline_videos(list_of_sentences, list_of_sublines)
for subline in list_of_sublines:
subline.link_stcstrokes(stcstrokesdir)
return list_of_sublines
def link_stc_to_sublines(list_of_sentences, list_of_sublines):
"""A sentence is associated with a subline, or none.
If more than 75% of the sentence overlaps with drawing time, it is linked with that subline."""
for subline in list_of_sublines:
del subline.list_of_sentences[:]
n_sublines = len(list_of_sublines)
closest_subline_ids = []
for stc in list_of_sentences:
stc_length_fid = stc.video.ms2fid(stc.endt) - stc.video.ms2fid(stc.startt)
closest_subline = None
closest_id = -1
min_dist = float("inf")
for i in range(0, n_sublines):
subline = list_of_sublines[i]
dist = VisualObject.obj_stc_distance(subline.obj, stc.list_of_words, stc.video)
if (dist < 0 and dist < min_dist):
min_dist = dist
closest_subline = list_of_sublines[i]
closest_id = i
closest_subline_ids.append(closest_id)
if (closest_subline is not None and abs(min_dist) >= 0.75*stc_length_fid):
closest_subline.list_of_sentences.append(stc)
stc.subline = closest_subline
stc.subline_index = closest_id
return closest_subline_ids
def link_stc_to_subline_videos(list_of_sentences, list_of_sublines):
"""A sentence is associated with a single subline video.
All sentences that end after the current video starts and before the next video starts are assigned to it."""
"""Clear"""
for subline in list_of_sublines:
del subline.list_of_video_sentences[:]
n_sublines = len(list_of_sublines)
closest_subline_ids = []
for stc in list_of_sentences:
closest_subline = None
closest_id = -1
min_dist = float("inf")
for i in range(0, n_sublines):
subline = list_of_sublines[i]
dist = VisualObject.obj_stc_distance(subline.obj, stc.list_of_words, stc.video)
if (dist < min_dist):
min_dist = dist
closest_subline = list_of_sublines[i]
closest_id = i
closest_subline_ids.append(closest_id)
stc.subline_video = closest_subline
closest_subline.list_of_video_sentences.append(stc)
return closest_subline_ids
def get_linegroups(list_of_sublines, linetxt, linedir):
line_ids = util.stringlist_from_txt(linetxt)
line_ids = util.strings2ints(line_ids)
numlines = len(np.unique(np.array(line_ids)))
list_of_linegroups = []
for i in range(0, numlines):
sublines_i = []
for subline in list_of_sublines:
if subline.line_id == i:
sublines_i.append(subline)
line_i = LineGroup(sublines_i, i, linedir)
list_of_linegroups.append(line_i)
return list_of_linegroups
def draw(panorama, list_of_linegroups):
panorama_copy = panorama.copy()
for linegroup in list_of_linegroups:
obj = linegroup.obj
cv2.rectangle(panorama_copy, (obj.tlx, obj.tly), (obj.brx, obj.bry), (0, 0, 255), 1)
for subline in linegroup.list_of_sublines:
obj = subline.obj
cv2.rectangle(panorama_copy, (obj.tlx, obj.tly), (obj.brx, obj.bry), (255, 0, 255), 1)
for stcstroke in subline.list_of_stcstrokes:
obj = stcstroke.obj
cv2.rectangle(panorama_copy, (obj.tlx, obj.tly), (obj.brx, obj.bry), (255, 0, 0), 1)
for stroke in stcstroke.list_of_strokes:
obj = stroke.obj
cv2.rectangle(panorama_copy, (obj.tlx, obj.tly), (obj.brx, obj.bry), (255, 255, 0), 1)
for char in stroke.list_of_chars:
obj = char.obj
cv2.rectangle(panorama_copy, (obj.tlx, obj.tly), (obj.brx, obj.bry), (0, 0, 0), 1)
return panorama_copy
def getvisuals(videopath, panoramapath, objdir, scriptpath):
video = Video(videopath)
panorama = cv2.imread(panoramapath)
"""strokes"""
list_of_strokes = get_strokes(video, objdir)
"""xcut characters"""
xcutdir = objdir + "/xcut"
list_of_chars = get_chars(video, xcutdir)
link_char_strokes(list_of_chars, list_of_strokes)
"""sublines"""
sublinedir = objdir + "/sublines_15_03_18"
stcstrokesdir = objdir + "/stcstrokes_15_03_18"
linetxt = objdir + "/linebreak_wo_area_compact_adjust_xgap_ids.txt"
list_of_words = pjson.get_words(scriptpath)
list_of_stcs = pjson.get_sentences(list_of_words)
list_of_sentences = []
stcid = 0
for stc in list_of_stcs:
list_of_sentences.append(Sentence(stc, video, stcid))
stcid += 1
list_of_sublines = get_sublines(list_of_strokes, linetxt, list_of_sentences, sublinedir, stcstrokesdir)
"""lines"""
linedir = objdir + "/linegroups"
list_of_linegroups = get_linegroups(list_of_sublines, linetxt, linedir)
list_of_stcstrokes = []
for subline in list_of_sublines:
list_of_stcstrokes = list_of_stcstrokes + subline.list_of_stcstrokes
"""break sublines"""
for subline in list_of_sublines:
break_subline(subline, list_of_sentences)
return [panorama, list_of_linegroups, list_of_sublines, list_of_stcstrokes, list_of_strokes, list_of_chars, list_of_sentences]
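# Illustrative call (paths are placeholders; getvisuals expects the lecture
# video, the stitched panorama image, the per-stroke object directory and the
# aligned transcript JSON produced earlier in the pipeline):
#
# panorama, linegroups, sublines, stcstrokes, strokes, chars, sentences = \
#     getvisuals("lecture.mp4", "panorama.png", "objs", "aligned_script.json")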
def panorama_lines(panorama, list_of_linegroups):
panorama_copy = panorama.copy()
for linegroup in list_of_linegroups:
obj = linegroup.obj
cv2.rectangle(panorama_copy, (obj.tlx, obj.tly), (obj.brx, obj.bry), (0, 0, 0), 2)
return panorama_copy
def break_subline(subline, list_of_sentences):
sb = SublineBreaker(subline, list_of_sentences)
subline.list_of_subsublines = sb.breaklines()
if __name__ == "__main__":
videopath = sys.argv[1]
panoramapath = sys.argv[2]
objdir = sys.argv[3]
scriptpath = sys.argv[4]
video = Video(videopath)
"""strokes"""
list_of_strokes = get_strokes(video, objdir)
"""xcut characters"""
xcutdir = objdir + "/xcut"
list_of_chars = get_chars(video, xcutdir)
link_char_strokes(list_of_chars, list_of_strokes)
"""sublines"""
linetxt = objdir + "/line_ids.txt"
list_of_words = pjson.get_words(scriptpath)
list_of_stcs = pjson.get_sentences(list_of_words)
list_of_sentences = []
stcid = 0
for stc in list_of_stcs:
list_of_sentences.append(Sentence(stc, video, stcid))
stcid += 1
sublinedir = objdir + "/sublines"
stcstrokesdir = objdir + "/stcstrokes"
if not os.path.exists(os.path.abspath(sublinedir)):
os.makedirs(os.path.abspath(sublinedir))
if not os.path.exists(os.path.abspath(stcstrokesdir)):
os.makedirs(os.path.abspath(stcstrokesdir))
list_of_sublines = get_sublines(list_of_strokes, linetxt, list_of_sentences, sublinedir, stcstrokesdir)
VisualObject.write_to_file(sublinedir + "/obj_info.txt", [subline.obj for subline in list_of_sublines])
list_of_stcstrokes = []
for subline in list_of_sublines:
list_of_stcstrokes = list_of_stcstrokes + subline.list_of_stcstrokes
VisualObject.write_to_file(stcstrokesdir + "/obj_info.txt", [stcstroke.obj for stcstroke in list_of_stcstrokes])
"""lines and sublines_inline"""
linedir = objdir + "/linegroups"
if not os.path.exists(os.path.abspath(linedir)):
os.makedirs(os.path.abspath(linedir))
list_of_linegroups = get_linegroups(list_of_sublines, linetxt, linedir)
"""break sublines"""
for subline in list_of_sublines:
break_subline(subline, list_of_sentences)
VisualObject.write_to_file(linedir + "/obj_info.txt", [line.obj for line in list_of_linegroups])
VisualObject.write_to_file(sublinedir + "/inline_obj_info.txt", [subline.obj_in_line for subline in list_of_sublines])
|
|
import os
from glob import glob
import xarray as xr
import pandas as pd
import geopandas as gpd
import bmorph
import numpy as np
from scipy.stats import entropy
from string import Template
import subprocess
CONTROL_TEMPLATE = Template(
"""<ancil_dir> $ancil_dir !
<input_dir> $input_dir !
<output_dir> $output_dir !
<sim_start> $sim_start !
<sim_end> $sim_end !
<fname_ntopOld> $topo_file !
<dname_nhru> seg !
<dname_sseg> seg !
<seg_outlet> -9999 !
<fname_qsim> $flow_file !
<vname_qsim> scbc_flow !
<vname_time> time !
<dname_time> time !
<dname_hruid> seg !
<vname_hruid> seg !
<units_qsim> mm/d !
<dt_qsim> 86400 !
<is_remap> F !
<route_opt> 1 !
<case_name> $out_name !
<param_nml> param.nml.default !
<doesBasinRoute> 0 !
<varname_area> Contrib_Area !
<varname_length> Length !
<varname_slope> Slope !
<varname_HRUid> seg_id !
<varname_segId> seg_id !
<varname_downSegId> Tosegment !
<varname_hruSegId> seg_id !
""")
def write_mizuroute_config(region, scbc_type, time_window,
config_dir='../mizuroute_configs/',
topo_dir='../topologies/',
input_dir='../input/',
output_dir='../output/'):
mizuroute_config = {
'ancil_dir': os.path.abspath(topo_dir)+'/',
'input_dir': os.path.abspath(input_dir)+'/',
'output_dir': os.path.abspath(output_dir)+'/',
'sim_start': time_window[0].strftime("%Y-%m-%d"),
'sim_end': time_window[1].strftime("%Y-%m-%d"),
'topo_file': f'{region}_huc12_topology_scaled_area.nc',
'flow_file': f'{region}_local_{scbc_type}_scbc.nc',
'out_name': f'{region}_{scbc_type}_scbc'
}
config_path = os.path.abspath(f'{config_dir}reroute_{region}_{scbc_type}.control')
with open(config_path, 'w') as f:
f.write(CONTROL_TEMPLATE.substitute(mizuroute_config))
return config_path, mizuroute_config
def run_mizuroute(mizuroute_exe, mizuroute_config):
cmd = f'{mizuroute_exe} {mizuroute_config}'
p = subprocess.run([cmd], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
return p
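# Example wiring of the two helpers above (a sketch; the executable name,
# region, scbc_type and time window are placeholders):
def _example_reroute():
    time_window = (pd.Timestamp('1990-10-01'), pd.Timestamp('2000-09-30'))
    config_path, _ = write_mizuroute_config('yakima', 'univ', time_window)
    # Blocks until mizuroute exits; stdout/stderr are captured on the
    # returned CompletedProcess object.
    return run_mizuroute('./route_runoff.exe', config_path)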
def find_up(ds, seg, sel_method='first', sel_var='IRFroutedRunoff'):
"""
Finds the segment directly upstream of seg. If seg is a headwater
segment, np.nan is returned instead.
Parameters
---------
ds: xr.Dataset
Dataset containing river segments as 'seg', headwater
segments by 'is_headwaters', and what is downstream of
each seg in 'down_seg'.
seg: int
River segment designation to search from.
sel_method: str
Method to use when selecting among multiple upstream
segments. Can be 'first'/'forward_fill', 'r2', or 'kge'.
sel_var: str
Variable name of the flows compared when selecting among
multiple upstream segments (e.g. 'IRFroutedRunoff').
Returns
-------
up_seg: int
Upstream segment designation found, or np.nan if seg
is a headwater segment.
"""
if ds.sel(seg=seg)['is_headwaters']:
return np.nan
up_idxs = np.argwhere(ds['down_seg'].values == seg).flatten()
if sel_method == 'first' or sel_method == 'forward_fill':
up_idx = up_idxs[0]
elif sel_method == 'r2':
idx_of_up_idx = np.argmax([
np.corrcoef(ds[sel_var].sel(seg=seg), ds[sel_var].isel(seg=i))[0, 1]**2
for i in up_idxs])
up_idx = up_idxs[idx_of_up_idx]
elif sel_method == 'kge':
idx_of_up_idx = np.argmax([
kling_gupta_efficiency(ds[sel_var].sel(seg=seg), ds[sel_var].isel(seg=i))
for i in up_idxs])
up_idx = up_idxs[idx_of_up_idx]
elif sel_method == 'kldiv':
raise NotImplementedError('kldiv has not been implemented, please select ',
'forward_fill, r2, or kge')
up_seg = ds['seg'].isel(seg=up_idx).values[()]
return up_seg
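# Tiny synthetic example for find_up (a sketch): segs 1 and 2 both drain into
# seg 3, and seg 1 is a headwater.
def _example_find_up():
    ds = xr.Dataset(
        {'down_seg': ('seg', [3, 3, -1]),
         'is_headwaters': ('seg', [True, False, False])},
        coords={'seg': [1, 2, 3]})
    assert np.isnan(find_up(ds, 1))            # headwater -> np.nan
    return find_up(ds, 3, sel_method='first')  # -> 1, the first upstream seg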
def walk_down(ds, start_seg):
"""
Finds the nearest downstream gauge site and returns the distance
traveled to reach it from start_seg.
Parameters
---------
ds: xr.Dataset
Dataset containing river segments, downstream segs, the length
of the river segments, and which segs are gauge sites as
'seg', 'down_seg', 'length', and 'is_gauge', respectively.
start_seg: int
River segment designation to start walking from to a
downstream gauge site.
Returns
-------
tot_length: float
Total length traveled during walk, (e.g. cumulative river
distance from start_seg to the downstream gauge site).
cur_seg: int
River segment designation of the gauge site reached.
"""
tot_length = 0.0
cur_seg = start_seg
if ds['is_gauge'].sel(seg=cur_seg):
return 0.0, cur_seg
else:
while (ds['down_seg'].sel(seg=cur_seg).values[()] in ds['seg'].values
and not ds['is_gauge'].sel(seg=ds['down_seg'].sel(seg=cur_seg).values[()]).values[()]):
cur_seg = ds['down_seg'].sel(seg=cur_seg).values[()]
tot_length += ds.sel(seg=cur_seg)['length'].values[()]
cur_seg = ds['down_seg'].sel(seg=cur_seg).values[()]
return tot_length, cur_seg
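# Small synthetic network for walk_down (a sketch): 1 -> 2 -> 3 where seg 3 is
# the only gauge.  The returned tuple is the cumulative length walked and the
# seg id of the gauge that was reached.
def _example_walk_down():
    ds = xr.Dataset(
        {'down_seg': ('seg', [2, 3, -1]),
         'length': ('seg', [1000.0, 2000.0, 3000.0]),
         'is_gauge': ('seg', [False, False, True])},
        coords={'seg': [1, 2, 3]})
    tot_length, gauge_seg = walk_down(ds, 1)
    return tot_length, gauge_seg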
def walk_up(ds, start_seg):
"""
Finds the nearest upstream gauge site and returns the distance
traveled to reach it from start_seg.
Parameters
---------
ds: xr.Dataset
Dataset containing river segments, upstream segs, the length
of the river segments, and which segs are gauge sites as
'seg', 'up_seg', 'length', and 'is_gauge', respectively.
start_seg: int
River segment designation to start walking from to an
upstream gauge site.
Returns
-------
tot_length: float
Total length traveled during walk, (e.g. cumulative river
distance from start_seg to the upstream gauge site).
cur_seg: int
River segment designation of the gauge site reached.
"""
tot_length = 0.0
cur_seg = start_seg
if ds['is_gauge'].sel(seg=cur_seg):
return 0.0, cur_seg
else:
# assume flows are at the end of the reach, so if we are
# walking upstream we will be walking through start_seg
# and need to account for that
tot_length += ds.sel(seg=cur_seg)['length'].values[()]
while (ds['up_seg'].sel(seg=cur_seg).values[()] in ds['seg'].values
and not ds['is_gauge'].sel(seg=ds['up_seg'].sel(seg=cur_seg).values[()]).values[()]):
cur_seg = ds['up_seg'].sel(seg=cur_seg).values[()]
tot_length += ds.sel(seg=cur_seg)['length'].values[()]
cur_seg = ds['up_seg'].sel(seg=cur_seg).values[()]
return tot_length, cur_seg
def find_max_r2(ds, curr_seg_flow):
"""
Searches through ds to find which seg has the greatest
r2 value with respect to curr_seg_flow. If no seg is found,
max_r2 = 0 and max_r2_ref_seg = -1.
Parameters
----------
ds: xr.Dataset
Contains the variable 'reference_flow' to compare
curr_seg_flow against and the coordinate 'seg'.
curr_seg_flow: np.array
A numpy array containing flow values that r2 is to
be maximized with respect to.
Returns
-------
max_r2: float
Magnitude of the maximum R squared value found.
max_r2_ref_seg: int
River segment designation corresponding to the max_r2.
"""
max_r2 = 0.0
max_r2_ref_seg = -1
for ref_seg in ds['seg'].values:
ref_flow = ds.sel(seg=ref_seg).values
curr_ref_r2 = np.corrcoef(curr_seg_flow, ref_flow)[0, 1]**2
if curr_ref_r2 > max_r2:
max_r2 = curr_ref_r2
max_r2_ref_seg = ref_seg
return max_r2, max_r2_ref_seg
def find_min_kldiv(ds, curr_seg_flow):
"""
Searches through ds to find which seg has the smallest
Kullback-Leibler Divergence value with respect to curr_seg_flow.
If no seg is found, min_kldiv = -1 and min_kldiv_ref_seg = -1.
https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence
Parameters
----------
ds: xr.Dataset
contains the variable 'reference_flow' to compare
curr_seg_flow against and the coordinate 'seg'.
curr_seg_flow: np.array
a numpy array containing flow values that KL Divergence
is to be minimized with respect to.
Returns
-------
min_kldiv: float
Magnitude of the minimum KL Divergence found.
min_kldiv_ref_seg: int
River segment designation corresponding to min_kldiv.
"""
TINY_VAL = 1e-6
min_kldiv = np.inf
min_kldiv_ref_seg = -1
total_bins = int(np.sqrt(len(curr_seg_flow)))
curr_seg_flow_pdf, curr_seg_flow_edges = np.histogram(
curr_seg_flow, bins=total_bins, density=True)
curr_seg_flow_pdf[curr_seg_flow_pdf == 0] = TINY_VAL
for ref_seg in ds['seg'].values:
ref_flow = ds.sel(seg=ref_seg).values
ref_flow_pdf = np.histogram(ref_flow, bins=curr_seg_flow_edges, density=True)[0]
ref_flow_pdf[ref_flow_pdf == 0] = TINY_VAL
curr_ref_kldiv = entropy(pk=ref_flow_pdf, qk=curr_seg_flow_pdf)
if curr_ref_kldiv < min_kldiv:
min_kldiv = curr_ref_kldiv
min_kldiv_ref_seg = ref_seg
if min_kldiv == np.inf:
# meaning something went wrong and kldiv cannot be used
# to select reference sites
min_kldiv = -1
# kl divergence can never be less than zero, so we can
# trust that if a -1 pops up down the line, it is because
# we set it here and something went wrong ... but we don't
# want the method to break and stop running for other sites
return min_kldiv, min_kldiv_ref_seg
def kling_gupta_efficiency(sim, obs):
"""
Calculates the Kling-Gupta Efficiency (KGE) between two flow arrays.
https://agrimetsoft.com/calculators/Kling-Gupta%20efficiency
Parameters
---------
sim: array-like
Simulated flow array.
obs: array-like
Observed flow array.
Returns
-------
kge: float
Kling-Gupta Efficiency calculated between the two arrays.
"""
obs = np.asarray(obs)
sim = np.asarray(sim)
obs_filtered = obs[np.logical_and(~np.isnan(obs), ~np.isnan(sim))]
sim_filtered = sim[np.logical_and(~np.isnan(obs), ~np.isnan(sim))]
sim_std = np.std(sim_filtered, ddof=1)
obs_std = np.std(obs_filtered, ddof=1)
sim_mu = np.mean(sim_filtered)
obs_mu = np.mean(obs_filtered)
r = np.corrcoef(sim_filtered, obs_filtered)[0, 1]
var = sim_std / obs_std
bias = sim_mu / obs_mu
kge = 1 - np.sqrt((bias-1)**2 + (var-1)**2 + (r-1)**2)
return kge
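# Worked example (a sketch): identical series give a KGE of 1, and NaNs are
# dropped pairwise before the correlation, bias and variability terms are
# computed.
def _example_kge():
    obs = np.array([1.0, 2.0, 3.0, 4.0, np.nan])
    sim = np.array([1.1, 1.9, 3.2, 3.8, 5.0])
    perfect = kling_gupta_efficiency(obs, obs)  # 1.0
    score = kling_gupta_efficiency(sim, obs)    # < 1.0
    return perfect, score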
def find_max_kge(ds, curr_seg_flow):
"""
Searches through ds to find which seg has the largest
Kling-Gupta Efficiency (KGE) value with respect to curr_seg_flow.
If no seg is found, max_kge = -np.inf and max_kge_ref_seg = -1.
Parameters
----------
ds: xr.Dataset
Contains the variable 'reference_flow' to compare
curr_seg_flow against and the coordinate 'seg'.
curr_seg_flow: np.array
A numpy array containing flow values that KGE
is to be maximized with respect to.
Returns
-------
max_kge: float
Maximum KGE value found.
max_kge_ref_seg: int
River segment designation corresponding to max_kge.
"""
max_kge = -np.inf
max_kge_ref_seg = -1
all_kge = [kling_gupta_efficiency(curr_seg_flow, ds.sel(seg=ref_seg).values)
for ref_seg in ds['seg'].values]
return np.max(all_kge), ds['seg'].values[np.argmax(all_kge)]
def trim_time(dataset_list: list):
"""
Trims all times of the xr.Datasets in the list to the shortest timeseries.
Parameters
----------
dataset_list: List[xr.Dataset]
Contains a list of xr.Datasets
Returns
-------
list
Contains a list in the same order as dataset_list except with all items
in the list having the same start and end time.
"""
t_starts = list()
t_finishes = list()
for ds in dataset_list:
assert isinstance(ds, xr.Dataset) #quick type check
t_ds = ds.time.values[[0, -1]]
t_starts.append(t_ds[0])
t_finishes.append(t_ds[1])
t_trim_start = np.max(t_starts)
t_trim_finish = np.min(t_finishes)
t_slice = slice(t_trim_start, t_trim_finish)
dataset_list_trimmed = list()
for ds in dataset_list:
dataset_list_trimmed.append(ds.sel(time=t_slice))
return dataset_list_trimmed
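# Sketch: two daily datasets with different spans are trimmed to their common
# overlap (2000-06-01 through 2000-12-31 in this synthetic case).
def _example_trim_time():
    t1 = pd.date_range('2000-01-01', '2000-12-31')
    t2 = pd.date_range('2000-06-01', '2001-06-01')
    ds1 = xr.Dataset({'flow': ('time', np.arange(len(t1)))}, coords={'time': t1})
    ds2 = xr.Dataset({'flow': ('time', np.arange(len(t2)))}, coords={'time': t2})
    ds1_trim, ds2_trim = trim_time([ds1, ds2])
    return ds1_trim, ds2_trim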
def map_segs_topology(routed: xr.Dataset, topology: xr.Dataset):
"""
Adds contributing_area, average elevation, length, and down_seg to
routed from topology.
Parameters
---------
routed: xr.Dataset
Contains streamflow timeseries mapped to river segments denoted
as 'seg'.
topology: xr.Dataset
Contains topological data of the watershed that routed's streamflow
timeseries describe. River segment designations, lengths, and
immediate downstream segments are expected as 'seg', 'Length',
and 'Tosegment'.
Returns
-------
routed: xr.Dataset
The input dataset routed updated with the topological data.
"""
routed = routed.sel(seg=topology['seg'])
#routed['contributing_area'] = topology['Contrib_Area']
#routed['elevation'] = 0.5 * (topology['TopElev'] + topology['BotElev'])
routed['length'] = topology['Length']
routed['down_seg'] = topology['Tosegment']
return routed
def map_ref_sites(routed: xr.Dataset, gauge_reference: xr.Dataset,
gauge_sites=None, route_var='IRFroutedRunoff',
fill_method='r2', min_kge=-0.41):
"""
Assigns each seg within routed a boolean 'is_gauge' identifier and
determines each seg's upstream and downstream reference seg designations.
Parameters
----------
routed: xr.Dataset
Contains the input flow timeseries data.
gauge_reference: xr.Dataset
Contains reference flow timeseries data for the same watershed
as the routed dataset.
gauge_sites: list, optional
If None, gauge_sites will be taken as all those listed in
gauge_reference.
route_var: str
Variable name of flows used for fill_method purposes within routed.
This is defaulted as 'IRFroutedRunoff'.
fill_method: str
While finding some upstream/downstream reference segs may be simple,
(segs with 'is_gauge' = True are their own reference segs, others
may be easy to find looking directly up or downstream), some river
networks may have multiple options to select gauge sites and may fail
to have upstream/downstream reference segs designated. 'fill_method'
specifies how segs should be assigned upstream/downstream reference
segs for bias correction if they are missed walking upstream or downstream.
Currently supported methods:
'leave_null'
nothing is done to fill missing reference segs, np.nan values are
replaced with a -1 seg designation and that's it
'forward_fill'
xarray's ffill method is used to fill in any np.nan values
'r2'
reference segs are selected based on which reference site that
seg's flows has the greatest r2 value with
'kldiv'
reference segs are selected based on which reference site that
seg's flows has the smallest KL Divergence value with
'kge'
reference segs are selected based on which reference site that
seg's flows has the greatest KGE value with
Returns
-------
routed: xr.Dataset
Routed timeseries with reference gauge site river segments assigned to
each river segment in the original routed.
"""
if isinstance(gauge_sites, type(None)):
gauge_sites = gauge_reference['site'].values
else:
# need to typecheck since we do a for loop later and don't
# want to end up iterating through a string by accident
assert isinstance(gauge_sites, list)
gauge_segs = gauge_reference.sel(site=gauge_sites)['seg'].values
routed['is_gauge'] = False * routed['seg']
routed['down_ref_seg'] = np.nan * routed['seg']
routed['up_ref_seg'] = np.nan * routed['seg']
routed['up_seg'] = 0 * routed['is_headwaters']
routed['up_seg'].values = [find_up(routed, s, sel_method=fill_method) for s in routed['seg'].values]
for s in routed['seg']:
if s in list(gauge_segs):
routed['is_gauge'].loc[{'seg':s}] = True
routed['down_ref_seg'].loc[{'seg': s}] = s
routed['up_ref_seg'].loc[{'seg': s}] = s
for seg in routed['seg']:
cur_seg = seg.values[()]
while cur_seg in routed['seg'].values and np.isnan(routed['down_ref_seg'].sel(seg=cur_seg)):
cur_seg = routed['down_seg'].sel(seg=cur_seg).values[()]
if cur_seg in routed['seg'].values:
routed['down_ref_seg'].loc[{'seg':seg}] = routed['down_ref_seg'].sel(seg=cur_seg).values[()]
for seg in routed['seg']:
cur_seg = seg.values[()]
while cur_seg in routed['seg'].values and np.isnan(routed['up_ref_seg'].sel(seg=cur_seg)):
cur_seg = routed['up_seg'].sel(seg=cur_seg).values[()]
if cur_seg in routed['seg'].values:
routed['up_ref_seg'].loc[{'seg':seg}] = routed['up_ref_seg'].sel(seg=cur_seg).values[()]
# Fill in any remaining nulls (head/tailwaters)
if fill_method == 'leave_null':
# since there should be no -1 segs from mizuroute, we can set nan's to -1 to acknowledge
# that they have been addressed and still set them apart from the rest of the data
routed['up_ref_seg'] = (routed['up_ref_seg'].where(~np.isnan(routed['up_ref_seg']), other=-1))
routed['down_ref_seg'] = (routed['down_ref_seg'].where(~np.isnan(routed['down_ref_seg']), other=-1))
elif fill_method == 'forward_fill':
routed['up_ref_seg'] = (routed['up_ref_seg'].where(
~np.isnan(routed['up_ref_seg']), other=routed['down_ref_seg'])).ffill('seg')
routed['down_ref_seg'] = (routed['down_ref_seg'].where(
~np.isnan(routed['down_ref_seg']), other=routed['up_ref_seg'])).ffill('seg')
elif fill_method == 'r2':
fill_up_isegs = np.where(np.isnan(routed['up_ref_seg'].values))[0]
fill_down_isegs = np.where(np.isnan(routed['down_ref_seg'].values))[0]
routed['r2_up_gauge'] = 0 * routed['is_gauge']
routed['r2_down_gauge'] = 0 * routed['is_gauge']
for curr_seg in routed['seg'].values:
up_ref_seg = np.nan
curr_seg_flow = routed[route_var].sel(seg=curr_seg).values
if np.isnan(routed['up_ref_seg'].sel(seg=curr_seg).values):
up_ref_r2, up_ref_seg = find_max_r2(routed[route_var].sel(seg=gauge_segs), curr_seg_flow)
routed['r2_up_gauge'].loc[{'seg':curr_seg}] = up_ref_r2
routed['up_ref_seg'].loc[{'seg':curr_seg}] = up_ref_seg
else:
# this seg has already been filled in, but r2 still needs to be calculated
ref_flow = routed[route_var].sel(seg=routed['up_ref_seg'].sel(seg=curr_seg)).values
up_ref_r2 = np.corrcoef(curr_seg_flow, ref_flow)[0, 1]**2
routed['r2_up_gauge'].loc[{'seg':curr_seg}] = up_ref_r2
for curr_seg in routed['seg'].values:
down_ref_seg = np.nan
curr_seg_flow = routed[route_var].sel(seg=curr_seg).values
if np.isnan(routed['down_ref_seg'].sel(seg=curr_seg).values):
down_ref_r2, down_ref_seg = find_max_r2(routed[route_var].sel(seg=gauge_segs), curr_seg_flow)
routed['r2_down_gauge'].loc[{'seg':curr_seg}] = down_ref_r2
routed['down_ref_seg'].loc[{'seg':curr_seg}] = down_ref_seg
else:
# this seg has already been filled in, but r2 still needs to be calculated
ref_flow = routed[route_var].sel(seg=routed['down_ref_seg'].sel(seg=curr_seg)).values
down_ref_r2 = np.corrcoef(curr_seg_flow, ref_flow)[0, 1]**2
routed['r2_down_gauge'].loc[{'seg':curr_seg}] = down_ref_r2
elif fill_method == 'kldiv':
fill_up_isegs = np.where(np.isnan(routed['up_ref_seg'].values))[0]
fill_down_isegs = np.where(np.isnan(routed['down_ref_seg'].values))[0]
routed['kldiv_up_gauge'] = 0 * routed['is_gauge']
routed['kldiv_down_gauge'] = 0 * routed['is_gauge']
for curr_seg in routed['seg'].values:
curr_seg_flow = routed[route_var].sel(seg=curr_seg).values
if np.isnan(routed['up_ref_seg'].sel(seg=curr_seg).values):
up_ref_kldiv, up_ref_seg = find_min_kldiv(routed[route_var].sel(seg=gauge_segs), curr_seg_flow)
routed['kldiv_up_gauge'].loc[{'seg':curr_seg}] = up_ref_kldiv
routed['up_ref_seg'].loc[{'seg':curr_seg}] = up_ref_seg
else:
# this seg has already been filled in, but kldiv still needs to be calculated
# kldiv computation could probably be gutted in the future ...
TINY_VAL = 1e-6
total_bins = int(np.sqrt(len(curr_seg_flow)))
curr_seg_flow_pdf, curr_seg_flow_edges = np.histogram(
curr_seg_flow, bins=total_bins, density=True)
curr_seg_flow_pdf[curr_seg_flow_pdf == 0] = TINY_VAL
ref_flow = routed[route_var].sel(seg=routed['up_ref_seg'].sel(seg=curr_seg).values).values
ref_flow_pdf = np.histogram(ref_flow, bins=curr_seg_flow_edges, density=True)[0]
ref_flow_pdf[ref_flow_pdf == 0] = TINY_VAL
up_ref_kldiv = entropy(pk=ref_flow_pdf, qk=curr_seg_flow_pdf)
routed['kldiv_up_gauge'].loc[{'seg':curr_seg}] = up_ref_kldiv
for curr_seg in routed['seg'].values:
curr_seg_flow = routed[route_var].sel(seg=curr_seg).values
if np.isnan(routed['down_ref_seg'].sel(seg=curr_seg).values):
down_ref_kldiv, down_ref_seg = find_min_kldiv(routed[route_var].sel(seg=gauge_segs), curr_seg_flow)
routed['kldiv_down_gauge'].loc[{'seg':curr_seg}] = down_ref_kldiv
routed['down_ref_seg'].loc[{'seg':curr_seg}] = down_ref_seg
else:
# this seg has already been filled in, but kldiv still needs to be calculated
# kldiv computation could probably be gutted in the future ...
TINY_VAL = 1e-6
total_bins = int(np.sqrt(len(curr_seg_flow)))
curr_seg_flow_pdf, curr_seg_flow_edges = np.histogram(
curr_seg_flow, bins=total_bins, density=True)
curr_seg_flow_pdf[curr_seg_flow_pdf == 0] = TINY_VAL
ref_flow = routed[route_var].sel(seg=routed['down_ref_seg'].sel(seg=curr_seg).values).values
ref_flow_pdf = np.histogram(ref_flow, bins=curr_seg_flow_edges, density=True)[0]
ref_flow_pdf[ref_flow_pdf == 0] = TINY_VAL
down_ref_kldiv = entropy(pk=ref_flow_pdf, qk=curr_seg_flow_pdf)
routed['kldiv_down_gauge'].loc[{'seg':curr_seg}] = down_ref_kldiv
elif fill_method == 'kge':
fill_up_isegs = np.where(np.isnan(routed['up_ref_seg'].values))[0]
fill_down_isegs = np.where(np.isnan(routed['down_ref_seg'].values))[0]
routed['kge_up_gauge'] = min_kge + 0.0 * routed['is_gauge']
routed['kge_down_gauge'] = min_kge + 0.0 * routed['is_gauge']
for curr_seg in routed['seg'].values:
up_ref_seg = np.nan
curr_seg_flow = routed[route_var].sel(seg=curr_seg).values
if np.isnan(routed['up_ref_seg'].sel(seg=curr_seg).values):
up_ref_kge, up_ref_seg = find_max_kge(routed[route_var].sel(seg=gauge_segs), curr_seg_flow)
routed['kge_up_gauge'].loc[{'seg':curr_seg}] = up_ref_kge
routed['up_ref_seg'].loc[{'seg':curr_seg}] = up_ref_seg
else:
# this seg has already been filled in, but kge still needs to be calculated
ref_flow = routed[route_var].sel(seg=routed['up_ref_seg'].sel(seg=curr_seg)).values
up_ref_kge = kling_gupta_efficiency(curr_seg_flow, ref_flow)
routed['kge_up_gauge'].loc[{'seg':curr_seg}] = up_ref_kge
for curr_seg in routed['seg'].values:
down_ref_seg = np.nan
curr_seg_flow = routed[route_var].sel(seg=curr_seg).values
if np.isnan(routed['down_ref_seg'].sel(seg=curr_seg).values):
down_ref_kge, down_ref_seg = find_max_kge(routed[route_var].sel(seg=gauge_segs), curr_seg_flow)
routed['kge_down_gauge'].loc[{'seg':curr_seg}] = down_ref_kge
routed['down_ref_seg'].loc[{'seg':curr_seg}] = down_ref_seg
else:
# this seg has already been filled in, but kge still needs to be calculated
ref_flow = routed[route_var].sel(seg=routed['down_ref_seg'].sel(seg=curr_seg)).values
down_ref_kge = kling_gupta_efficiency(curr_seg_flow, ref_flow)
if down_ref_kge < min_kge:
down_ref_kge, down_ref_seg = find_max_kge(routed[route_var].sel(seg=gauge_segs), curr_seg_flow)
routed['kge_down_gauge'].loc[{'seg':curr_seg}] = down_ref_kge
else:
raise ValueError('Invalid method provided for "fill_method"')
return routed
def map_headwater_sites(routed: xr.Dataset):
"""
Adds a boolean 'is_headwaters' flag identifying whether each river segment is a headwater.
Parameters
----------
routed: xr.Dataset
Contains watershed river segments designated as the dimension 'seg'.
River segments are connected by referencing immediate downstream segments
as 'down_seg' for each 'seg'.
Returns
-------
routed: xr.Dataset
The original routed dataset updated with which sites are headwaters.
"""
if not 'down_seg' in list(routed.var()):
raise Exception("Please denote down segs with 'down_seg'")
routed['is_headwaters'] = False * routed['seg']
headwaters = [s not in routed['down_seg'].values for s in routed['seg'].values]
routed['is_headwaters'].values = headwaters
return routed
def calculate_cdf_blend_factor(routed: xr.Dataset, gauge_reference: xr.Dataset,
gauge_sites=None, fill_method='kldiv', min_kge=-0.41):
"""
Calculates the cumulative distribution function blend factor based on distance
to a seg's nearest upstream gauge site with respect to the total distance
between the two closest gauge sites to the seg.
Parameters
----------
routed: xr.Dataset
Contains flow timeseries data.
gauge_reference: xr.Dataset
Contains reference flow timeseries data for the same watershed
as the routed dataset.
gauge_sites: list, optional
If None, gauge_sites will be taken as all those listed in
gauge_reference.
fill_method: str
See map_ref_sites for full description of how fill_method works.
Because each fill_method selects reference segs differently, calculate_blend_vars
needs to know how they were selected to create blend factors. Note that 'leave_null'
is not supported for this method because there is no filling for this method.
Currently supported:
'forward_fill'
cdf_blend_factor = distance_to_upstream /
(distance_to_upstream + distance_to_downstream)
'kldiv'
cdf_blend_factor = kldiv_upstream / (kldiv_upstream + kldiv_downstream)
'r2'
cdf_blend_factor = r2_upstream / (r2_upstream + r2_downstream)
Returns
-------
routed: xr.Dataset
The original routed dataset updated with 'cdf_blend_factors' used to combine
upstream and downstream relative bias corrections. Each fill_method will also
add or use upstream and downstream statistical measures calculated in map_ref_sites.
"""
if not 'is_gauge' in list(routed.var()):
# needed for walk_up and walk_down
raise Exception("Please denote gauge segs with 'is_gauge'")
routed['cdf_blend_factor'] = 0 * routed['is_gauge']
if fill_method == 'forward_fill':
routed['distance_to_up_gauge'] = 0 * routed['is_gauge']
routed['distance_to_down_gauge'] = 0 * routed['is_gauge']
routed['distance_to_up_gauge'].values = [walk_up(routed, s)[0] for s in routed['seg']]
routed['distance_to_down_gauge'].values = [walk_down(routed, s)[0] for s in routed['seg']]
routed['cdf_blend_factor'].values = (routed['distance_to_up_gauge']
/ (routed['distance_to_up_gauge']
+ routed['distance_to_down_gauge'])).values
else:
if isinstance(gauge_sites, type(None)):
gauge_sites = gauge_reference['site'].values
else:
# need to typecheck since we do a for loop later and don't
# want to end up iterating through a string by accident
assert isinstance(gauge_sites, list)
if fill_method == 'kldiv':
routed['cdf_blend_factor'].values = (routed['kldiv_down_gauge']
/ (routed['kldiv_up_gauge']
+ routed['kldiv_down_gauge'])).values
elif fill_method == 'r2':
routed['cdf_blend_factor'].values = (routed['r2_up_gauge']
/ (routed['r2_up_gauge']
+ routed['r2_down_gauge'])).values
elif fill_method == 'kge':
# since kge can be negative, the blend factor needs scaling
lower_bound = np.min([routed['kge_up_gauge'], routed['kge_down_gauge']])
upper_bound = np.max([routed['kge_up_gauge'], routed['kge_down_gauge']])
routed['cdf_blend_factor'].values = ((routed['kge_up_gauge'] - lower_bound) / (upper_bound - lower_bound))
routed['cdf_blend_factor'] = routed['cdf_blend_factor'].where(~np.isnan(routed['cdf_blend_factor']), other=0.0)
return routed
def calculate_blend_vars(routed: xr.Dataset, topology: xr.Dataset, reference: xr.Dataset,
gauge_sites = None, route_var = 'IRFroutedRunoff',
fill_method='kldiv', min_kge=-0.41):
"""
Calculates a number of variables used in blendmorph and map_var_to_seg.
Parameters
----------
routed: xr.Dataset
The dataset that will be modified and returned ready for map_var_to_seg.
topology: xr.Dataset
Contains the network topology with a "seg" dimension that identifies reaches,
matching the routed dataset.
reference: xr.Dataset
Contains reaches used for reference with dimension "site" and coordinate "seg".
gauge_sites: list, optional
Contains the gauge site names from the reference dataset to be used that are
automatically pulled from reference if None are given.
route_var: str
Variable name of flows used for fill_method purposes within routed.
This is defaulted as 'IRFroutedRunoff'.
fill_method: str
While finding some upstream/downstream reference segs may be simple,
(segs with 'is_gauge' = True are their own reference segs, others
may be easy to find looking directly up or downstream), some river
networks may have multiple options to select gauge sites and may fail
to have upstream/downstream reference segs designated. 'fill_method'
specifies how segs should be assigned upstream/downstream reference
segs for bias correction if they are missed walking upstream or downstream.
Currently supported methods:
'leave_null'
nothing is done to fill missing reference segs, np.nan values are
replaced with a -1 seg designation and that's it
'forward_fill'
xarray's ffill method is used to fill in any np.nan values
'r2'
reference segs are selected based on which reference site that
seg's flows has the greatest r2 value with
'kldiv' (default)
reference segs are selected based on which reference site that
seg's flows has the smallest KL Divergence value with
'kge'
reference segs are selected based on which reference site that
seg's flows has the greatest KGE value with
min_kge: float
        If not None, all upstream/downstream reference seg selections are filtered
        by the min_kge criterion: selections whose kge with the current seg is
        less than min_kge are set to -1 and deemed unsuitable for bias correction.
        Defaults to -0.41.
Returns
-------
routed: xr.Dataset
with the following added:
'is_headwaters'
'is_gauge'
'down_seg'
'distance_to_up_gauge'
'distance_to_down_gauge'
'cdf_blend_factor'
'up_seg'
'up_ref_seg'
'down_ref_seg'
"""
routed = map_segs_topology(routed=routed, topology=topology)
routed = map_headwater_sites(routed=routed)
routed = map_ref_sites(routed=routed, gauge_reference=reference,
gauge_sites=gauge_sites, route_var=route_var,
fill_method=fill_method, min_kge=min_kge)
routed = calculate_cdf_blend_factor(routed=routed, gauge_reference=reference,
gauge_sites=gauge_sites, fill_method=fill_method, min_kge=min_kge)
for seg in routed['seg']:
        # if one of the reference sites has been left null or determined
        # non bias correctable according to the fill methods, then both
        # reference sites should be considered null to prevent any weird
        # partial bias correction attempts
up_ref_seg = routed['up_ref_seg'].sel(seg=seg)
down_ref_seg = routed['down_ref_seg'].sel(seg=seg)
if up_ref_seg == -1 or down_ref_seg == -1:
routed['up_ref_seg'].loc[{'seg':seg}] = -1
routed['down_ref_seg'].loc[{'seg':seg}] = -1
return routed
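# --- Illustrative sketch (not part of the original module) ------------------
# The loop above only keeps a seg for bias correction when *both* of its
# reference segs are usable; if either one is -1 the pair is discarded. A
# minimal standalone version of that pairing rule over plain sequences
# (function and argument names are hypothetical):
def _example_pair_reference_segs(up_ref_segs, down_ref_segs):
    paired = []
    for up, down in zip(up_ref_segs, down_ref_segs):
        if up == -1 or down == -1:
            paired.append((-1, -1))
        else:
            paired.append((up, down))
    return paired
# _example_pair_reference_segs([3, -1, 7], [4, 5, -1])
#      -> [(3, 4), (-1, -1), (-1, -1)]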
def map_var_to_segs(routed: xr.Dataset, map_var: xr.DataArray, var_label: str,
var_key: str, gauge_segs = None):
"""
Splits the variable into its up and down components to be used in blendmorph.
Parameters
----------
routed: xr.Dataset
the dataset that will be modified and returned having been prepared by calculate_blend_vars
with the dimension 'seg'
map_var: xr.DataArray
contains the variable to be split into up and down components and can be
the same as routed, (must also contain the dimension 'seg')
var_label: str
suffix of the up and down parts of the variable
var_key: str
variable name to access the variable to be split in map_var
gauge_segs: list, optional
List of the gauge segs that identify the reaches that are gauge sites, pulled from routed
if None.
Returns
-------
routed: xr.Dataset
with the following added:
f'down_{var_label}'
f'up_{var_label}'
"""
down_var = f'down_{var_label}'
up_var = f'up_{var_label}'
# need to override dask array data protections
map_var.load()
routed[down_var] = np.nan * map_var
routed[up_var] = np.nan * map_var
# and need to make certain dimensions all line up
map_var = xr.merge([map_var, routed])[var_key]
for seg in routed['seg'].values:
up_seg = routed['up_ref_seg'].sel(seg=seg)
down_seg = routed['down_ref_seg'].sel(seg=seg)
if up_seg != -1:
routed[up_var].loc[{'seg': seg}] = map_var.sel(seg=up_seg).values[:]
if down_seg != -1:
routed[down_var].loc[{'seg': seg}] = map_var.sel(seg=down_seg).values[:]
return routed
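# --- Illustrative usage sketch (not part of the original module) ------------
# map_var_to_segs copies, for each seg, the values of `map_var` at that seg's
# upstream and downstream reference segs into f'up_{var_label}' and
# f'down_{var_label}'. The call below mirrors how to_bmorph() uses it further
# down in this module; `routed` is assumed to have been prepared by
# calculate_blend_vars first.
def _example_map_var_to_segs_usage(routed):
    routed = map_var_to_segs(routed, routed['IRFroutedRunoff'],
                             'raw_flow', 'IRFroutedRunoff')
    # 'up_raw_flow' and 'down_raw_flow' are now present on `routed`
    return routed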
def map_met_hru_to_seg(met_hru, topo):
"""
    Maps meteorological data from hru to seg.
Parameters
----------
met_hru: xr.Dataset
A dataset of meteorological data to be mapped
onto the stream segments to facilitate conditioning.
All variables in this dataset will automatically be mapped
onto the stream segments and returned.
topo: xr.Dataset
Topology dataset for running mizuRoute.
We expect this to have ``seg`` and ``hru`` dimensions.
Returns
-------
met_seg: xr.Dataset
        A dataset of meteorological data mapped onto the stream
segments to facilitate conditioning.
"""
hru_2_seg = topo['seg_hru_id'].values
met_vars = set(met_hru.variables.keys()) - set(met_hru.coords)
# Prep met data structures
met_seg = xr.Dataset({'time': met_hru['time']})
for v in met_vars:
met_seg[v] = xr.DataArray(data=np.nan, dims=('time', 'seg', ),
coords={'time': met_hru['time'], 'seg': topo['seg']})
# Map from hru -> segment for met data
# In case a mapping doesn't exist to all segments,
# we define a neighborhood search to spatially average
null_neighborhood = [-3, -2, -1, 0, 1, 2, 3]
for var in met_vars:
for seg in met_seg['seg'].values:
subset = np.argwhere(hru_2_seg == seg).flatten()
# First fallback, search in the null_neighborhood
if not len(subset):
subset = np.hstack([np.argwhere(hru_2_seg == seg-offset).flatten()
for offset in null_neighborhood])
# Second fallback, use domain average
if not len(subset):
subset = met_hru['hru'].values
met_seg[var].loc[{'seg': seg}] = met_hru[var].isel(hru=subset).mean(dim='hru')
return met_seg
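# --- Illustrative sketch (not part of the original module) ------------------
# The hru -> seg mapping above averages every hru whose seg_hru_id matches the
# seg, falls back to ids within +/-3 of the seg, and finally to a domain-wide
# average. A minimal numpy-only version of that selection logic (function and
# argument names are hypothetical; index-based hrus are assumed):
def _example_select_hrus_for_seg(hru_2_seg, seg,
                                 null_neighborhood=(-3, -2, -1, 0, 1, 2, 3)):
    import numpy as np
    hru_2_seg = np.asarray(hru_2_seg)
    subset = np.argwhere(hru_2_seg == seg).flatten()
    if not len(subset):
        subset = np.hstack([np.argwhere(hru_2_seg == seg - offset).flatten()
                            for offset in null_neighborhood])
    if not len(subset):
        subset = np.arange(len(hru_2_seg))  # domain-average fallback
    return subset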
def to_bmorph(topo: xr.Dataset, routed: xr.Dataset, reference: xr.Dataset,
met_hru: xr.Dataset=None, route_var: str='IRFroutedRunoff',
fill_method = 'r2', min_kge=None):
'''
Prepare mizuroute output for bias correction via the blendmorph algorithm. This
allows an optional dataset of hru meteorological data to be given for conditional
bias correction.
Parameters
----------
topo: xr.Dataset
Topology dataset for running mizuRoute.
We expect this to have ``seg`` and ``hru`` dimensions.
routed: xr.Dataset
The initially routed dataset from mizuRoute.
reference: xr.Dataset
A dataset containing reference flows for bias correction.
We expect this to have ``site`` and ``time`` dimensions with
flows being stored in ``reference_flow``.
met_hru: xr.Dataset, optional
A dataset of meteorological data to be mapped
onto the stream segments to facilitate conditioning.
All variables in this dataset will automatically be mapped
onto the stream segments and returned.
route_var: str
Name of the variable of the routed runoff in the ``routed``
dataset. Defaults to ``IRFroutedRunoff``.
fill_method: str
While finding some upstream/downstream reference segs may be simple,
(segs with 'is_gauge' = True are their own reference segs, others
may be easy to find looking directly up or downstream), some river
networks may have multiple options to select gauge sites and may fail
to have upstream/downstream reference segs designated. 'fill_method'
specifies how segs should be assigned upstream/downstream reference
segs for bias correction if they are missed walking upstream or downstream.
Currently supported methods:
'leave_null'
nothing is done to fill missing reference segs, np.nan values are
replaced with a -1 seg designation and that's it
'forward_fill'
xarray's ffill method is used to fill in any np.nan values
            'r2' (default)
reference segs are selected based on which reference site that
seg's flows has the greatest r2 value with
            'kldiv'
reference segs are selected based on which reference site that
seg's flows has the smallest KL Divergence value with
'kge'
reference segs are selected based on which reference site that
seg's flows has the greatest KGE value with
min_kge: float, optional
        See calculate_blend_vars for more information.
        Defaults to None; when fill_method is 'kge', None is replaced with -0.41.
Returns
-------
met_seg:
A dataset with the required data for applying the ``blendmorph``
routines. See the ``blendmorph`` documentation for further information.
'''
if fill_method == 'kge' and min_kge is None:
min_kge = -0.41
if met_hru is None:
met_hru = xr.Dataset(coords={'time': routed['time']})
# Provide some convenience data for mapping/loops
ref_sites = list(reference['site'].values)
ref_segs = list(reference['seg'].values)
hru_2_seg = topo['seg_hru_id'].values
met_vars = set(met_hru.variables.keys()) - set(met_hru.coords)
# Remap any meteorological data from hru to stream segment
met_seg = map_met_hru_to_seg(met_hru, topo)
    # Calculate the blending variables used for bias correction
    routed = calculate_blend_vars(routed, topo, reference, route_var=route_var,
                                  fill_method=fill_method, min_kge=min_kge)
# Put all data on segments
seg_ref = xr.Dataset({'reference_flow':(('time','seg'), reference['reference_flow'].values)},
coords = {'time': reference['time'].values, 'seg': ref_segs},)
routed = map_var_to_segs(routed, routed[route_var], 'raw_flow', route_var)
routed = map_var_to_segs(routed, seg_ref['reference_flow'], 'ref_flow', 'reference_flow')
for v in met_vars:
routed = map_var_to_segs(routed, met_seg[v], v, v)
# Merge it all together
met_seg = xr.merge([met_seg, routed])
return met_seg
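# --- Illustrative usage sketch (not part of the original module) ------------
# A minimal, hedged example of driving the full preparation pipeline. The
# file names below are placeholders; any datasets with the dimensions
# documented above should work.
def _example_to_bmorph_usage():
    import xarray as xr
    topo = xr.open_dataset('topology.nc')        # mizuRoute topology (seg, hru)
    routed = xr.open_dataset('routed_flows.nc')  # mizuRoute routed output
    reference = xr.open_dataset('reference.nc')  # gauge flows (site, time)
    met_seg = to_bmorph(topo, routed, reference,
                        route_var='IRFroutedRunoff', fill_method='r2')
    return met_seg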
|
|
from ucsmsdk.ucsexception import UcsException
import re, sys
# given an array and a string of numbers, make sure they are all in the array:
#
def check_values(array, csv):
indexes = csv.split(',')
for i in indexes:
        try:
            i = int(i) - 1
        except ValueError:
            print "bad value: " + i
            return False
if i < 0 or i > len(array) - 1:
return False
return True
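# Illustrative example (not part of the original script): with a 3-element
# array, "1,3" selects valid 1-based indexes while "0,4" and "a" do not.
def _example_check_values():
    servers = ["blade-1", "blade-2", "rack-1"]
    assert check_values(servers, "1,3") is True
    assert check_values(servers, "0,4") is False
    assert check_values(servers, "a") is False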
# get the available servers to put in the pool.
def select_kube_servers(handle):
from ucsmsdk.mometa.compute.ComputeRackUnit import ComputeRackUnit
from ucsmsdk.mometa.fabric.FabricComputeSlotEp import FabricComputeSlotEp
print "Listing Available UCS Servers"
filter_string = '(presence, "equipped")'
# get blades
blades = handle.query_classid("fabricComputeSlotEp", filter_string)
# get all connected rack mount servers.
servers = handle.query_classid("computeRackUnit")
m = blades + servers
while True:
for i, s in enumerate(m):
if type(s) is FabricComputeSlotEp:
print "[%d]: Blade %s/%s type %s" % (i+1, s.chassis_id, s.rn, s.model)
if type(s) is ComputeRackUnit:
print "[%d]: Rack %s type %s" % (i+1, s.rn, s.model)
vals = raw_input("(E.g.: 2,4,8): ")
if check_values(m, vals) == True:
k8servers = [m[int(x)-1] for x in vals.split(',')]
print "Install Kubernetes on the following servers:"
for s in k8servers:
if type(s) is FabricComputeSlotEp:
print "\tBlade %s/%s type %s" % (s.chassis_id, s.rn, s.model)
if type(s) is ComputeRackUnit:
print "\tServer %s type %s" % (s.rn, s.model)
yn = raw_input("Is this correct? [N/y]: ")
if yn == "y" or yn == "Y":
return k8servers
def createKubeBootPolicy(handle):
print "Creating Kube Boot Policy"
from ucsmsdk.mometa.lsboot.LsbootPolicy import LsbootPolicy
from ucsmsdk.mometa.lsboot.LsbootVirtualMedia import LsbootVirtualMedia
from ucsmsdk.mometa.lsboot.LsbootStorage import LsbootStorage
from ucsmsdk.mometa.lsboot.LsbootLocalStorage import LsbootLocalStorage
from ucsmsdk.mometa.lsboot.LsbootDefaultLocalImage import LsbootDefaultLocalImage
    mo = LsbootPolicy(parent_mo_or_dn="org-root", name="kube", descr="Kubernetes", reboot_on_update="yes", policy_owner="local", enforce_vnic_name="yes", boot_mode="legacy")
mo_1 = LsbootVirtualMedia(parent_mo_or_dn=mo, access="read-only-remote-cimc", lun_id="0", order="2")
mo_2 = LsbootStorage(parent_mo_or_dn=mo, order="1")
mo_2_1 = LsbootLocalStorage(parent_mo_or_dn=mo_2, )
mo_2_1_1 = LsbootDefaultLocalImage(parent_mo_or_dn=mo_2_1, order="1")
handle.add_mo(mo, modify_present=True)
try:
handle.commit()
except UcsException as err:
if err.error_code == "103":
print "\talready exists"
def deleteKubeBootPolicy(handle):
mo = handle.query_dn("org-root/boot-policy-kube")
try:
handle.remove_mo(mo)
handle.commit()
except AttributeError:
print "\talready deleted"
def createKubeLocalDiskPolicy(handle):
print "Creating Kube Local Disk Policy"
from ucsmsdk.mometa.storage.StorageLocalDiskConfigPolicy import StorageLocalDiskConfigPolicy
mo = StorageLocalDiskConfigPolicy(parent_mo_or_dn="org-root", protect_config="no", name="kube", descr="Kubernetes", flex_flash_raid_reporting_state="disable", flex_flash_state="disable", policy_owner="local", mode="raid-mirrored")
handle.add_mo(mo)
try:
handle.commit()
except UcsException as err:
if err.error_code == "103":
print "\talready exists"
def deleteKubeLocalDiskPolicy(handle):
print "Deleting Kube Local Disk Policy"
mo = handle.query_dn("org-root/local-disk-config-kube")
try:
handle.remove_mo(mo)
handle.commit()
except AttributeError:
print "\talready deleted"
def createKubeUUIDPools(handle):
print "Creating Kube UUID Pools"
from ucsmsdk.mometa.uuidpool.UuidpoolPool import UuidpoolPool
from ucsmsdk.mometa.uuidpool.UuidpoolBlock import UuidpoolBlock
mo = UuidpoolPool(parent_mo_or_dn="org-root", policy_owner="local", prefix="derived", descr="Kubernetes Pool", assignment_order="default", name="kube")
mo_1 = UuidpoolBlock(parent_mo_or_dn=mo, to="C888-888888888100", r_from="C888-888888888001")
handle.add_mo(mo)
try:
handle.commit()
except UcsException as err:
if err.error_code == "103":
print "\talready exists"
def deleteKubeUUIDPools(handle):
print "Deleting Kube UUID Pool"
mo = handle.query_dn("org-root/uuid-pool-kube")
try:
handle.remove_mo(mo)
handle.commit()
except AttributeError:
print "\talready deleted"
def createKubeServerPool(handle):
print "Creating Kubernetes Compute Pool"
from ucsmsdk.mometa.compute.ComputePool import ComputePool
mo = ComputePool(parent_mo_or_dn="org-root", policy_owner="local", name="Kubernetes", descr="")
handle.add_mo(mo)
try:
handle.commit()
except UcsException as err:
if err.error_code == "103":
print "\talready exists"
def addServersToKubePool(handle, servers):
print "Adding servers to Kubernetes Pool"
from ucsmsdk.mometa.compute.ComputePool import ComputePool
from ucsmsdk.mometa.compute.ComputePooledSlot import ComputePooledSlot
from ucsmsdk.mometa.compute.ComputePooledRackUnit import ComputePooledRackUnit
from ucsmsdk.mometa.compute.ComputeRackUnit import ComputeRackUnit
from ucsmsdk.mometa.fabric.FabricComputeSlotEp import FabricComputeSlotEp
mo = ComputePool(parent_mo_or_dn="org-root", policy_owner="local", name="Kubernetes", descr="")
for s in servers:
if type(s) is FabricComputeSlotEp:
ComputePooledSlot(parent_mo_or_dn=mo, slot_id=re.sub("slot-","", s.slot_id), chassis_id=str(s.chassis_id))
if type(s) is ComputeRackUnit:
ComputePooledRackUnit(parent_mo_or_dn=mo, id=re.sub("rack-unit-","", s.rn))
handle.add_mo(mo, True)
try:
handle.commit()
except UcsException as err:
if err.error_code == "103":
print "\talready exists"
def deleteKubeServerPool(handle):
print "Deleting Kubernetes Compute Pool"
mo = handle.query_dn("org-root/compute-pool-Kubernetes")
try:
handle.remove_mo(mo)
handle.commit()
except AttributeError:
print "\talready deleted"
def createServiceProfileTemplate(handle):
print "Creating Kubernetes Service Profile Template"
from ucsmsdk.mometa.ls.LsServer import LsServer
from ucsmsdk.mometa.vnic.VnicConnDef import VnicConnDef
from ucsmsdk.mometa.ls.LsRequirement import LsRequirement
from ucsmsdk.mometa.lstorage.LstorageProfileBinding import LstorageProfileBinding
mo = LsServer(parent_mo_or_dn="org-root",
policy_owner="local",
name="Kubernetes",
descr="Kubernetes Service Profile",
type="updating-template",
# Boot using Kubernetes Boot policy: local Disk, then Remote DVD
boot_policy_name="kube",
# Default Maintenance Policy
maint_policy_name="default",
# scrub policy
scrub_policy_name="kube",
# UUID Pool
ident_pool_name="kube",
# disks we use.
#local_disk_policy_name="kube",
#storage_profile_name="kube",
# virtual media policy
vmedia_policy_name="kube"
)
# create vNIC Connection Policy
VnicConnDef(parent_mo_or_dn=mo,
lan_conn_policy_name="kube")
# create server pool and add to template.
LsRequirement(parent_mo_or_dn=mo, name="Kubernetes")
# add storage profile.
mo_1 = LstorageProfileBinding(parent_mo_or_dn=mo, storage_profile_name="kube")
handle.add_mo(mo, True)
try:
handle.commit()
except UcsException as err:
if err.error_code == "103":
print "\talready exists"
    except Exception as e:
        print "\tunexpected error: " + str(e)
def deleteServiceProfileTemplate(handle):
print "Deleting Kubernetes Service Profile Template"
print "Deleting Kubernetes Compute Pool"
mo = handle.query_dn("org-root/ls-Kubernetes")
try:
handle.remove_mo(mo)
handle.commit()
except AttributeError:
print "\talready deleted"
def createServers(handle, servers):
print "Creating Kubernetes Service Profiles"
from ucsmsdk.ucsmethodfactory import ls_instantiate_n_named_template
from ucsmsdk.ucsbasetype import DnSet, Dn
for i, s in enumerate(servers):
dn_set = DnSet()
dn = Dn()
sp_name = "kube0%d" % (i+1)
dn.attr_set("value",sp_name)
dn_set.child_add(dn)
elem = ls_instantiate_n_named_template(cookie=handle.cookie,
dn="org-root/ls-Kubernetes",
in_error_on_existing="true",
in_name_set=dn_set,
in_target_org="org-root",
in_hierarchical="false")
try:
mo_list = handle.process_xml_elem(elem)
except UcsException as err:
if err.error_code == "105":
print "\t" + sp_name + " already exists."
else:
print err
def deleteServers(handle):
print "Deleting Kubernetes Nodes"
filter_string = '(dn, "ls-kube[0-9]+", type="re")'
kube = handle.query_classid("lsServer", filter_string)
for k in kube:
print "Deleting " + k.name
handle.remove_mo(k)
try:
handle.commit()
except AttributeError:
print "\talready deleted"
except UcsException as err:
print "\t"+ k.name + ": " + err.error_descr
def createKubeVirtualMedia(handle):
print "Adding Virtual Media Policy"
from urlparse import urlparse
import os.path
yn = False
url = ""
while yn == False:
print "What is the URL for the Boot ISO image?"
url = raw_input("(E.g.: http://192.168.2.2/kubam/centos7.2-boot.iso) : ")
print "You entered: " + url
yn = raw_input("Is this correct? [y/N]: ")
if yn != "y":
yn = False
o = urlparse(url)
paths = os.path.split(o.path)
scheme = o.scheme # http, https
if scheme == "":
scheme = "http"
filename = paths[-1]
address = o.hostname
path = "/".join(paths[:-1])
name = ".".join(paths[-1].split(".")[:-1])
from ucsmsdk.mometa.cimcvmedia.CimcvmediaMountConfigPolicy import CimcvmediaMountConfigPolicy
from ucsmsdk.mometa.cimcvmedia.CimcvmediaConfigMountEntry import CimcvmediaConfigMountEntry
mo = CimcvmediaMountConfigPolicy(name="kube",
retry_on_mount_fail="yes",
parent_mo_or_dn="org-root",
policy_owner="local",
descr="Kubernetes Boot Media")
mo_1 = CimcvmediaConfigMountEntry(parent_mo_or_dn=mo,
mapping_name=name,
device_type="cdd",
mount_protocol=scheme,
remote_ip_address=address,
image_name_variable="none",
image_file_name=filename,
image_path=path)
mo_2 = CimcvmediaConfigMountEntry(parent_mo_or_dn=mo,
mapping_name="kickstartImage",
device_type="hdd",
mount_protocol=scheme,
remote_ip_address=address,
image_name_variable="service-profile-name",
image_path=path)
handle.add_mo(mo, modify_present=True)
try:
handle.commit()
except UcsException as err:
if err.error_code == "103":
print "\talready exists"
def deleteVirtualMedia(handle):
print "Deleting Kubernetes Virtual Media Policy"
mo = handle.query_dn("org-root/mnt-cfg-policy-kube")
try:
handle.remove_mo(mo)
handle.commit()
except AttributeError:
print "\talready deleted"
def createScrubPolicy(handle):
from ucsmsdk.mometa.compute.ComputeScrubPolicy import ComputeScrubPolicy
mo = ComputeScrubPolicy(flex_flash_scrub="no",
parent_mo_or_dn="org-root",
name="kube",
disk_scrub="yes",
bios_settings_scrub="no",
descr="Destroy data when SP is unassociated")
handle.add_mo(mo, modify_present=True)
try:
handle.commit()
except UcsException as err:
if err.error_code == "103":
print "\talready exists"
def deleteScrubPolicy(handle):
print "Deleting Kubernetes Scrub Policy"
mo = handle.query_dn("org-root/scrub-kube")
try:
handle.remove_mo(mo)
handle.commit()
except AttributeError:
print "\talready deleted"
def deleteDiskGroupConfig(handle):
print "Deleting Disk Group config"
mo = handle.query_dn("org-root/disk-group-config-Kube_Boot")
try:
handle.remove_mo(mo)
handle.commit()
except AttributeError:
print "\talready deleted"
def deleteStorageProfile(handle):
print "Deleting Storage Profile"
mo = handle.query_dn("org-root/profile-kube")
try:
handle.remove_mo(mo)
handle.commit()
except AttributeError:
print "\talready deleted"
def createDiskGroupConfig(handle):
print "Adding Disk Group Config"
from ucsmsdk.mometa.lstorage.LstorageDiskGroupConfigPolicy import LstorageDiskGroupConfigPolicy
from ucsmsdk.mometa.lstorage.LstorageDiskGroupQualifier import LstorageDiskGroupQualifier
from ucsmsdk.mometa.lstorage.LstorageVirtualDriveDef import LstorageVirtualDriveDef
mo = LstorageDiskGroupConfigPolicy(parent_mo_or_dn="org-root",
policy_owner="local",
name="kube_boot",
descr="Kubernetes Boot Disk",
raid_level="mirror")
mo_1 = LstorageDiskGroupQualifier(parent_mo_or_dn=mo,
use_remaining_disks="no",
num_ded_hot_spares="unspecified",
drive_type="unspecified",
num_drives="2",
min_drive_size="unspecified",
num_glob_hot_spares="unspecified")
mo_2 = LstorageVirtualDriveDef(parent_mo_or_dn=mo, read_policy="platform-default",
drive_cache="platform-default",
strip_size="platform-default",
io_policy="platform-default",
write_cache_policy="platform-default",
access_policy="platform-default")
handle.add_mo(mo, modify_present=True)
try:
handle.commit()
except UcsException as err:
if err.error_code == "103":
print "\talready exists"
def createStorageProfile(handle):
from ucsmsdk.mometa.lstorage.LstorageProfile import LstorageProfile
from ucsmsdk.mometa.lstorage.LstorageDasScsiLun import LstorageDasScsiLun
mo = LstorageProfile(parent_mo_or_dn="org-root",
policy_owner="local",
name="kube",
descr="Kubernetes Storage Profile")
mo_1 = LstorageDasScsiLun(parent_mo_or_dn=mo,
local_disk_policy_name="kube_boot",
auto_deploy="auto-deploy",
expand_to_avail="yes",
lun_map_type="non-shared",
size="1",
fractional_size="0",
admin_state="online",
deferred_naming="no",
order="not-applicable",
name="KubeLUN")
handle.add_mo(mo, modify_present=True)
try:
handle.commit()
except UcsException as err:
if err.error_code == "103":
print "\talready exists"
def createKubeServers(handle, org):
createKubeBootPolicy(handle)
#createKubeLocalDiskPolicy(handle)
createDiskGroupConfig(handle)
createStorageProfile(handle)
createScrubPolicy(handle)
createKubeUUIDPools(handle)
createKubeServerPool(handle)
createKubeVirtualMedia(handle)
servers = select_kube_servers(handle)
addServersToKubePool(handle, servers)
createServiceProfileTemplate(handle)
createServers(handle, servers)
def deleteKubeServers(handle, org):
deleteServers(handle)
deleteServiceProfileTemplate(handle)
deleteKubeServerPool(handle)
deleteVirtualMedia(handle)
deleteScrubPolicy(handle)
deleteKubeBootPolicy(handle)
deleteStorageProfile(handle)
deleteDiskGroupConfig(handle)
#deleteKubeLocalDiskPolicy(handle)
deleteKubeUUIDPools(handle)
|
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova import block_device
from nova import context
from nova.openstack.common import jsonutils
from nova import test
from nova.tests import matchers
from nova.virt import block_device as driver_block_device
from nova.virt import driver
from nova.volume import cinder
from nova.volume import encryptors
class TestDriverBlockDevice(test.NoDBTestCase):
driver_classes = {
'swap': driver_block_device.DriverSwapBlockDevice,
'ephemeral': driver_block_device.DriverEphemeralBlockDevice,
'volume': driver_block_device.DriverVolumeBlockDevice,
'snapshot': driver_block_device.DriverSnapshotBlockDevice,
'image': driver_block_device.DriverImageBlockDevice
}
swap_bdm = block_device.BlockDeviceDict(
{'id': 1, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sdb1',
'source_type': 'blank',
'destination_type': 'local',
'delete_on_termination': True,
'guest_format': 'swap',
'disk_bus': 'scsi',
'volume_size': 2,
'boot_index': -1})
swap_driver_bdm = {
'device_name': '/dev/sdb1',
'swap_size': 2,
'disk_bus': 'scsi'}
swap_legacy_driver_bdm = {
'device_name': '/dev/sdb1',
'swap_size': 2}
ephemeral_bdm = block_device.BlockDeviceDict(
{'id': 2, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sdc1',
'source_type': 'blank',
'destination_type': 'local',
'disk_bus': 'scsi',
'device_type': 'disk',
'volume_size': 4,
'guest_format': 'ext4',
'delete_on_termination': True,
'boot_index': -1})
ephemeral_driver_bdm = {
'device_name': '/dev/sdc1',
'size': 4,
'device_type': 'disk',
'guest_format': 'ext4',
'disk_bus': 'scsi'}
ephemeral_legacy_driver_bdm = {
'device_name': '/dev/sdc1',
'size': 4,
'virtual_name': 'ephemeral0',
'num': 0}
volume_bdm = block_device.BlockDeviceDict(
{'id': 3, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sda1',
'source_type': 'volume',
'disk_bus': 'scsi',
'device_type': 'disk',
'volume_size': 8,
'destination_type': 'volume',
'volume_id': 'fake-volume-id-1',
'guest_format': 'ext4',
'connection_info': '{"fake": "connection_info"}',
'delete_on_termination': False,
'boot_index': 0})
volume_driver_bdm = {
'mount_device': '/dev/sda1',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': False,
'disk_bus': 'scsi',
'device_type': 'disk',
'guest_format': 'ext4',
'boot_index': 0}
volume_legacy_driver_bdm = {
'mount_device': '/dev/sda1',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': False}
snapshot_bdm = block_device.BlockDeviceDict(
{'id': 4, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sda2',
'delete_on_termination': True,
'volume_size': 3,
'disk_bus': 'scsi',
'device_type': 'disk',
'source_type': 'snapshot',
'destination_type': 'volume',
'connection_info': '{"fake": "connection_info"}',
'snapshot_id': 'fake-snapshot-id-1',
'volume_id': 'fake-volume-id-2',
'boot_index': -1})
snapshot_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True,
'disk_bus': 'scsi',
'device_type': 'disk',
'guest_format': None,
'boot_index': -1}
snapshot_legacy_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True}
image_bdm = block_device.BlockDeviceDict(
{'id': 5, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sda2',
'delete_on_termination': True,
'volume_size': 1,
'disk_bus': 'scsi',
'device_type': 'disk',
'source_type': 'image',
'destination_type': 'volume',
'connection_info': '{"fake": "connection_info"}',
'image_id': 'fake-image-id-1',
'volume_id': 'fake-volume-id-2',
'boot_index': -1})
image_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True,
'disk_bus': 'scsi',
'device_type': 'disk',
'guest_format': None,
'boot_index': -1}
image_legacy_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True}
def setUp(self):
super(TestDriverBlockDevice, self).setUp()
self.volume_api = self.mox.CreateMock(cinder.API)
self.virt_driver = self.mox.CreateMock(driver.ComputeDriver)
self.context = context.RequestContext('fake_user',
'fake_project')
def test_no_device_raises(self):
for name, cls in self.driver_classes.items():
self.assertRaises(driver_block_device._NotTransformable,
cls, {'no_device': True})
def _test_driver_device(self, name):
db_bdm = getattr(self, "%s_bdm" % name)
test_bdm = self.driver_classes[name](db_bdm)
self.assertThat(test_bdm, matchers.DictMatches(
getattr(self, "%s_driver_bdm" % name)))
for k, v in db_bdm.iteritems():
field_val = getattr(test_bdm._bdm_obj, k)
if isinstance(field_val, bool):
v = bool(v)
self.assertEqual(field_val, v)
self.assertThat(test_bdm.legacy(),
matchers.DictMatches(
getattr(self, "%s_legacy_driver_bdm" % name)))
# Test passthru attributes
for passthru in test_bdm._proxy_as_attr:
self.assertEqual(getattr(test_bdm, passthru),
getattr(test_bdm._bdm_obj, passthru))
for no_pass in set(db_bdm.keys()) - test_bdm._proxy_as_attr:
self.assertRaises(AttributeError, getattr, test_bdm, no_pass)
# Make sure that all others raise _invalidType
for other_name, cls in self.driver_classes.iteritems():
if other_name == name:
continue
self.assertRaises(driver_block_device._InvalidType,
cls,
getattr(self, '%s_bdm' % name))
# Test the save method
with mock.patch.object(test_bdm._bdm_obj, 'save') as save_mock:
test_bdm.save(self.context)
for fld, alias in test_bdm._update_on_save.iteritems():
self.assertEqual(test_bdm[alias or fld],
getattr(test_bdm._bdm_obj, fld))
save_mock.assert_called_once_with(self.context)
def _test_driver_default_size(self, name):
size = 'swap_size' if name == 'swap' else 'size'
no_size_bdm = getattr(self, "%s_bdm" % name).copy()
no_size_bdm['volume_size'] = None
driver_bdm = self.driver_classes[name](no_size_bdm)
self.assertEqual(driver_bdm[size], 0)
del no_size_bdm['volume_size']
driver_bdm = self.driver_classes[name](no_size_bdm)
self.assertEqual(driver_bdm[size], 0)
def test_driver_swap_block_device(self):
self._test_driver_device("swap")
def test_driver_swap_default_size(self):
self._test_driver_default_size('swap')
def test_driver_ephemeral_block_device(self):
self._test_driver_device("ephemeral")
def test_driver_ephemeral_default_size(self):
self._test_driver_default_size('ephemeral')
def test_driver_volume_block_device(self):
self._test_driver_device("volume")
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
self.assertEqual(test_bdm['connection_info'],
jsonutils.loads(test_bdm._bdm_obj.connection_info))
self.assertEqual(test_bdm._bdm_obj.id, 3)
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-1')
self.assertEqual(test_bdm.volume_size, 8)
def test_driver_snapshot_block_device(self):
self._test_driver_device("snapshot")
test_bdm = self.driver_classes['snapshot'](
self.snapshot_bdm)
self.assertEqual(test_bdm._bdm_obj.id, 4)
self.assertEqual(test_bdm.snapshot_id, 'fake-snapshot-id-1')
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
self.assertEqual(test_bdm.volume_size, 3)
def test_driver_image_block_device(self):
self._test_driver_device('image')
test_bdm = self.driver_classes['image'](
self.image_bdm)
self.assertEqual(test_bdm._bdm_obj.id, 5)
self.assertEqual(test_bdm.image_id, 'fake-image-id-1')
self.assertEqual(test_bdm.volume_size, 1)
def test_driver_image_block_device_destination_local(self):
self._test_driver_device('image')
bdm = self.image_bdm.copy()
bdm['destination_type'] = 'local'
self.assertRaises(driver_block_device._InvalidType,
self.driver_classes['image'], bdm)
def _test_volume_attach(self, driver_bdm, bdm_dict,
fake_volume, check_attach=True,
fail_check_attach=False, driver_attach=False,
fail_driver_attach=False):
elevated_context = self.context.elevated()
self.stubs.Set(self.context, 'elevated',
lambda: elevated_context)
self.mox.StubOutWithMock(driver_bdm._bdm_obj, 'save')
self.mox.StubOutWithMock(encryptors, 'get_encryption_metadata')
instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
connector = {'ip': 'fake_ip', 'host': 'fake_host'}
connection_info = {'data': {}}
expected_conn_info = {'data': {},
'serial': fake_volume['id']}
enc_data = {'fake': 'enc_data'}
self.volume_api.get(self.context,
fake_volume['id']).AndReturn(fake_volume)
if check_attach:
if not fail_check_attach:
self.volume_api.check_attach(self.context, fake_volume,
instance=instance).AndReturn(None)
else:
self.volume_api.check_attach(self.context, fake_volume,
instance=instance).AndRaise(
test.TestingException)
return instance, expected_conn_info
self.virt_driver.get_volume_connector(instance).AndReturn(connector)
self.volume_api.initialize_connection(
elevated_context, fake_volume['id'],
connector).AndReturn(connection_info)
if driver_attach:
encryptors.get_encryption_metadata(
elevated_context, self.volume_api, fake_volume['id'],
connection_info).AndReturn(enc_data)
if not fail_driver_attach:
self.virt_driver.attach_volume(
elevated_context, expected_conn_info, instance,
bdm_dict['device_name'],
encryption=enc_data).AndReturn(None)
else:
self.virt_driver.attach_volume(
elevated_context, expected_conn_info, instance,
bdm_dict['device_name'],
encryption=enc_data).AndRaise(test.TestingException)
self.volume_api.terminate_connection(
elevated_context, fake_volume['id'],
expected_conn_info).AndReturn(None)
return instance, expected_conn_info
self.volume_api.attach(elevated_context, fake_volume['id'],
'fake_uuid', bdm_dict['device_name']).AndReturn(None)
driver_bdm._bdm_obj.save(self.context).AndReturn(None)
return instance, expected_conn_info
def test_volume_attach(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1'}
instance, expected_conn_info = self._test_volume_attach(
test_bdm, self.volume_bdm, volume)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance,
self.volume_api, self.virt_driver)
self.assertThat(test_bdm['connection_info'],
matchers.DictMatches(expected_conn_info))
    def test_volume_attach_check_attach_fails(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1'}
instance, _ = self._test_volume_attach(
test_bdm, self.volume_bdm, volume, fail_check_attach=True)
self.mox.ReplayAll()
        self.assertRaises(test.TestingException, test_bdm.attach, self.context,
                          instance, self.volume_api, self.virt_driver)
def test_volume_attach_no_check_driver_attach(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1'}
instance, expected_conn_info = self._test_volume_attach(
test_bdm, self.volume_bdm, volume, check_attach=False,
driver_attach=True)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance,
self.volume_api, self.virt_driver,
do_check_attach=False, do_driver_attach=True)
self.assertThat(test_bdm['connection_info'],
matchers.DictMatches(expected_conn_info))
    def test_volume_attach_driver_attach_fails(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1'}
        instance, _ = self._test_volume_attach(
            test_bdm, self.volume_bdm, volume, driver_attach=True,
            fail_driver_attach=True)
self.mox.ReplayAll()
        self.assertRaises(test.TestingException, test_bdm.attach, self.context,
                          instance, self.volume_api, self.virt_driver,
                          do_driver_attach=True)
def test_refresh_connection(self):
test_bdm = self.driver_classes['snapshot'](
self.snapshot_bdm)
instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
connector = {'ip': 'fake_ip', 'host': 'fake_host'}
connection_info = {'data': {}}
expected_conn_info = {'data': {},
'serial': 'fake-volume-id-2'}
self.mox.StubOutWithMock(test_bdm._bdm_obj, 'save')
self.virt_driver.get_volume_connector(instance).AndReturn(connector)
self.volume_api.initialize_connection(
self.context, test_bdm.volume_id,
connector).AndReturn(connection_info)
test_bdm._bdm_obj.save(self.context).AndReturn(None)
self.mox.ReplayAll()
test_bdm.refresh_connection_info(self.context, instance,
self.volume_api, self.virt_driver)
self.assertThat(test_bdm['connection_info'],
matchers.DictMatches(expected_conn_info))
def test_snapshot_attach_no_volume(self):
no_volume_snapshot = self.snapshot_bdm.copy()
no_volume_snapshot['volume_id'] = None
test_bdm = self.driver_classes['snapshot'](no_volume_snapshot)
snapshot = {'id': 'fake-snapshot-id-1'}
volume = {'id': 'fake-volume-id-2'}
wait_func = self.mox.CreateMockAnything()
self.volume_api.get_snapshot(self.context,
'fake-snapshot-id-1').AndReturn(snapshot)
self.volume_api.create(self.context, 3,
'', '', snapshot).AndReturn(volume)
wait_func(self.context, 'fake-volume-id-2').AndReturn(None)
instance, expected_conn_info = self._test_volume_attach(
test_bdm, no_volume_snapshot, volume)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver, wait_func)
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
def test_snapshot_attach_volume(self):
test_bdm = self.driver_classes['snapshot'](
self.snapshot_bdm)
instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
volume_class = self.driver_classes['volume']
self.mox.StubOutWithMock(volume_class, 'attach')
        # Make sure these are not called
self.mox.StubOutWithMock(self.volume_api, 'get_snapshot')
self.mox.StubOutWithMock(self.volume_api, 'create')
volume_class.attach(self.context, instance, self.volume_api,
self.virt_driver).AndReturn(None)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver)
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
def test_image_attach_no_volume(self):
no_volume_image = self.image_bdm.copy()
no_volume_image['volume_id'] = None
test_bdm = self.driver_classes['image'](no_volume_image)
image = {'id': 'fake-image-id-1'}
volume = {'id': 'fake-volume-id-2'}
wait_func = self.mox.CreateMockAnything()
self.volume_api.create(self.context, 1,
'', '', image_id=image['id']).AndReturn(volume)
wait_func(self.context, 'fake-volume-id-2').AndReturn(None)
instance, expected_conn_info = self._test_volume_attach(
test_bdm, no_volume_image, volume)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver, wait_func)
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
def test_image_attach_volume(self):
test_bdm = self.driver_classes['image'](
self.image_bdm)
instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
volume_class = self.driver_classes['volume']
self.mox.StubOutWithMock(volume_class, 'attach')
        # Make sure these are not called
self.mox.StubOutWithMock(self.volume_api, 'get_snapshot')
self.mox.StubOutWithMock(self.volume_api, 'create')
volume_class.attach(self.context, instance, self.volume_api,
self.virt_driver).AndReturn(None)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver)
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
def test_convert_block_devices(self):
converted = driver_block_device._convert_block_devices(
self.driver_classes['volume'],
[self.volume_bdm, self.ephemeral_bdm])
self.assertEqual(converted, [self.volume_driver_bdm])
def test_legacy_block_devices(self):
test_snapshot = self.driver_classes['snapshot'](
self.snapshot_bdm)
block_device_mapping = [test_snapshot, test_snapshot]
legacy_bdm = driver_block_device.legacy_block_devices(
block_device_mapping)
self.assertEqual(legacy_bdm, [self.snapshot_legacy_driver_bdm,
self.snapshot_legacy_driver_bdm])
# Test that the ephemerals work as expected
test_ephemerals = [self.driver_classes['ephemeral'](
self.ephemeral_bdm) for _ in xrange(2)]
expected = [self.ephemeral_legacy_driver_bdm.copy()
for _ in xrange(2)]
expected[0]['virtual_name'] = 'ephemeral0'
expected[0]['num'] = 0
expected[1]['virtual_name'] = 'ephemeral1'
expected[1]['num'] = 1
legacy_ephemerals = driver_block_device.legacy_block_devices(
test_ephemerals)
self.assertEqual(expected, legacy_ephemerals)
def test_get_swap(self):
swap = [self.swap_driver_bdm]
legacy_swap = [self.swap_legacy_driver_bdm]
no_swap = [self.volume_driver_bdm]
self.assertEqual(swap[0], driver_block_device.get_swap(swap))
self.assertEqual(legacy_swap[0],
driver_block_device.get_swap(legacy_swap))
self.assertEqual(no_swap, driver_block_device.get_swap(no_swap))
self.assertIsNone(driver_block_device.get_swap([]))
def test_is_implemented(self):
for bdm in (self.image_bdm, self.volume_bdm, self.swap_bdm,
self.ephemeral_bdm, self.snapshot_bdm):
self.assertTrue(driver_block_device.is_implemented(bdm))
local_image = self.image_bdm.copy()
local_image['destination_type'] = 'local'
self.assertFalse(driver_block_device.is_implemented(local_image))
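# --- Illustrative sketch (not part of the original test module) -------------
# get_swap(), as exercised in test_get_swap above, returns the single swap
# entry from a list of swap driver BDMs, None for an empty list, and the list
# unchanged when no swap entry is present. A minimal standalone illustration
# reusing the same fixture shape:
def _example_get_swap():
    swap = [{'device_name': '/dev/sdb1', 'swap_size': 2, 'disk_bus': 'scsi'}]
    assert driver_block_device.get_swap(swap) == {'device_name': '/dev/sdb1',
                                                  'swap_size': 2,
                                                  'disk_bus': 'scsi'}
    assert driver_block_device.get_swap([]) is None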
|
|
# Copyright 2020 kubeflow.org.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
KFServing
Python SDK for KFServing # noqa: E501
The version of the OpenAPI document: v0.1
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kfserving.configuration import Configuration
class V1beta1LightGBMSpec(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'args': 'list[str]',
'command': 'list[str]',
'env': 'list[V1EnvVar]',
'env_from': 'list[V1EnvFromSource]',
'image': 'str',
'image_pull_policy': 'str',
'lifecycle': 'V1Lifecycle',
'liveness_probe': 'V1Probe',
'name': 'str',
'ports': 'list[V1ContainerPort]',
'protocol_version': 'str',
'readiness_probe': 'V1Probe',
'resources': 'V1ResourceRequirements',
'runtime_version': 'str',
'security_context': 'V1SecurityContext',
'startup_probe': 'V1Probe',
'stdin': 'bool',
'stdin_once': 'bool',
'storage_uri': 'str',
'termination_message_path': 'str',
'termination_message_policy': 'str',
'tty': 'bool',
'volume_devices': 'list[V1VolumeDevice]',
'volume_mounts': 'list[V1VolumeMount]',
'working_dir': 'str'
}
attribute_map = {
'args': 'args',
'command': 'command',
'env': 'env',
'env_from': 'envFrom',
'image': 'image',
'image_pull_policy': 'imagePullPolicy',
'lifecycle': 'lifecycle',
'liveness_probe': 'livenessProbe',
'name': 'name',
'ports': 'ports',
'protocol_version': 'protocolVersion',
'readiness_probe': 'readinessProbe',
'resources': 'resources',
'runtime_version': 'runtimeVersion',
'security_context': 'securityContext',
'startup_probe': 'startupProbe',
'stdin': 'stdin',
'stdin_once': 'stdinOnce',
'storage_uri': 'storageUri',
'termination_message_path': 'terminationMessagePath',
'termination_message_policy': 'terminationMessagePolicy',
'tty': 'tty',
'volume_devices': 'volumeDevices',
'volume_mounts': 'volumeMounts',
'working_dir': 'workingDir'
}
def __init__(self, args=None, command=None, env=None, env_from=None, image=None, image_pull_policy=None, lifecycle=None, liveness_probe=None, name=None, ports=None, protocol_version=None, readiness_probe=None, resources=None, runtime_version=None, security_context=None, startup_probe=None, stdin=None, stdin_once=None, storage_uri=None, termination_message_path=None, termination_message_policy=None, tty=None, volume_devices=None, volume_mounts=None, working_dir=None, local_vars_configuration=None): # noqa: E501
"""V1beta1LightGBMSpec - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._args = None
self._command = None
self._env = None
self._env_from = None
self._image = None
self._image_pull_policy = None
self._lifecycle = None
self._liveness_probe = None
self._name = None
self._ports = None
self._protocol_version = None
self._readiness_probe = None
self._resources = None
self._runtime_version = None
self._security_context = None
self._startup_probe = None
self._stdin = None
self._stdin_once = None
self._storage_uri = None
self._termination_message_path = None
self._termination_message_policy = None
self._tty = None
self._volume_devices = None
self._volume_mounts = None
self._working_dir = None
self.discriminator = None
if args is not None:
self.args = args
if command is not None:
self.command = command
if env is not None:
self.env = env
if env_from is not None:
self.env_from = env_from
if image is not None:
self.image = image
if image_pull_policy is not None:
self.image_pull_policy = image_pull_policy
if lifecycle is not None:
self.lifecycle = lifecycle
if liveness_probe is not None:
self.liveness_probe = liveness_probe
if name is not None:
self.name = name
if ports is not None:
self.ports = ports
if protocol_version is not None:
self.protocol_version = protocol_version
if readiness_probe is not None:
self.readiness_probe = readiness_probe
if resources is not None:
self.resources = resources
if runtime_version is not None:
self.runtime_version = runtime_version
if security_context is not None:
self.security_context = security_context
if startup_probe is not None:
self.startup_probe = startup_probe
if stdin is not None:
self.stdin = stdin
if stdin_once is not None:
self.stdin_once = stdin_once
if storage_uri is not None:
self.storage_uri = storage_uri
if termination_message_path is not None:
self.termination_message_path = termination_message_path
if termination_message_policy is not None:
self.termination_message_policy = termination_message_policy
if tty is not None:
self.tty = tty
if volume_devices is not None:
self.volume_devices = volume_devices
if volume_mounts is not None:
self.volume_mounts = volume_mounts
if working_dir is not None:
self.working_dir = working_dir
@property
def args(self):
"""Gets the args of this V1beta1LightGBMSpec. # noqa: E501
Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell # noqa: E501
:return: The args of this V1beta1LightGBMSpec. # noqa: E501
:rtype: list[str]
"""
return self._args
@args.setter
def args(self, args):
"""Sets the args of this V1beta1LightGBMSpec.
Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell # noqa: E501
:param args: The args of this V1beta1LightGBMSpec. # noqa: E501
:type: list[str]
"""
self._args = args
@property
def command(self):
"""Gets the command of this V1beta1LightGBMSpec. # noqa: E501
Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell # noqa: E501
:return: The command of this V1beta1LightGBMSpec. # noqa: E501
:rtype: list[str]
"""
return self._command
@command.setter
def command(self, command):
"""Sets the command of this V1beta1LightGBMSpec.
Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell # noqa: E501
:param command: The command of this V1beta1LightGBMSpec. # noqa: E501
:type: list[str]
"""
self._command = command
@property
def env(self):
"""Gets the env of this V1beta1LightGBMSpec. # noqa: E501
List of environment variables to set in the container. Cannot be updated. # noqa: E501
:return: The env of this V1beta1LightGBMSpec. # noqa: E501
:rtype: list[V1EnvVar]
"""
return self._env
@env.setter
def env(self, env):
"""Sets the env of this V1beta1LightGBMSpec.
List of environment variables to set in the container. Cannot be updated. # noqa: E501
:param env: The env of this V1beta1LightGBMSpec. # noqa: E501
:type: list[V1EnvVar]
"""
self._env = env
@property
def env_from(self):
"""Gets the env_from of this V1beta1LightGBMSpec. # noqa: E501
List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. # noqa: E501
:return: The env_from of this V1beta1LightGBMSpec. # noqa: E501
:rtype: list[V1EnvFromSource]
"""
return self._env_from
@env_from.setter
def env_from(self, env_from):
"""Sets the env_from of this V1beta1LightGBMSpec.
List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. # noqa: E501
:param env_from: The env_from of this V1beta1LightGBMSpec. # noqa: E501
:type: list[V1EnvFromSource]
"""
self._env_from = env_from
@property
def image(self):
"""Gets the image of this V1beta1LightGBMSpec. # noqa: E501
Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets. # noqa: E501
:return: The image of this V1beta1LightGBMSpec. # noqa: E501
:rtype: str
"""
return self._image
@image.setter
def image(self, image):
"""Sets the image of this V1beta1LightGBMSpec.
Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets. # noqa: E501
:param image: The image of this V1beta1LightGBMSpec. # noqa: E501
:type: str
"""
self._image = image
@property
def image_pull_policy(self):
"""Gets the image_pull_policy of this V1beta1LightGBMSpec. # noqa: E501
Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images # noqa: E501
:return: The image_pull_policy of this V1beta1LightGBMSpec. # noqa: E501
:rtype: str
"""
return self._image_pull_policy
@image_pull_policy.setter
def image_pull_policy(self, image_pull_policy):
"""Sets the image_pull_policy of this V1beta1LightGBMSpec.
Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images # noqa: E501
:param image_pull_policy: The image_pull_policy of this V1beta1LightGBMSpec. # noqa: E501
:type: str
"""
self._image_pull_policy = image_pull_policy
@property
def lifecycle(self):
"""Gets the lifecycle of this V1beta1LightGBMSpec. # noqa: E501
:return: The lifecycle of this V1beta1LightGBMSpec. # noqa: E501
:rtype: V1Lifecycle
"""
return self._lifecycle
@lifecycle.setter
def lifecycle(self, lifecycle):
"""Sets the lifecycle of this V1beta1LightGBMSpec.
:param lifecycle: The lifecycle of this V1beta1LightGBMSpec. # noqa: E501
:type: V1Lifecycle
"""
self._lifecycle = lifecycle
@property
def liveness_probe(self):
"""Gets the liveness_probe of this V1beta1LightGBMSpec. # noqa: E501
:return: The liveness_probe of this V1beta1LightGBMSpec. # noqa: E501
:rtype: V1Probe
"""
return self._liveness_probe
@liveness_probe.setter
def liveness_probe(self, liveness_probe):
"""Sets the liveness_probe of this V1beta1LightGBMSpec.
:param liveness_probe: The liveness_probe of this V1beta1LightGBMSpec. # noqa: E501
:type: V1Probe
"""
self._liveness_probe = liveness_probe
@property
def name(self):
"""Gets the name of this V1beta1LightGBMSpec. # noqa: E501
Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. # noqa: E501
:return: The name of this V1beta1LightGBMSpec. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V1beta1LightGBMSpec.
Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. # noqa: E501
:param name: The name of this V1beta1LightGBMSpec. # noqa: E501
:type: str
"""
self._name = name
@property
def ports(self):
"""Gets the ports of this V1beta1LightGBMSpec. # noqa: E501
List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated. # noqa: E501
:return: The ports of this V1beta1LightGBMSpec. # noqa: E501
:rtype: list[V1ContainerPort]
"""
return self._ports
@ports.setter
def ports(self, ports):
"""Sets the ports of this V1beta1LightGBMSpec.
List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated. # noqa: E501
:param ports: The ports of this V1beta1LightGBMSpec. # noqa: E501
:type: list[V1ContainerPort]
"""
self._ports = ports
@property
def protocol_version(self):
"""Gets the protocol_version of this V1beta1LightGBMSpec. # noqa: E501
Protocol version to use by the predictor (i.e. v1 or v2) # noqa: E501
:return: The protocol_version of this V1beta1LightGBMSpec. # noqa: E501
:rtype: str
"""
return self._protocol_version
@protocol_version.setter
def protocol_version(self, protocol_version):
"""Sets the protocol_version of this V1beta1LightGBMSpec.
Protocol version to use by the predictor (i.e. v1 or v2) # noqa: E501
:param protocol_version: The protocol_version of this V1beta1LightGBMSpec. # noqa: E501
:type: str
"""
self._protocol_version = protocol_version
@property
def readiness_probe(self):
"""Gets the readiness_probe of this V1beta1LightGBMSpec. # noqa: E501
:return: The readiness_probe of this V1beta1LightGBMSpec. # noqa: E501
:rtype: V1Probe
"""
return self._readiness_probe
@readiness_probe.setter
def readiness_probe(self, readiness_probe):
"""Sets the readiness_probe of this V1beta1LightGBMSpec.
:param readiness_probe: The readiness_probe of this V1beta1LightGBMSpec. # noqa: E501
:type: V1Probe
"""
self._readiness_probe = readiness_probe
@property
def resources(self):
"""Gets the resources of this V1beta1LightGBMSpec. # noqa: E501
:return: The resources of this V1beta1LightGBMSpec. # noqa: E501
:rtype: V1ResourceRequirements
"""
return self._resources
@resources.setter
def resources(self, resources):
"""Sets the resources of this V1beta1LightGBMSpec.
:param resources: The resources of this V1beta1LightGBMSpec. # noqa: E501
:type: V1ResourceRequirements
"""
self._resources = resources
@property
def runtime_version(self):
"""Gets the runtime_version of this V1beta1LightGBMSpec. # noqa: E501
Runtime version of the predictor docker image # noqa: E501
:return: The runtime_version of this V1beta1LightGBMSpec. # noqa: E501
:rtype: str
"""
return self._runtime_version
@runtime_version.setter
def runtime_version(self, runtime_version):
"""Sets the runtime_version of this V1beta1LightGBMSpec.
Runtime version of the predictor docker image # noqa: E501
:param runtime_version: The runtime_version of this V1beta1LightGBMSpec. # noqa: E501
:type: str
"""
self._runtime_version = runtime_version
@property
def security_context(self):
"""Gets the security_context of this V1beta1LightGBMSpec. # noqa: E501
:return: The security_context of this V1beta1LightGBMSpec. # noqa: E501
:rtype: V1SecurityContext
"""
return self._security_context
@security_context.setter
def security_context(self, security_context):
"""Sets the security_context of this V1beta1LightGBMSpec.
:param security_context: The security_context of this V1beta1LightGBMSpec. # noqa: E501
:type: V1SecurityContext
"""
self._security_context = security_context
@property
def startup_probe(self):
"""Gets the startup_probe of this V1beta1LightGBMSpec. # noqa: E501
:return: The startup_probe of this V1beta1LightGBMSpec. # noqa: E501
:rtype: V1Probe
"""
return self._startup_probe
@startup_probe.setter
def startup_probe(self, startup_probe):
"""Sets the startup_probe of this V1beta1LightGBMSpec.
:param startup_probe: The startup_probe of this V1beta1LightGBMSpec. # noqa: E501
:type: V1Probe
"""
self._startup_probe = startup_probe
@property
def stdin(self):
"""Gets the stdin of this V1beta1LightGBMSpec. # noqa: E501
Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. # noqa: E501
:return: The stdin of this V1beta1LightGBMSpec. # noqa: E501
:rtype: bool
"""
return self._stdin
@stdin.setter
def stdin(self, stdin):
"""Sets the stdin of this V1beta1LightGBMSpec.
Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. # noqa: E501
:param stdin: The stdin of this V1beta1LightGBMSpec. # noqa: E501
:type: bool
"""
self._stdin = stdin
@property
def stdin_once(self):
"""Gets the stdin_once of this V1beta1LightGBMSpec. # noqa: E501
Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container process that reads from stdin will never receive an EOF. Default is false. # noqa: E501
:return: The stdin_once of this V1beta1LightGBMSpec. # noqa: E501
:rtype: bool
"""
return self._stdin_once
@stdin_once.setter
def stdin_once(self, stdin_once):
"""Sets the stdin_once of this V1beta1LightGBMSpec.
Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container process that reads from stdin will never receive an EOF. Default is false. # noqa: E501
:param stdin_once: The stdin_once of this V1beta1LightGBMSpec. # noqa: E501
:type: bool
"""
self._stdin_once = stdin_once
@property
def storage_uri(self):
"""Gets the storage_uri of this V1beta1LightGBMSpec. # noqa: E501
This field points to the location of the trained model which is mounted onto the pod. # noqa: E501
:return: The storage_uri of this V1beta1LightGBMSpec. # noqa: E501
:rtype: str
"""
return self._storage_uri
@storage_uri.setter
def storage_uri(self, storage_uri):
"""Sets the storage_uri of this V1beta1LightGBMSpec.
This field points to the location of the trained model which is mounted onto the pod. # noqa: E501
:param storage_uri: The storage_uri of this V1beta1LightGBMSpec. # noqa: E501
:type: str
"""
self._storage_uri = storage_uri
@property
def termination_message_path(self):
"""Gets the termination_message_path of this V1beta1LightGBMSpec. # noqa: E501
Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated. # noqa: E501
:return: The termination_message_path of this V1beta1LightGBMSpec. # noqa: E501
:rtype: str
"""
return self._termination_message_path
@termination_message_path.setter
def termination_message_path(self, termination_message_path):
"""Sets the termination_message_path of this V1beta1LightGBMSpec.
Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated. # noqa: E501
:param termination_message_path: The termination_message_path of this V1beta1LightGBMSpec. # noqa: E501
:type: str
"""
self._termination_message_path = termination_message_path
@property
def termination_message_policy(self):
"""Gets the termination_message_policy of this V1beta1LightGBMSpec. # noqa: E501
Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. # noqa: E501
:return: The termination_message_policy of this V1beta1LightGBMSpec. # noqa: E501
:rtype: str
"""
return self._termination_message_policy
@termination_message_policy.setter
def termination_message_policy(self, termination_message_policy):
"""Sets the termination_message_policy of this V1beta1LightGBMSpec.
Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. # noqa: E501
:param termination_message_policy: The termination_message_policy of this V1beta1LightGBMSpec. # noqa: E501
:type: str
"""
self._termination_message_policy = termination_message_policy
@property
def tty(self):
"""Gets the tty of this V1beta1LightGBMSpec. # noqa: E501
Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. # noqa: E501
:return: The tty of this V1beta1LightGBMSpec. # noqa: E501
:rtype: bool
"""
return self._tty
@tty.setter
def tty(self, tty):
"""Sets the tty of this V1beta1LightGBMSpec.
Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. # noqa: E501
:param tty: The tty of this V1beta1LightGBMSpec. # noqa: E501
:type: bool
"""
self._tty = tty
@property
def volume_devices(self):
"""Gets the volume_devices of this V1beta1LightGBMSpec. # noqa: E501
volumeDevices is the list of block devices to be used by the container. # noqa: E501
:return: The volume_devices of this V1beta1LightGBMSpec. # noqa: E501
:rtype: list[V1VolumeDevice]
"""
return self._volume_devices
@volume_devices.setter
def volume_devices(self, volume_devices):
"""Sets the volume_devices of this V1beta1LightGBMSpec.
volumeDevices is the list of block devices to be used by the container. # noqa: E501
:param volume_devices: The volume_devices of this V1beta1LightGBMSpec. # noqa: E501
:type: list[V1VolumeDevice]
"""
self._volume_devices = volume_devices
@property
def volume_mounts(self):
"""Gets the volume_mounts of this V1beta1LightGBMSpec. # noqa: E501
Pod volumes to mount into the container's filesystem. Cannot be updated. # noqa: E501
:return: The volume_mounts of this V1beta1LightGBMSpec. # noqa: E501
:rtype: list[V1VolumeMount]
"""
return self._volume_mounts
@volume_mounts.setter
def volume_mounts(self, volume_mounts):
"""Sets the volume_mounts of this V1beta1LightGBMSpec.
Pod volumes to mount into the container's filesystem. Cannot be updated. # noqa: E501
:param volume_mounts: The volume_mounts of this V1beta1LightGBMSpec. # noqa: E501
:type: list[V1VolumeMount]
"""
self._volume_mounts = volume_mounts
@property
def working_dir(self):
"""Gets the working_dir of this V1beta1LightGBMSpec. # noqa: E501
Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. # noqa: E501
:return: The working_dir of this V1beta1LightGBMSpec. # noqa: E501
:rtype: str
"""
return self._working_dir
@working_dir.setter
def working_dir(self, working_dir):
"""Sets the working_dir of this V1beta1LightGBMSpec.
Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. # noqa: E501
:param working_dir: The working_dir of this V1beta1LightGBMSpec. # noqa: E501
:type: str
"""
self._working_dir = working_dir
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta1LightGBMSpec):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta1LightGBMSpec):
return True
return self.to_dict() != other.to_dict()
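# A minimal usage sketch (illustrative only, not part of the generated model):
# build a spec, round-trip it through to_dict(), and exercise the equality
# helpers. The storage URI and runtime version below are hypothetical values.
if __name__ == "__main__":
    _spec = V1beta1LightGBMSpec(
        storage_uri="gs://example-bucket/lightgbm-model",  # hypothetical model location
        protocol_version="v2",
        runtime_version="3.2.0",  # hypothetical runtime tag
    )
    _same = V1beta1LightGBMSpec(
        storage_uri="gs://example-bucket/lightgbm-model",
        protocol_version="v2",
        runtime_version="3.2.0",
    )
    assert _spec.to_dict()["protocol_version"] == "v2"
    assert _spec == _same and not (_spec != _same)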
|
|
# -*- coding: utf-8 -*-
"""
Created on Sat May 10 00:29:46 2014
@author: Nate
"""
import ctypes
#from numpy.ctypeslib import ndpointer
import numpy
from bitarray import bitarray
import time, pdb
import file_locations, uuid
from numpy.ctypeslib import ndpointer
class repas():
def __init__(self):
print file_locations.file_locations['repasint_dll'][uuid.getnode()]
self.lib = ctypes.cdll.LoadLibrary(file_locations.file_locations['repasint_dll'][uuid.getnode()]) #"c:\\Users\\Nate\\documents\\visual studio 2010\\Projects\\testctype\\x64\\Release\\testctype.dll"
suffixes=["","dev","pulse","pulsedev"]
# bit-flipping versions
repasops={"uint8": ctypes.c_ubyte, "uint32": ctypes.c_uint32}
for suffix in suffixes:
for ra in repasops:
try:
setattr(self,"fun_repas"+ra+suffix,getattr(self.lib,"repas"+ra+suffix))
getattr(self,"fun_repas"+ra+suffix).argtypes = [ndpointer(ctypes.c_uint32, flags="C_CONTIGUOUS"), ctypes.c_uint32,
ndpointer(ctypes.c_uint32, flags="C_CONTIGUOUS"), repasops[ra],
ndpointer(repasops[ra], flags="C_CONTIGUOUS"), ctypes.c_uint32]
getattr(self,"fun_repas"+ra+suffix).restype = None
except: continue
# def __del__(self):
# del self.lib
def repasint(self,flipstring,firstelm,PULSER=False,DEV=False):
#pdb.set_trace()
ptrs = ((flipstring==0).nonzero())[0].astype(numpy.uint32)
outbits=ptrs.size
outdata = numpy.empty(numpy.unique(flipstring).shape[0]+2, dtype=getattr(numpy,"uint"+str(outbits))) #JZ add +2 on the array size.
func = "fun_repasuint" + str(outbits) + ({True: "pulse", False: ""})[PULSER] + ({True: "dev", False: ""})[DEV]
getattr(self,func)(flipstring, flipstring.size, ptrs,firstelm, outdata,outdata.size) # JZ and NDG remove the +1 from the last parameter.
#if not PULSER: outdata[-1]=0 # this was inserted to reproduce a bug in original code NDG 081414
return outdata[1:-1] # Change outdata[1:] to outdata[1:-1] JZ 8/18/2014: the first element is the seed, and the last element is an extra one reserved as memory space for the last iteration of the pointer.
def OldRepresentAsInteger(self,channeltimes,seed, PULSER=False):
ptrs = ((channeltimes==0).nonzero())[0].astype(numpy.uint32)
Nchan = ptrs.size
Nbitout = ptrs.size # number of bits in integer to use
try:
dtype = {0:numpy.uint8,8:numpy.uint8,16:numpy.uint16,32:numpy.uint32,64:numpy.uint64}[Nbitout] # data type for output
except KeyError:
raise ValueError("unsupported number of output bits: %d (expected 0, 8, 16, 32 or 64)" % Nbitout)
# find the final resting places of the pointers
fptrs = [ptr for ptr in ptrs[1:]]
# add in end pointer
fptrs.append(channeltimes.shape[0])
fptrs = numpy.array(fptrs)
# create a bit-array to represent all channel outputs
bits = bitarray(bin(seed)[2:].zfill(Nchan))#bitarray([1]*Nchan)
bits.reverse()
# create arrays of output times and values for a single channel
numtimes = len(numpy.unique(channeltimes))
outvals = numpy.empty(numtimes,dtype=dtype)
outtimes = numpy.empty(numtimes,dtype=numpy.uint64)
outptr = 0 # a pointer to the first currently unwritten output element
if PULSER:
optrs=ptrs
while not (ptrs == fptrs).all():
active = ptrs<fptrs # identify active pointers
tim = min(channeltimes[ptrs[active.nonzero()]]) # current time smallest value for "active" pointers
#LRJ 10-30-2013 hitstrue disables unused channels
lineindex=0
hitstrue=[]
for ct in channeltimes[ptrs]:
if (ptrs[lineindex]-optrs[lineindex])==2 and ct==tim:#self.channels.values()[lineindex].intedges.shape[1] == 2 and ct==time:
hitstrue.append(False)
else:
hitstrue.append(ct==tim)
lineindex+=1
hits = [ct == tim for ct in channeltimes[ptrs]] # find active pointers
bits = bitarray(hitstrue) # assign bits based on whether a matching time was found
# populate output arrays
outvals[outptr] = numpy.fromstring((bits.tobytes()[::-1]),dtype=dtype)
outtimes[outptr] = tim
# advances pointers if active and hits are both true for that pointer.
ptrs += numpy.logical_and(active, hits)
outptr += 1
else:
while not (ptrs == fptrs).all():
active = ptrs<fptrs # identify active pointers
tim = min(channeltimes[ptrs[active.nonzero()]]) # current time smallest value for "active" pointers
flips = [ct == tim for ct in channeltimes[ptrs]] # find active pointers
bits = bits^bitarray(flips) # flip bits where updates dictate using bitwise XOR
# populate output arrays
outvals[outptr] = numpy.fromstring((bits[::-1].tobytes()[::-1]), dtype = dtype)
outtimes[outptr] = tim
# advance pointers where active and flips are both true for that pointer.
ptrs += numpy.logical_and(active, flips)
outptr += 1
# Now change final values to be zeros.
bits = bitarray([0]*Nchan)
outvals[-1] = numpy.fromstring((bits[::-1].tobytes()[::-1]), dtype = dtype)
return outvals
repas=repas()
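# A minimal pure-Python sketch (no DLL required) of the bit-flip idea that
# repasint/OldRepresentAsInteger implement: each channel contributes a sorted
# list of times at which its bit toggles; at every unique time the current
# state is XORed with a mask of the channels toggling there and emitted as an
# integer word. The channel layout and seed handling here are illustrative
# assumptions, not the exact DLL calling convention.
def toggle_times_to_words(channel_times, seed=0):
    """channel_times: one sorted list of toggle times per channel (bit)."""
    events = sorted(set(t for ch in channel_times for t in ch))
    state = seed
    words = []
    for t in events:
        mask = 0
        for bit, ch in enumerate(channel_times):
            if t in ch:
                mask |= (1 << bit)
        state ^= mask  # flip the bits of every channel toggling at time t
        words.append((t, state))
    return words
# e.g. toggle_times_to_words([[0, 5], [0, 3, 5]]) -> [(0, 3), (3, 1), (5, 2)]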
def test_repas():
def generate_testdata(scale, nbits):
a=numpy.array([0,100*scale]*(nbits-5), dtype=numpy.uint32)
a=numpy.append(a,numpy.arange(0,scale, dtype=numpy.uint32)*10)
a=numpy.append(a,numpy.array([100*scale], dtype=numpy.uint32))
a=numpy.append(a,numpy.arange(0,scale, dtype=numpy.uint32)*15)
a=numpy.append(a,numpy.array([100*scale], dtype=numpy.uint32))
a=numpy.append(a,numpy.array([0,100*scale]*(2), dtype=numpy.uint32))
a=numpy.append(a,numpy.array([0,150*scale]*(1), dtype=numpy.uint32))
return a
VERBOSE=False
BENCHMARK=False
DEVELOP=True
PULSER=True
NBITS=8
a=generate_testdata(10000000,NBITS)
bits=bitarray(NBITS)
seed = numpy.fromstring((bits[::-1].tobytes()[::-1]), dtype = getattr(numpy,"uint"+str(NBITS)))
t0=time.time()
outdata=repas.repasint(a,seed[0],PULSER=PULSER)
print time.time()-t0
if VERBOSE:
strout=""
for line in range(0,NBITS):
for elm in outdata:
strout += str((int(elm) >> line) % 2)
strout += "\n"
print strout
if DEVELOP:
t0=time.time()
outdata3=repas.repasint(a,seed[0],DEV=True, PULSER=PULSER)
print time.time()-t0
if VERBOSE:
strout=""
for line in range(0,NBITS):
for elm in outdata3:
strout += str((int(elm) >> line) % 2)
strout += "\n"
print strout
print "equality check: ", (outdata3==outdata).all()
if BENCHMARK:
t0=time.time()
outdata2=repas.OldRepresentAsInteger(a,seed[0], PULSER=PULSER)
print time.time()-t0
if VERBOSE:
strout=""
for line in range(0,NBITS):
for elm in outdata2:
strout += str((int(elm) >> line) % 2)
strout += "\n"
print strout
print "equality check: ", (outdata2==outdata).all()
def test_merge_sorted():
VERBOSE=False
PERF_TEST=True
BM_SS=True
if PERF_TEST:
a0=numpy.arange(3000000,dtype=numpy.float64)#numpy.array([2.0,3.0,4.0,9.0],dtype=numpy.float64)
b1=numpy.array([17.5,100],dtype=numpy.float64)##numpy.array([3.0,7.0,8.0],dtype=numpy.float64)
b0=numpy.array([22.5],dtype=numpy.float64)##numpy.array([3.0,7.0,8.0],dtype=numpy.float64)
b2=numpy.array([10,17,24],dtype=numpy.float64)##numpy.array([3.0,7.0,8.0],dtype=numpy.float64)
a1=2.5*numpy.arange(10000013,dtype=numpy.float64)
arrs=[a0,b0,a1,b1,b2]
else:
a=numpy.array([2.0,3.0,4.0,9.0],dtype=numpy.float64)
a1=numpy.array([2.5,3.0,4.5,9.0],dtype=numpy.float64)
b=numpy.array([3.0,7.0,8.0],dtype=numpy.float64)
arrs=[a,a1,b]
t0=time.time()
lens = (ctypes.c_ulong*(len(arrs)+1))()
totsiz=0
for arr in range(len(arrs)):
lens[arr]=arrs[arr].size
totsiz+=lens[arr]
outarr=numpy.empty(totsiz,dtype=numpy.float64)
arrs.append(outarr)
if VERBOSE:
for arr in arrs: print arr
ctypes_arrays = [numpy.ctypeslib.as_ctypes(array) for array in arrs]
pointer_ar = (ctypes.POINTER(ctypes.c_longdouble) * len(arrs))(*ctypes_arrays)
ctypes_lens=ctypes.POINTER(ctypes.c_uint32)(lens)
ctypes_arrnum = ctypes.c_uint16(len(arrs));
ctypes.CDLL(file_locations.file_locations['repasint_dll'][uuid.getnode()]).merge_sorted_arrays3(pointer_ar,ctypes_arrnum,ctypes_lens)
cc=outarr[0:ctypes_lens[len(arrs)-1]-1]
t1=time.time()
if VERBOSE:
for arr in arrs: print arr
print "length:" ,lens[len(arrs)-1]
t2=time.time()
ss = numpy.unique(numpy.concatenate(arrs[0:-1]))
t3=time.time()
print "Agrees with numpy.unique(numpy.concatenate(x)): ", numpy.array_equal(ss,cc)
print "merge_sorted time:", t1-t0
print "numpy sort time:", t3-t2
print "merge_sorted/numpy: ", (t1-t0)/(t3-t2)
if BM_SS:
t4=time.time()
dd=numpy.insert(a0,a0.searchsorted(b0),b0)
t5=time.time()
print "inserted ", b0.size, " elms with searchsorted in ", t5-t4
pdb.set_trace()
class DataWrongShapeError(Exception):
pass
def merge_sorted_orig(arrs):
"""
Merges a list of pre-sorted 64-bit floating point (numpy) arrays, discarding duplicate items,
and returns the merged array. Equivalent to numpy.unique(numpy.concatenate(arrs)) for pre-sorted
arrays without self-duplicates, but ~6x faster.
This routine calls a precompiled DLL library function.
"""
if type(arrs)!=type([]): arrs=[arrs]
num_arr=len(arrs)
lens = (ctypes.c_ulong*(len(arrs)+1))()
totsiz=0
for arr in range(len(arrs)):
if len(arrs[arr].shape)!=1: arrs[arr]=arrs[arr].flatten()
#arrs[arr]=numpy.asarray(arrs[arr],dtype=numpy.float64).astype(numpy.float64)
lens[arr]=arrs[arr].size
totsiz+=lens[arr]
outarr=numpy.empty(totsiz+1,dtype=numpy.float64)
arrs.append(outarr)
ctypes_arrays = [numpy.ctypeslib.as_ctypes(array) for array in arrs]
pointer_ar = (ctypes.POINTER(ctypes.c_longdouble) * len(arrs))(*ctypes_arrays)
ctypes_lens=ctypes.POINTER(ctypes.c_uint32)(lens)
ctypes_arrnum = ctypes.c_uint16(len(arrs));
ctypes.CDLL(file_locations.file_locations['repasint_dll'][uuid.getnode()]).merge_sorted_drop_dup(pointer_ar,ctypes_arrnum,ctypes_lens)
cc=(outarr[1:ctypes_lens[num_arr]])
del arrs[-1]
#pdb.set_trace()
return cc
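# A pure-numpy reference for the behaviour documented above (illustrative;
# it does not exercise the DLL path): for pre-sorted float64 arrays without
# self-duplicates the output should match merge_sorted_orig.
def merge_sorted_reference(arrs):
    if type(arrs) != type([]): arrs = [arrs]
    return numpy.unique(numpy.concatenate(
        [numpy.asarray(a, dtype=numpy.float64).ravel() for a in arrs]))
# e.g. merge_sorted_reference([numpy.array([2.0, 3.0, 4.0, 9.0]),
#                              numpy.array([3.0, 7.0, 8.0])])
# -> array([ 2.,  3.,  4.,  7.,  8.,  9.])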
def merge_sorted(arrs,track_indices=False,cast_up=True):
"""
Merges a list of pre-sorted 64-bit floating point (numpy) arrays, discarding duplicate items.
Returns the merged array and REPLACES the input arrays' values with a record of the corresponding index in the output
(merged) array. Aside from index-tracking, this is equivalent to numpy.unique(numpy.concatenate(arrs))
for pre-sorted arrays without self-duplicates, but ~6x faster.
example:
a=numpy.arange(1000,dtype=numpy.float64)*.01
b=numpy.arange(1000,dtype=numpy.float64)*.01+.002
ao=a.copy()
bo=b.copy()
arr=[a,b]
c=merge_sorted(arr,track_indices=True)
c[0:10]
<< array([ 0. , 0.002, 0.01 , 0.012, 0.02 , 0.022, 0.03 , 0.032,
0.04 , 0.042])
a[0:10]
<< array([ 0., 2., 4., 6., 8., 10., 12., 14., 16., 18.])
b[0:10]
<< array([ 1., 3., 5., 7., 9., 11., 13., 15., 17., 19.])
ao[7]
<< 0.070000000000000007
c[a[7]]
<< 0.070000000000000007
numpy.array_equal(c[a.astype(numpy.uint32)],ao)
<< True
this routine calls a precompiled dll library function
"""
# analyze input arrays to flatten if necessary and find total size
if type(arrs)!=type([]): arrs=[arrs]
num_arr=len(arrs)
lens = (ctypes.c_ulong*(len(arrs)+1))()
totsiz=0
for arr in range(len(arrs)):
if len(arrs[arr].shape)!=1: arrs[arr]=arrs[arr].flatten()
lens[arr]=arrs[arr].size
totsiz+=lens[arr]
# scan input arrays to find appropriate data type for output
input_types=[arr.dtype for arr in arrs]
cast_to_bytes=sorted([it.itemsize for it in input_types])[{True:-1,False:0}[cast_up]]
cast_to_type=input_types[[it.itemsize for it in input_types].index(cast_to_bytes)]
cast_to_ctype=numpy_type_to_C(cast_to_type)
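# Worked example of the cast selection (illustrative): for inputs of dtype
# float32 (itemsize 4) and float64 (itemsize 8) with cast_up=True, the sorted
# itemsizes are [4, 8], index -1 picks 8, so everything is cast to float64;
# with cast_up=False, index 0 picks 4 and float32 is used instead.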
# if necessary, cast inputs to common type
for arr,i in [(arr,i) for arr,i in zip(arrs,range(num_arr)) if arr.dtype!=cast_to_type]:
arrs[i]=arr.astype(cast_to_type)
# define output and input arrays for passage to dll
outarr=numpy.empty(totsiz+1,dtype=cast_to_type)
arrs.append(outarr)
ctypes_arrays = [numpy.ctypeslib.as_ctypes(array) for array in arrs]
pointer_ar = (ctypes.POINTER(cast_to_ctype) * len(arrs))(*ctypes_arrays)
# create variables to pass lengths and array counts
ctypes_lens=ctypes.POINTER(ctypes.c_uint32)(lens)
ctypes_arrnum = ctypes.c_uint16(len(arrs));
# call the appropriate dll function
dll_name=file_locations.file_locations['repasint_dll'][uuid.getnode()]
func_root = "merge_sorted_"
func_tail = {True:"track_indices_", False:""}[track_indices] + str(cast_to_type)
getattr(ctypes.CDLL(dll_name),func_root+func_tail)(pointer_ar,ctypes_arrnum,ctypes_lens)
# trim the output, undo the side effect of appending the output array to the input list, and return
cc=(outarr[1:ctypes_lens[num_arr]])
del arrs[-1]
return cc
def numpy_type_to_C(dtype):
return {numpy.dtype('float64'):ctypes.c_double, numpy.dtype('float32'):ctypes.c_float}[dtype]
|
|
# -*- coding: utf-8 -*-
"""
Inventory Management
A module to record inventories of items at locations (sites),
including Warehouses, Offices, Shelters & Hospitals
"""
module = request.controller
resourcename = request.function
if not deployment_settings.has_module(module):
raise HTTP(404, body="Module disabled: %s" % module)
# -----------------------------------------------------------------------------
def index():
"""
Application Home page
- custom View
"""
# Load models
s3mgr.load("cr_shelter") # Need CRUD String
module_name = deployment_settings.modules[module].name_nice
response.title = module_name
return dict(module_name=module_name)
# =============================================================================
def office():
"""
Required to ensure the tabs work from req_match
"""
return warehouse()
def warehouse():
"""
RESTful CRUD controller
Filtered version of the org_office resource
"""
module = "org"
resourcename = "office"
tablename = "org_office"
table = s3db[tablename]
s3mgr.load("inv_inv_item")
if "viewing" in request.get_vars:
viewing = request.get_vars.viewing
tn, record = viewing.split(".", 1)
if tn == "org_office":
request.args.insert(0, record)
s3.crud_strings[tablename] = s3.org_warehouse_crud_strings
# Type is Warehouse
table.type.default = 5 # Warehouse
table.type.writable = False
# Only show warehouses
response.s3.filter = (table.type == 5)
# Remove type from list_fields
list_fields = s3mgr.model.get_config(tablename, "list_fields")
try:
list_fields.remove("type")
except:
# Already removed
pass
s3mgr.configure(tablename, list_fields=list_fields)
warehouse_search = s3base.S3Search(
advanced=(s3base.S3SearchSimpleWidget(
name="warehouse_search_text",
label=T("Search"),
comment=T("Search for warehouse by text."),
field=["name","comments", "email"]
),
s3base.S3SearchOptionsWidget(
name="warehouse_search_org",
label=T("Organization"),
comment=T("Search for warehouse by organization."),
field=["organisation_id"],
represent ="%(name)s",
cols = 3
),
s3base.S3SearchLocationHierarchyWidget(
name="warehouse_search_location",
comment=T("Search for warehouse by location."),
represent ="%(name)s",
cols = 3
),
s3base.S3SearchLocationWidget(
name="warehouse_search_map",
label=T("Map"),
),
))
s3mgr.configure(tablename,
search_method = warehouse_search)
# CRUD pre-process
def prep(r):
if r.interactive and r.tablename == "org_office":
if r.method != "read":
# Don't want to see in Create forms
# inc list_create (list_fields over-rides)
r.table.obsolete.writable = False
r.table.obsolete.readable = False
address_hide(table)
# Process Base Location
#s3mgr.configure(table._tablename,
# onaccept=address_onaccept)
if r.component:
if r.component.name == "inv_item":
# Filter out items which are already in this inventory
s3db.inv_prep(r)
# Remove the Warehouse Name from the list_fields
list_fields = s3mgr.model.get_config("inv_inv_item", "list_fields")
try:
list_fields.remove("site_id")
s3mgr.configure("inv_inv_item", list_fields=list_fields)
except:
pass
elif r.component.name == "recv" or \
r.component.name == "send":
# Filter out items which are already in this inventory
s3db.inv_prep(r)
elif r.component.name == "human_resource":
# Filter out people which are already staff for this warehouse
s3_filter_staff(r)
# Cascade the organisation_id from the hospital to the staff
htable = s3db.hrm_human_resource
htable.organisation_id.default = r.record.organisation_id
htable.organisation_id.writable = False
elif r.component.name == "req":
s3db.req_prep(r)
if r.method != "update" and r.method != "read":
# Hide fields which don't make sense in a Create form
# inc list_create (list_fields over-rides)
s3db.req_create_form_mods()
# "show_obsolete" var option can be added (btn?) later to
# disable this filter
if r.method in [None, "list"] and \
not r.vars.get("show_obsolete", False):
r.resource.add_filter((s3db.org_office.obsolete != True))
return True
response.s3.prep = prep
# CRUD post-process
def postp(r, output):
if r.interactive and not r.component and r.method != "import":
# Change Action buttons to open Stock Tab by default
read_url = URL(f="warehouse", args=["[id]", "inv_item"])
update_url = URL(f="warehouse", args=["[id]", "inv_item"])
s3mgr.crud.action_buttons(r,
read_url=read_url,
update_url=update_url)
if "add_btn" in output:
del output["add_btn"]
return output
response.s3.postp = postp
rheader = response.s3.inv_warehouse_rheader
if "extra_data" in request.get_vars:
csv_template = "inv_item"
module = "inv"
resourcename = "inv_item"
else:
csv_template = "warehouse"
csv_stylesheet = "%s.xsl" % csv_template
output = s3_rest_controller(module, resourcename,
rheader=rheader,
csv_template = csv_template,
csv_stylesheet = csv_stylesheet,
# Extra fields for CSV uploads:
csv_extra_fields = [
dict(label="Organisation",
field=s3db.org_organisation_id(comment=None))
])
return output
# =============================================================================
def incoming():
""" Incoming Shipments """
# Defined in the Model for use from Multiple Controllers for unified menus
return inv_incoming()
# =============================================================================
def req_match():
""" Match Requests """
return s3db.req_match()
# =============================================================================
def inv_item():
""" REST Controller """
table = s3db.inv_inv_item
# Upload for configuration (add replace option)
response.s3.importerPrep = lambda: dict(ReplaceOption=T("Remove existing data before import"))
# Import pre-process
def import_prep(data):
"""
Deletes all Stock records of the organisation
before processing a new data import, used for the import_prep
hook in s3mgr
"""
request = current.request
resource, tree = data
xml = s3mgr.xml
tag = xml.TAG
att = xml.ATTRIBUTE
if response.s3.importerReplace:
if tree is not None:
root = tree.getroot()
expr = "/%s/%s[@%s='org_organisation']/%s[@%s='name']" % \
(tag.root, tag.resource, att.name, tag.data, att.field)
orgs = root.xpath(expr)
otable = s3db.org_organisation
stable = s3db.org_site
itable = s3db.inv_inv_item
for org in orgs:
org_name = org.get("value", None) or org.text
if org_name:
try:
org_name = json.loads(s3mgr.xml.xml_decode(org_name))
except:
pass
if org_name:
query = (otable.name == org_name) & \
(stable.organisation_id == otable.id) & \
(itable.site_id == stable.id)
resource = s3mgr.define_resource("inv", "inv_item", filter=query)
ondelete = s3mgr.model.get_config("inv_inv_item", "ondelete")
resource.delete(ondelete=ondelete, format="xml")
resource.skip_import = True
s3mgr.import_prep = import_prep
# Limit site_id to sites the user has permissions for
auth.permission.permitted_facilities(table=table,
error_msg=T("You do not have permission for any site to add an inventory item."))
# remove CRUD generated buttons in the tabs
s3mgr.configure("inv_inv_item",
create=False,
listadd=False,
editable=False,
deletable=False,
)
rheader = response.s3.inv_warehouse_rheader
output = s3_rest_controller("inv",
"inv_item",
rheader=rheader,
csv_extra_fields = [
dict(label="Organisation",
field=s3db.org_organisation_id(comment=None)
)
],
interactive_report = True
)
if "add_btn" in output:
del output["add_btn"]
return output
# -----------------------------------------------------------------------------
def inv_item_quantity():
"""
"""
table = s3db.inv_inv_item
ptable = s3db.supply_item_pack
query = (table.id == request.args[0]) & \
(table.item_pack_id == ptable.id)
record = db(query).select(table.quantity,
ptable.quantity,
limitby=(0, 1)).first()
response.headers["Content-Type"] = "application/json"
return json.dumps(record)
# -----------------------------------------------------------------------------
def inv_item_packs():
"""
Called by S3FilterFieldChange to provide the pack options for a
particular Item
"""
table = s3db.inv_inv_item
ptable = s3db.supply_item_pack
query = (table.id == request.args[0]) & \
(table.item_id == ptable.item_id)
records = db(query).select(ptable.id,
ptable.name,
ptable.quantity)
response.headers["Content-Type"] = "application/json"
return records.json()
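# The response is a JSON list of the pack options for the item's supply item,
# e.g. (illustrative values only):
# [{"id": 3, "name": "Box of 10", "quantity": 10}, {"id": 4, "name": "Each", "quantity": 1}]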
# =============================================================================
def send():
""" RESTful CRUD controller """
s3mgr.load("inv_send")
s3db = current.s3db
db = current.db
auth = current.auth
request = current.request
response = current.response
s3 = response.s3
tablename = "inv_send"
table = s3db.inv_send
# Limit site_id to sites the user has permissions for
error_msg = T("You do not have permission for any facility to send a shipment.")
auth.permission.permitted_facilities(table=table,
error_msg=error_msg)
# Set Validator for checking against the number of items in the warehouse
vars = request.vars
if (vars.send_stock_id):
s3db.inv_track_item.quantity.requires = QUANTITY_INV_ITEM(db,
vars.send_stock_id,
vars.item_pack_id)
def prep(r):
# Default to the Search tab in the location selector
response.s3.gis.tab = "search"
if r.component:
# Can only create or delete track items for a send record if the status is preparing
if r.method == "create" or r.method == "delete":
record = table[r.id]
if record.status != 1:
return False
if r.method == "delete":
return s3.inv_track_item_deleting(r.component_id)
if r.record.get("site_id"):
# Restrict to items from this warehouse only
tracktable = s3db.inv_track_item
comp_rec = tracktable[r.component_id]
tracktable.send_stock_id.requires = IS_ONE_OF(db,
"inv_inv_item.id",
s3db.inv_item_represent,
orderby="inv_inv_item.id",
sort=True,
filterby = "site_id",
filter_opts = [r.record.site_id]
)
# Hide the values that will be copied from the inv_inv_item record
if comp_rec and comp_rec.get("req_item_id"):
tracktable.item_id.readable = True
else:
tracktable.item_id.readable = False
tracktable.item_id.writable = False
tracktable.item_source_no.readable = False
tracktable.item_source_no.writable = False
tracktable.expiry_date.readable = False
tracktable.expiry_date.writable = False
tracktable.bin.readable = False
tracktable.bin.writable = False
tracktable.supply_org_id.readable = False
tracktable.supply_org_id.writable = False
# Hide the link to the receive and adjustment records
tracktable.recv_id.readable = False
tracktable.recv_id.writable = False
tracktable.adj_item_id.readable = False
tracktable.adj_item_id.writable = False
if r.interactive:
SHIP_STATUS_IN_PROCESS = s3db.inv_ship_status["IN_PROCESS"]
SHIP_STATUS_SENT = s3db.inv_ship_status["SENT"]
if r.record.status == SHIP_STATUS_IN_PROCESS:
s3.crud_strings.inv_send.title_update = \
s3.crud_strings.inv_send.title_display = T("Process Shipment to Send")
elif "site_id" in request.vars and r.record.status == SHIP_STATUS_SENT:
s3.crud_strings.inv_send.title_update = \
s3.crud_strings.inv_send.title_display = T("Review Incoming Shipment to Receive")
return True
if len(request.args) > 1 and request.args[1] == "track_item" and table[request.args[0]].status:
# remove CRUD generated buttons in the tabs
s3mgr.configure("inv_track_item",
create=False,
listadd=False,
editable=False,
deletable=False,
)
response.s3.prep = prep
output = s3_rest_controller("inv",
"send",
rheader=s3.inv_send_rheader,
)
return output
# ==============================================================================
def prepare_commit():
""" RESTful CRUD controller """
s3mgr.load("inv_send")
s3db = current.s3db
db = current.db
auth = current.auth
request = current.request
response = current.response
s3 = response.s3
# Get the commit and request records
if len(request.args) > 0:
commit_id = request.args[0]
else:
redirect(URL(c = "req",
f = "commit",
)
)
req_table = s3db.req_req
rim_table = s3db.req_req_item
com_table = s3db.req_commit
cim_table = s3db.req_commit_item
send_table = s3db.inv_send
track_table = s3db.inv_track_item
query = (com_table.id == commit_id) & \
(com_table.req_id == req_table.id) & \
(com_table.deleted == False)
record = db(query).select(limitby = (0, 1)).first()
# create a inv_send and link to the commit
send_id = send_table.insert(sender_id = record.req_commit.committer_id,
site_id = record.req_commit.site_id,
recipient_id = record.req_req.requester_id,
to_site_id = record.req_req.site_id,
status = 0)
# get all of the committed items
query = (cim_table.commit_id == commit_id) & \
(cim_table.req_item_id == rim_table.id) & \
(cim_table.deleted == False)
records = db(query).select()
# create inv_track_items for each commit item
for row in records:
id = track_table.insert(track_org_id = record.req_commit.organisation_id,
send_id = send_id,
status = 1,
item_id = row.req_req_item.item_id,
item_pack_id = row.req_req_item.item_pack_id,
quantity = row.req_commit_item.quantity,
currency = row.req_req_item.currency,
req_item_id = row.req_req_item.id)
db(track_table.id == id).update(tracking_no = "TN:%6d" % (10000 + id))
# redirect to inv_send for the send id just created
redirect(URL(c = "inv",
f = "send",
args = [send_id, "track_item"]))
# -----------------------------------------------------------------------------
def send_process():
""" Send a Shipment """
send_id = request.args[0]
stable = s3db.inv_send
tracktable = s3db.inv_track_item
ritable = s3db.req_req_item
otable = s3db.org_office
if not auth.s3_has_permission("update",
stable,
record_id=send_id):
session.error = T("You do not have permission to send this shipment.")
send_record = stable[send_id]
if send_record.status != eden.inv.inv_ship_status["IN_PROCESS"]:
session.error = T("This shipment has already been sent.")
# Get the track items that are part of this shipment
query = ( tracktable.send_id == send_id ) & \
(tracktable.deleted == False)
track_items = db(query).select()
if not track_items:
session.error = T("No items have been selected for shipping.")
if session.error:
redirect(URL(c = "inv",
f = "send",
args = [send_id]))
# Update Send record & lock for editing
stable[send_id] = dict(date = request.utcnow,
status = eden.inv.inv_ship_status["SENT"],
owned_by_user = None,
owned_by_group = ADMIN)
# if this is linked to a request then update the quantity in transit
for track_item in track_items:
if track_item.req_item_id:
db(ritable.id == track_item.req_item_id).update(quantity_transit = ritable.quantity_transit + track_item.quantity)
# Create a Receive record
recv_id = s3db.inv_recv.insert(sender_id = send_record.sender_id,
tracking_no = send_record.tracking_no,
from_site_id = send_record.site_id,
eta = send_record.delivery_date,
recipient_id = send_record.recipient_id,
site_id = send_record.to_site_id,
comments = send_record.comments,
status = eden.inv.inv_ship_status["SENT"],
type = 1, # 1:"Another Inventory"
)
# Change the status for all track items in this shipment to In transit
# and link to the receive record
db(tracktable.send_id == send_id).update(status = 2,
recv_id = recv_id)
session.confirmation = T("Shipment Items sent from Warehouse")
# Go to the Site which has sent these items
site_id = send_record.site_id
(prefix, resourcename, id) = s3mgr.model.get_instance(s3db.org_site,
site_id)
query = (otable.id == id)
otype = db(query).select(otable.type, limitby = (0, 1)).first()
if otype and otype.type == 5:
url = URL(c = "inv",
f = "warehouse",
args = [id, "inv_item"])
else:
url = URL(c = "org",
f = "office",
args = [id, "inv_item"])
redirect(url)
# -----------------------------------------------------------------------------
def send_cancel():
"""
This will cancel a shipment that has been sent
@todo need to roll back commitments
"""
send_id = request.args[0]
stable = s3db.inv_send
rtable = s3db.inv_recv
tracktable = s3db.inv_track_item
if not auth.s3_has_permission("delete",
stable,
record_id=send_id):
session.error = T("You do not have permission to cancel this sent shipment.")
send_record = stable[send_id]
if send_record.status != eden.inv.inv_ship_status["SENT"]:
session.error = T("This shipment has not been sent - it has NOT been canceled because can still be edited.")
if session.error:
redirect(URL(c = "inv",
f = "send",
args = [send_id],
)
)
# Okay no error so far, let's delete that baby
# Change the send and recv status to cancelled
stable[send_id] = dict(date = request.utcnow,
status = eden.inv.inv_ship_status["CANCEL"],
owned_by_user = None,
owned_by_group = ADMIN)
recv_row = db(tracktable.send_id == send_id).select(tracktable.recv_id,
limitby = (0, 1)).first()
if recv_row:
recv_id = recv_row.recv_id
rtable[recv_id] = dict(date = request.utcnow,
status = eden.inv.inv_ship_status["CANCEL"],
owned_by_user = None,
owned_by_group = ADMIN)
# Change the track items status to canceled and then delete them
# If they are linked to a request then the in transit total will also be reduced
# Records can only be deleted if the status is 1 (prepare)
# so change the status before we delete
db(tracktable.send_id == send_id).update(status = 1)
track_rows = db(tracktable.send_id == send_id).select(tracktable.id)
for track_item in track_rows:
s3.inv_track_item_deleting(track_item.id)
# Now change the status to 4 (cancelled)
db(tracktable.send_id == send_id).update(status = 4)
session.confirmation = T("Sent Shipment canceled and items returned to Warehouse")
redirect(URL(c = "inv",
f = "send",
args = [send_id]))
# =============================================================================
def recv():
""" RESTful CRUD controller """
s3mgr.load("inv_recv")
s3mgr.load("inv_adj_item")
tablename = "inv_recv"
table = s3db.inv_recv
# Limit site_id to sites the user has permissions for
if deployment_settings.get_inv_shipment_name() == "order":
error_msg = T("You do not have permission for any facility to add an order.")
else:
error_msg = T("You do not have permission for any facility to receive a shipment.")
auth.permission.permitted_facilities(table=table,
error_msg=error_msg)
# The inv_recv record might be created when the shipment is sent and so it
# might not have the recipient identified. If it is null then set it to
# the person who is logged in (the default)
if len(request.args) > 0:
try:
id = request.args[0]
if table[id].recipient_id == None:
db(table.id == id).update(recipient_id = auth.s3_logged_in_person())
except:
pass
def prep(r):
if r.component:
record = table[r.id]
# Can only create or delete track items for a recv record if the status is preparing
if r.method == "create" or r.method == "delete":
if record.status != 1:
return False
tracktable = s3db.inv_track_item
# Hide the link to the send and adjustment records
tracktable.send_id.readable = False
tracktable.send_id.writable = False
tracktable.recv_id.readable = False
tracktable.recv_id.writable = False
tracktable.bin.readable = False
tracktable.bin.writable = False
tracktable.adj_item_id.readable = False
tracktable.adj_item_id.writable = False
if r.method == "update" and record.status==2:
# Lock the values that were copied from the inv_inv_item record
tracktable.item_source_no.readable = True
tracktable.item_source_no.writable = False
tracktable.item_id.writable = False
tracktable.send_stock_id.writable = False
tracktable.item_pack_id.writable = False
tracktable.quantity.writable = False
tracktable.currency.writable = False
tracktable.pack_value.writable = False
tracktable.expiry_date.writable = False
tracktable.supply_org_id.writable = False
tracktable.recv_quantity.readable = True
tracktable.recv_quantity.writable = True
tracktable.recv_bin.readable = True
tracktable.recv_bin.writable = True
else:
tracktable = s3db.inv_track_item
# Hide the values that will be copied from the inv_inv_item record
tracktable.send_stock_id.readable = False
tracktable.send_stock_id.writable = False
# Display the values that can only be entered on create
tracktable.item_source_no.readable = True
tracktable.item_source_no.writable = True
tracktable.recv_quantity.readable = True
tracktable.recv_bin.readable = True
tracktable.recv_bin.writable = True
SHIP_STATUS_IN_PROCESS = s3db.inv_ship_status["IN_PROCESS"]
if r.record.status == SHIP_STATUS_IN_PROCESS:
s3.crud_strings.inv_recv.title_update = \
s3.crud_strings.inv_recv.title_display = T("Process Received Shipment")
else:
table.sender_id.readable = False
table.sender_id.writable = False
table.from_site_id.readable = False
table.from_site_id.writable = False
if r.id:
record = table[r.id]
# If this is part of a shipment then lock down the type and site_id
if record.sender_id != None:
table.type.writable = False
table.site_id.writable = False
if record.status == 1:
table.recipient_id.writable = False
table.date.writable = False
return True
response.s3.prep = prep
if len(request.args) > 1 and request.args[1] == "track_item" and table[request.args[0]].status:
# remove CRUD generated buttons in the tabs
s3mgr.configure("inv_track_item",
create=False,
listadd=False,
editable=False,
deletable=False,
)
if table[request.args[0]].status == 2:
s3mgr.configure("inv_track_item",
editable=True,
)
output = s3_rest_controller("inv", "recv",
rheader=eden.inv.inv_recv_rheader,
)
return output
# -----------------------------------------------------------------------------
def req_items_for_inv(site_id, quantity_type):
"""
Used by recv_process & send_process
Returns a dict of unique req items (with min db.req_req.date_required | db.req_req.date),
keyed by item_id
@param site_id: The inventory to find the req_items from
@param quantity_type: str ("commit", "transit" or "fulfil") The
quantity type which will be used to determine if this item is still outstanding
"""
if not deployment_settings.has_module("req"):
return Storage()
table = s3db.req_req
itable = s3db.req_req_item
query = ( table.site_id == site_id ) & \
( table.id == itable.req_id) & \
( itable.item_pack_id == itable.item_pack_id) & \
( itable["quantity_%s" % quantity_type] < itable.quantity) & \
( table.cancel == False ) & \
( table.deleted == False ) & \
( itable.deleted == False )
req_items = db(query).select(itable.id,
itable.req_id,
itable.item_id,
itable.quantity,
itable["quantity_%s" % quantity_type],
itable.item_pack_id,
orderby = table.date_required | table.date,
#groupby = itable.item_id
)
# Because groupby doesn't follow the orderby, this will remove any
# duplicate req_item, using the first record according to the orderby
# req_items = req_items.as_dict( key = "req_req_item.item_id") <- doesn't work
# @todo: web2py Rows.as_dict function could be extended to enable this functionality instead
req_item_ids = []
unique_req_items = Storage()
for req_item in req_items:
if req_item.item_id not in req_item_ids:
# This item is not already in the dict
unique_req_items[req_item.item_id] = Storage( req_item.as_dict() )
req_item_ids.append(req_item.item_id)
return unique_req_items
# -----------------------------------------------------------------------------
def req_item_in_shipment( shipment_item,
shipment_type,
req_items,
):
"""
Checks if a shipment item is in a request and updates req_item
and the shipment.
"""
shipment_item_table = "inv_%s_item" % shipment_type
try:
item_id = shipment_item[shipment_item_table].item_id
except:
item_id = shipment_item.inv_inv_item.item_id
# Check for req_items
if item_id in req_items:
shipment_to_req_type = dict(recv = "fulfil",
send = "transit")
quantity_req_type = "quantity_%s" % shipment_to_req_type[shipment_type]
# This item has been requested from this inv
req_item = req_items[item_id]
req_item_id = req_item.id
# Update the req quantity
# convert the shipment item's quantity into the req_item.quantity_fulfil (according to pack)
quantity = req_item[quantity_req_type] + \
(shipment_item[shipment_item_table].pack_quantity / \
req_item.pack_quantity) * \
shipment_item[shipment_item_table].quantity
quantity = min(quantity, req_item.quantity) #Cap at req. quantity
s3db.req_req_item[req_item_id] = {quantity_req_type: quantity}
# Link the shipment_item to the req_item
s3db[shipment_item_table][shipment_item[shipment_item_table].id] = \
dict(req_item_id = req_item_id)
# Flag req record to update status_fulfil
return req_item.req_id, req_item.id
else:
return None, None
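# Worked example of the pack conversion above (illustrative numbers): if the
# request is for 100 boxes of 10 (pack_quantity 10) and the shipment sends
# 20 cartons of 50 (pack_quantity 50), the fulfil/transit quantity increases
# by (50 / 10) * 20 = 100 request packs, capped at the requested 100.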
# -----------------------------------------------------------------------------
def recv_process():
""" Receive a Shipment """
s3mgr.load("inv_adj")
s3mgr.load("inv_adj_item")
recv_id = request.args[0]
rtable = s3db.inv_recv
stable = s3db.inv_send
tracktable = s3db.inv_track_item
ritable = s3db.req_req_item
otable = s3db.org_office
if not auth.s3_has_permission("update",
rtable,
record_id=recv_id):
session.error = T("You do not have permission to receive this shipment.")
recv_record = rtable[recv_id]
if recv_record.status == eden.inv.SHIP_STATUS_RECEIVED:
session.error = T("This shipment has already been received.")
if recv_record.status == eden.inv.SHIP_STATUS_CANCEL:
session.error = T("This shipment has already been received & subsequently canceled.")
if session.error:
redirect(URL(c = "inv",
f = "recv",
args = [recv_id]))
site_id = recv_record.site_id
# Update Receive record & lock for editing
rtable[recv_id] = dict(date = request.utcnow,
status = eden.inv.inv_ship_status["RECEIVED"],
owned_by_user = None,
owned_by_group = ADMIN)
send_row = db(tracktable.recv_id == recv_id).select(tracktable.send_id,
limitby = (0, 1)).first()
if send_row:
send_id = send_row.send_id
stable[send_id] = dict(date = request.utcnow,
status = eden.inv.inv_ship_status["RECEIVED"],
owned_by_user = None,
owned_by_group = ADMIN)
# Change the status for all track items in this shipment to Unloading
# the onaccept will then move the values into the site, update any request
# record, create any adjustment if needed and change the status to Arrived
db(tracktable.recv_id == recv_id).update(status = 3)
# Go to the Inventory of the Site which has received these items
(prefix, resourcename, id) = s3mgr.model.get_instance(s3db.org_site,
site_id)
query = (otable.id == id)
otype = db(query).select(otable.type, limitby = (0, 1)).first()
if otype and otype.type == 5:
url = URL(c = "inv",
f = "warehouse",
args = [id, "inv_item"])
else:
url = URL(c = "org",
f = "office",
args = [id, "inv_item"])
redirect(url)
# -----------------------------------------------------------------------------
def recv_cancel():
"""
Cancel a Received Shipment
@todo what to do if the quantity cancelled doesn't exist?
"""
recv_id = request.args[0]
rtable = s3db.inv_recv
stable = s3db.inv_send
tracktable = s3db.inv_track_item
stocktable = s3db.inv_inv_item
ritable = s3db.req_req_item
if not auth.s3_has_permission("delete",
rtable,
record_id=recv_id):
session.error = T("You do not have permission to cancel this received shipment.")
recv_record = rtable[recv_id]
if recv_record.status != eden.inv.inv_ship_status["RECEIVED"]:
session.error = T("This shipment has not been received - it has NOT been canceled because can still be edited.")
if session.error:
redirect(URL(c = "inv",
f = "recv",
args = [recv_id]))
# Go through each item in the shipment, remove it from the site store
# and put it back in the track item record
query = (tracktable.recv_id == recv_id) & \
(tracktable.deleted == False)
recv_items = db(query).select()
send_id = None
for recv_item in recv_items:
stock_id = recv_item.recv_stock_id
# This assumes that the inv_item has the quantity
db(stocktable.id == stock_id).update(quantity = stocktable.quantity - recv_item.quantity)
db(tracktable.recv_id == recv_id).update(status = 2) # In transit
# @todo potential problem in that the send id should be the same for all track items but is not explicitly checked
if send_id == None and recv_item.send_id != None:
send_id = recv_item.send_id
track_rows = db(tracktable.recv_id == recv_id).select()
for track_item in track_rows:
# if this is linked to a request
# then remove these items from the quantity in transit
if track_item.req_item_id:
db(ritable.id == track_item.req_item_id).update(quantity_fulfil = ritable.quantity_fulfil - track_item.quantity)
# Now set the recv record to cancelled and the send record to sent
rtable[recv_id] = dict(date = request.utcnow,
status = eden.inv.inv_ship_status["CANCEL"],
owned_by_user = None,
owned_by_group = ADMIN)
if send_id != None:
# The sent record is now set back to SENT so the warehouse can now cancel
# this record to get the stock back into their warehouse.
# IMPORTANT reports need to locate this record otherwise it can be
# a mechanism to circumvent the auditing of stock
stable[send_id] = dict(date = request.utcnow,
status = eden.inv.inv_ship_status["SENT"],
owned_by_user = None,
owned_by_group = ADMIN)
redirect(URL(c = "inv",
f = "recv",
args = [recv_id]))
# -----------------------------------------------------------------------------
def recv_sent():
""" wrapper function to copy data from a shipment which was sent to the warehouse to a recv shipment (will happen at destination WH)
@ToDo: Consider making obsolete
"""
send_id = request.args[0]
# This is more explicit than getting the site_id from the inv_send.to_site_id
# As there may be multiple sites per location.
#site_id = request.vars.site_id
# Flag shipment as received
s3db.inv_send[send_id] = dict(status = eden.inv.inv_ship_status["RECEIVED"])
# Redirect to recv
redirect(URL(c = "inv",
f = "recv",
args = [recv_id, "recv_item"]))
# =============================================================================
def track_item():
""" RESTful CRUD controller """
output = s3_rest_controller("inv",
"track_item",
rheader=response.s3.inv_warehouse_rheader,
)
return output
# =============================================================================
def adj():
""" RESTful CRUD controller """
s3mgr.load("inv_adj")
s3db = current.s3db
db = current.db
auth = current.auth
request = current.request
response = current.response
s3 = response.s3
tablename = "inv_adj"
table = s3db.inv_adj
# Limit site_id to sites the user has permissions for
error_msg = T("You do not have permission to adjust the stock level in this warehouse.")
auth.permission.permitted_facilities(table=table,
error_msg=error_msg)
def prep(r):
if r.interactive:
if r.component:
if r.component_id:
aitable = s3db.inv_adj_item
if r.record.status == 0:
aitable.reason.writable = True
record = aitable[r.component_id]
if record.inv_item_id:
aitable.item_id.writable = False
aitable.item_pack_id.writable = False
aitable.item_id.writable = False
else:
# if an adjustment has been selected and it has been completed
# then make the fields read only
if r.record and r.record.status:
table.adjuster_id.writable = False
table.site_id.writable = False
table.comments.writable = False
return True
response.s3.prep = prep
if len(request.args) > 1 and request.args[1] == "adj_item" and table[request.args[0]].status:
# remove CRUD generated buttons in the tabs
s3mgr.configure("inv_adj_item",
create=False,
listadd=False,
editable=False,
deletable=False,
)
output = s3_rest_controller("inv",
"adj",
rheader=s3.inv_adj_rheader,
)
return output
def adj_close():
""" RESTful CRUD controller """
s3mgr.load("inv_adj")
s3db = current.s3db
db = current.db
auth = current.auth
request = current.request
response = current.response
s3 = response.s3
atable = s3db.inv_adj
aitable = s3db.inv_adj_item
stocktable = s3db.inv_inv_item
otable = s3db.org_office
# Limit site_id to sites the user has permissions for
error_msg = T("You do not have permission to adjust the stock level in this warehouse.")
auth.permission.permitted_facilities(table=atable,
error_msg=error_msg)
adj_id = request.args[0]
adj_rec = atable[adj_id]
if adj_rec.status != 0:
session.error = T("This adjustment has already been closed.")
if session.error:
redirect(URL(c = "inv",
f = "adj",
args = [adj_id]))
# Go through all the adj_items
query = ( aitable.adj_id == adj_id ) & \
(aitable.deleted == False)
adj_items = db(query).select()
for adj_item in adj_items:
# if we don't have a stock item then create it
if adj_item.inv_item_id == None:
stock_id = stocktable.insert(site_id = adj_rec.site_id,
item_id = adj_item.item_id,
item_pack_id = adj_item.item_pack_id,
currency = adj_item.currency,
bin = adj_item.bin,
pack_value = adj_item.pack_value,
expiry_date = adj_item.expiry_date,
quantity = adj_item.new_quantity,
)
# and add the inventory item id to the adjustment record
db(aitable.id == adj_item.id).update(inv_item_id = stock_id)
# otherwise copy the details to the stock item
else:
db(stocktable.id == adj_item.inv_item_id).update(item_pack_id = adj_item.item_pack_id,
bin = adj_item.bin,
pack_value = adj_item.pack_value,
expiry_date = adj_item.expiry_date,
quantity = adj_item.new_quantity,
)
# Change the status of the adj record to Complete
db(atable.id == adj_id).update(status=1)
# Go to the Inventory of the Site which has adjusted these items
(prefix, resourcename, id) = s3mgr.model.get_instance(s3db.org_site,
adj_rec.site_id)
query = (otable.id == id)
otype = db(query).select(otable.type, limitby = (0, 1)).first()
if otype and otype.type == 5:
url = URL(c = "inv",
f = "warehouse",
args = [id, "inv_item"])
else:
url = URL(c = "org",
f = "office",
args = [id, "inv_item"])
redirect(url)
# =============================================================================
def recv_item_json():
"""
"""
stable = s3db.org_site
rtable = s3db.inv_recv
ittable = s3db.inv_track_item
rtable.date.represent = lambda dt: dt[:10]
query = (ittable.req_item_id == request.args[0]) & \
(rtable.id == ittable.recv_id) & \
(rtable.site_id == stable.id) & \
(rtable.status == eden.inv.inv_ship_status["RECEIVED"]) & \
(ittable.deleted == False )
records = db(query).select(rtable.id,
rtable.date,
stable.name,
ittable.quantity)
json_str = "[%s,%s" % ( json.dumps(dict(id = str(T("Received")),
quantity = "#"
)) ,
records.json()[1:])
response.headers["Content-Type"] = "application/json"
return json_str
# =============================================================================
def send_item_json():
"""
"""
stable = s3db.org_site
istable = s3db.inv_send
ittable = s3db.inv_track_item
istable.date.represent = lambda dt: dt[:10]
query = (ittable.req_item_id == request.args[0]) & \
(istable.id == ittable.send_id) & \
(istable.site_id == stable.id) & \
((istable.status == eden.inv.inv_ship_status["SENT"]) | \
(istable.status == eden.inv.inv_ship_status["RECEIVED"])) & \
(ittable.deleted == False)
records = db(query).select(istable.id,
istable.date,
stable.name,
ittable.quantity)
json_str = "[%s,%s" % ( json.dumps(dict(id = str(T("Sent")),
quantity = "#"
)) ,
records.json()[1:])
response.headers["Content-Type"] = "application/json"
return json_str
# END =========================================================================
|
|
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class FactorTestCase(IntegrationTestCase):
def test_create_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.authy.v1.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.entities(identity="identity") \
.factors.create(binding="binding", friendly_name="friendly_name", factor_type="app-push")
values = {'Binding': "binding", 'FriendlyName': "friendly_name", 'FactorType': "app-push", }
self.holodeck.assert_has_request(Request(
'post',
'https://authy.twilio.com/v1/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Entities/identity/Factors',
data=values,
))
def test_create_response(self):
self.holodeck.mock(Response(
201,
'''
{
"sid": "YFaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"entity_sid": "YEaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"identity": "ff483d1ff591898a9942916050d2ca3f",
"date_created": "2015-07-30T20:00:00Z",
"date_updated": "2015-07-30T20:00:00Z",
"friendly_name": "friendly_name",
"status": "unverified",
"factor_strength": "low",
"factor_type": "sms",
"url": "https://authy.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Entities/ff483d1ff591898a9942916050d2ca3f/Factors/YFaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"links": {
"challenges": "https://authy.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Entities/ff483d1ff591898a9942916050d2ca3f/Factors/YFaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Challenges"
}
}
'''
))
actual = self.client.authy.v1.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.entities(identity="identity") \
.factors.create(binding="binding", friendly_name="friendly_name", factor_type="app-push")
self.assertIsNotNone(actual)
def test_delete_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.authy.v1.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.entities(identity="identity") \
.factors(sid="YFXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
self.holodeck.assert_has_request(Request(
'delete',
'https://authy.twilio.com/v1/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Entities/identity/Factors/YFXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
))
def test_delete_response(self):
self.holodeck.mock(Response(
204,
None,
))
actual = self.client.authy.v1.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.entities(identity="identity") \
.factors(sid="YFXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
self.assertTrue(actual)
def test_fetch_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.authy.v1.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.entities(identity="identity") \
.factors(sid="YFXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.holodeck.assert_has_request(Request(
'get',
'https://authy.twilio.com/v1/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Entities/identity/Factors/YFXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
))
def test_fetch_response(self):
self.holodeck.mock(Response(
200,
'''
{
"sid": "YFaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"entity_sid": "YEaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"identity": "ff483d1ff591898a9942916050d2ca3f",
"date_created": "2015-07-30T20:00:00Z",
"date_updated": "2015-07-30T20:00:00Z",
"friendly_name": "friendly_name",
"status": "unverified",
"factor_strength": "low",
"factor_type": "sms",
"url": "https://authy.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Entities/ff483d1ff591898a9942916050d2ca3f/Factors/YFaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"links": {
"challenges": "https://authy.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Entities/ff483d1ff591898a9942916050d2ca3f/Factors/YFaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Challenges"
}
}
'''
))
actual = self.client.authy.v1.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.entities(identity="identity") \
.factors(sid="YFXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.assertIsNotNone(actual)
def test_list_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.authy.v1.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.entities(identity="identity") \
.factors.list()
self.holodeck.assert_has_request(Request(
'get',
'https://authy.twilio.com/v1/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Entities/identity/Factors',
))
def test_read_empty_response(self):
self.holodeck.mock(Response(
200,
'''
{
"factors": [],
"meta": {
"page": 0,
"page_size": 50,
"first_page_url": "https://authy.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Entities/ff483d1ff591898a9942916050d2ca3f/Factors?PageSize=50&Page=0",
"previous_page_url": null,
"url": "https://authy.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Entities/ff483d1ff591898a9942916050d2ca3f/Factors?PageSize=50&Page=0",
"next_page_url": null,
"key": "factors"
}
}
'''
))
actual = self.client.authy.v1.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.entities(identity="identity") \
.factors.list()
self.assertIsNotNone(actual)
def test_read_full_response(self):
self.holodeck.mock(Response(
200,
'''
{
"factors": [
{
"sid": "YFaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"entity_sid": "YEaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"identity": "ff483d1ff591898a9942916050d2ca3f",
"date_created": "2015-07-30T20:00:00Z",
"date_updated": "2015-07-30T20:00:00Z",
"friendly_name": "friendly_name",
"status": "unverified",
"factor_strength": "low",
"factor_type": "sms",
"url": "https://authy.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Entities/ff483d1ff591898a9942916050d2ca3f/Factors/YFaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"links": {
"challenges": "https://authy.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Entities/ff483d1ff591898a9942916050d2ca3f/Factors/YFaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Challenges"
}
}
],
"meta": {
"page": 0,
"page_size": 50,
"first_page_url": "https://authy.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Entities/ff483d1ff591898a9942916050d2ca3f/Factors?PageSize=50&Page=0",
"previous_page_url": null,
"url": "https://authy.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Entities/ff483d1ff591898a9942916050d2ca3f/Factors?PageSize=50&Page=0",
"next_page_url": null,
"key": "factors"
}
}
'''
))
actual = self.client.authy.v1.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.entities(identity="identity") \
.factors.list()
self.assertIsNotNone(actual)
def test_update_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.authy.v1.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.entities(identity="identity") \
.factors(sid="YFXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()
self.holodeck.assert_has_request(Request(
'post',
'https://authy.twilio.com/v1/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Entities/identity/Factors/YFXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
))
def test_verify_response(self):
self.holodeck.mock(Response(
200,
'''
{
"sid": "YFaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"entity_sid": "YEaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"identity": "ff483d1ff591898a9942916050d2ca3f",
"date_created": "2015-07-30T20:00:00Z",
"date_updated": "2015-07-30T20:00:00Z",
"friendly_name": "friendly_name",
"status": "verified",
"factor_strength": "low",
"factor_type": "sms",
"url": "https://authy.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Entities/ff483d1ff591898a9942916050d2ca3f/Factors/YFaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"links": {
"challenges": "https://authy.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Entities/ff483d1ff591898a9942916050d2ca3f/Factors/YFaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Challenges"
}
}
'''
))
actual = self.client.authy.v1.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.entities(identity="identity") \
.factors(sid="YFXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()
self.assertIsNotNone(actual)
|
|
# yellowbrick.model_selection.importances
# Feature importance visualizer
#
# Author: Benjamin Bengfort
# Author: Rebecca Bilbro
# Created: Fri Mar 02 15:21:36 2018 -0500
#
# Copyright (C) 2018 The scikit-yb developers
# For license information, see LICENSE.txt
#
# ID: importances.py [] [email protected] $
"""
Implementation of a feature importances visualizer. This visualizer sits in
kind of a weird place since it is technically a model scoring visualizer, but
is generally used for feature engineering.
"""
##########################################################################
## Imports
##########################################################################
import warnings
import numpy as np
from yellowbrick.draw import bar_stack
from yellowbrick.base import ModelVisualizer
from yellowbrick.style.colors import resolve_colors
from yellowbrick.utils import is_dataframe, is_classifier
from yellowbrick.exceptions import YellowbrickTypeError, NotFitted
from yellowbrick.exceptions import YellowbrickWarning, YellowbrickValueError
##########################################################################
## Feature Visualizer
##########################################################################
class FeatureImportances(ModelVisualizer):
"""
Displays the most informative features in a model by showing a bar chart
of features ranked by their importances. Although primarily a feature
engineering mechanism, this visualizer requires a model that has either a
``coef_`` or ``feature_importances_`` parameter after fit.
    Note: Some classification models such as ``LogisticRegression`` return
    ``coef_`` as a multidimensional array of shape ``(n_classes, n_features)``.
    In this case, the ``FeatureImportances`` visualizer computes the mean of the
    ``coef_`` values by class for each feature.
Parameters
----------
estimator : Estimator
A Scikit-Learn estimator that learns feature importances. Must support
either ``coef_`` or ``feature_importances_`` parameters. If the estimator
is not fitted, it is fit when the visualizer is fitted, unless otherwise
specified by ``is_fitted``.
ax : matplotlib Axes, default: None
The axis to plot the figure on. If None is passed in the current axes
will be used (or generated if required).
labels : list, default: None
A list of feature names to use. If a DataFrame is passed to fit and
        labels is None, feature names are selected as the column names.
relative : bool, default: True
If true, the features are described by their relative importance as a
percentage of the strongest feature component; otherwise the raw
numeric description of the feature importance is shown.
absolute : bool, default: False
        Make all coefficients absolute to more easily compare negative
        coefficients with positive ones.
    xlabel : str, default: None
        The label for the X-axis. If None, it is automatically determined by the
        underlying model and options provided.
    stack : bool, default: False
        If true and the classifier returns multi-class feature importance,
        then a stacked bar plot is plotted; otherwise the mean of the
        feature importance across classes is plotted.
colors: list of strings
Specify colors for each bar in the chart if ``stack==False``.
colormap : string or matplotlib cmap
Specify a colormap to color the classes if ``stack==True``.
is_fitted : bool or str, default='auto'
Specify if the wrapped estimator is already fitted. If False, the estimator
will be fit when the visualizer is fit, otherwise, the estimator will not be
modified. If 'auto' (default), a helper method will check if the estimator
is fitted before fitting it again.
topn : int, default=None
Display only the top N results with a positive integer, or the bottom N
results with a negative integer. If None or 0, all results are shown.
kwargs : dict
Keyword arguments that are passed to the base class and may influence
the visualization as defined in other Visualizers.
Attributes
----------
features_ : np.array
The feature labels ranked according to their importance
feature_importances_ : np.array
The numeric value of the feature importance computed by the model
classes_ : np.array
        The class labels; not None only for classifiers.
Examples
--------
>>> from sklearn.ensemble import GradientBoostingClassifier
>>> visualizer = FeatureImportances(GradientBoostingClassifier())
>>> visualizer.fit(X, y)
>>> visualizer.show()
"""
def __init__(
self,
estimator,
ax=None,
labels=None,
relative=True,
absolute=False,
xlabel=None,
stack=False,
colors=None,
colormap=None,
is_fitted="auto",
topn=None,
**kwargs
):
# Initialize the visualizer bases
super(FeatureImportances, self).__init__(
estimator, ax=ax, is_fitted=is_fitted, **kwargs
)
# Data Parameters
self.labels = labels
self.relative = relative
self.absolute = absolute
self.xlabel = xlabel
self.stack = stack
self.colors = colors
self.colormap = colormap
self.topn = topn
def fit(self, X, y=None, **kwargs):
"""
Fits the estimator to discover the feature importances described by
the data, then draws those importances as a bar plot.
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features
y : ndarray or Series of length n
An array or series of target or class values
kwargs : dict
Keyword arguments passed to the fit method of the estimator.
Returns
-------
self : visualizer
The fit method must always return self to support pipelines.
"""
# Super call fits the underlying estimator if it's not already fitted
super(FeatureImportances, self).fit(X, y, **kwargs)
# Get the feature importances from the model
self.feature_importances_ = self._find_importances_param()
# Get the classes from the model
if is_classifier(self):
self.classes_ = self._find_classes_param()
else:
self.classes_ = None
self.stack = False
        # If stack=False but the feature importances form a multidim array,
        # we expect a shape of (n_classes, n_features) and therefore flatten
        # by taking the column-wise average to get shape (n_features,)
        # (see LogisticRegression)
if not self.stack and self.feature_importances_.ndim > 1:
self.feature_importances_ = np.mean(self.feature_importances_, axis=0)
warnings.warn(
(
"detected multi-dimensional feature importances but stack=False, "
"using mean to aggregate them."
),
YellowbrickWarning,
)
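            # For example, a multinomial LogisticRegression fit on 3 classes and
            # 4 features exposes coef_ with shape (3, 4); the np.mean call above
            # collapses it column-wise into shape (4,), one value per feature.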
# Apply absolute value filter before normalization
if self.absolute:
self.feature_importances_ = np.abs(self.feature_importances_)
# Normalize features relative to the maximum
if self.relative:
maxv = np.abs(self.feature_importances_).max()
self.feature_importances_ /= maxv
self.feature_importances_ *= 100.0
# Create labels for the feature importances
# NOTE: this code is duplicated from MultiFeatureVisualizer
if self.labels is None:
# Use column names if a dataframe
if is_dataframe(X):
self.features_ = np.array(X.columns)
# Otherwise use the column index as the labels
else:
_, ncols = X.shape
self.features_ = np.arange(0, ncols)
else:
self.features_ = np.array(self.labels)
if self.topn and self.topn > self.features_.shape[0]:
raise YellowbrickValueError(
"topn '{}' cannot be greater than the number of "
"features '{}'".format(self.topn, self.features_.shape[0])
)
# Sort the features and their importances
if self.stack:
if len(self.classes_) != self.feature_importances_.shape[0]:
raise YellowbrickValueError(
(
"The model used does not return coef_ array in the shape of (n_classes, n_features)."
" Unable to generate stacked feature importances. "
"Consider setting the stack parameter to False or using a different model"
)
)
if self.topn:
abs_sort_idx = np.argsort(
np.sum(np.absolute(self.feature_importances_), 0)
)
sort_idx = self._reduce_topn(abs_sort_idx)
else:
sort_idx = np.argsort(np.mean(self.feature_importances_, 0))
self.features_ = self.features_[sort_idx]
self.feature_importances_ = self.feature_importances_[:, sort_idx]
else:
if self.topn:
abs_sort_idx = np.argsort(np.absolute(self.feature_importances_))
abs_sort_idx = self._reduce_topn(abs_sort_idx)
self.features_ = self.features_[abs_sort_idx]
self.feature_importances_ = self.feature_importances_[abs_sort_idx]
# Sort features by value (sorting a second time if topn)
sort_idx = np.argsort(self.feature_importances_)
self.features_ = self.features_[sort_idx]
self.feature_importances_ = self.feature_importances_[sort_idx]
# Draw the feature importances
self.draw()
return self
def draw(self, **kwargs):
"""
Draws the feature importances as a bar chart; called from fit.
"""
# Quick validation
for param in ("feature_importances_", "features_"):
if not hasattr(self, param):
raise NotFitted("missing required param '{}'".format(param))
# Find the positions for each bar
pos = np.arange(self.features_.shape[0]) + 0.5
# Plot the bar chart
if self.stack:
colors = resolve_colors(len(self.classes_), colormap=self.colormap)
legend_kws = {"bbox_to_anchor": (1.04, 0.5), "loc": "center left"}
bar_stack(
self.feature_importances_,
ax=self.ax,
labels=list(self.classes_),
ticks=self.features_,
orientation="h",
colors=colors,
legend_kws=legend_kws,
)
else:
colors = resolve_colors(
len(self.features_), colormap=self.colormap, colors=self.colors
)
self.ax.barh(pos, self.feature_importances_, color=colors, align="center")
# Set the labels for the bars
self.ax.set_yticks(pos)
self.ax.set_yticklabels(self.features_)
return self.ax
def finalize(self, **kwargs):
"""
Finalize the drawing setting labels and title.
"""
# Set the title
self.set_title(
"Feature Importances of {} Features using {}".format(
self._get_topn_title(), self.name
)
)
# Set the xlabel
self.ax.set_xlabel(self._get_xlabel())
# Remove the ygrid
self.ax.grid(False, axis="y")
# Ensure we have a tight fit
self.fig.tight_layout()
def _find_classes_param(self):
"""
Searches the wrapped model for the classes_ parameter.
"""
for attr in ["classes_"]:
try:
return getattr(self.estimator, attr)
except AttributeError:
continue
raise YellowbrickTypeError(
"could not find classes_ param on {}".format(
self.estimator.__class__.__name__
)
)
def _find_importances_param(self):
"""
Searches the wrapped model for the feature importances parameter.
"""
for attr in ("feature_importances_", "coef_"):
try:
return getattr(self.estimator, attr)
except AttributeError:
continue
raise YellowbrickTypeError(
"could not find feature importances param on {}".format(
self.estimator.__class__.__name__
)
)
def _get_xlabel(self):
"""
Determines the xlabel based on the underlying data structure
"""
# Return user-specified label
if self.xlabel:
return self.xlabel
# Label for coefficients
if hasattr(self.estimator, "coef_"):
if self.relative:
return "relative coefficient magnitude"
return "coefficient value"
# Default label for feature_importances_
if self.relative:
return "relative importance"
return "feature importance"
def _is_fitted(self):
"""
Returns true if the visualizer has been fit.
"""
return hasattr(self, "feature_importances_") and hasattr(self, "features_")
def _reduce_topn(self, arr):
"""
Return only the top or bottom N items within a sliceable array/list.
Assumes that arr is in ascending order.
"""
if self.topn > 0:
arr = arr[-self.topn:]
elif self.topn < 0:
arr = arr[:-self.topn]
return arr
def _get_topn_title(self):
"""
Return an appropriate title for the plot: Top N, Bottom N, or N
"""
if self.topn:
if self.topn > 0:
return "Top {}".format(len(self.features_))
else:
return "Bottom {}".format(len(self.features_))
else:
return str(len(self.features_))
##########################################################################
## Quick Method
##########################################################################
def feature_importances(
estimator,
X,
y=None,
ax=None,
labels=None,
relative=True,
absolute=False,
xlabel=None,
stack=False,
colors=None,
colormap=None,
is_fitted="auto",
topn=None,
show=True,
**kwargs
):
"""Quick Method:
Displays the most informative features in a model by showing a bar chart
of features ranked by their importances. Although primarily a feature
engineering mechanism, this visualizer requires a model that has either a
``coef_`` or ``feature_importances_`` parameter after fit.
Parameters
----------
estimator : Estimator
A Scikit-Learn estimator that learns feature importances. Must support
either ``coef_`` or ``feature_importances_`` parameters. If the estimator
is not fitted, it is fit when the visualizer is fitted, unless otherwise
specified by ``is_fitted``.
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features
y : ndarray or Series of length n, optional
An array or series of target or class values
ax : matplotlib Axes, default: None
The axis to plot the figure on. If None is passed in the current axes
will be used (or generated if required).
labels : list, default: None
A list of feature names to use. If a DataFrame is passed to fit and
        labels is None, feature names are selected as the column names.
relative : bool, default: True
If true, the features are described by their relative importance as a
percentage of the strongest feature component; otherwise the raw
numeric description of the feature importance is shown.
absolute : bool, default: False
        Make all coefficients absolute to more easily compare negative
        coefficients with positive ones.
    xlabel : str, default: None
        The label for the X-axis. If None, it is automatically determined by the
        underlying model and options provided.
    stack : bool, default: False
        If true and the classifier returns multi-class feature importance,
        then a stacked bar plot is plotted; otherwise the mean of the
        feature importance across classes is plotted.
colors: list of strings
Specify colors for each bar in the chart if ``stack==False``.
colormap : string or matplotlib cmap
Specify a colormap to color the classes if ``stack==True``.
is_fitted : bool or str, default='auto'
Specify if the wrapped estimator is already fitted. If False, the estimator
will be fit when the visualizer is fit, otherwise, the estimator will not be
modified. If 'auto' (default), a helper method will check if the estimator
is fitted before fitting it again.
show: bool, default: True
        If True, calls ``show()``, which in turn calls ``plt.show()``; however, you
        cannot call ``plt.savefig`` from this signature, nor ``clear_figure``. If
        False, simply calls ``finalize()``.
topn : int, default=None
Display only the top N results with a positive integer, or the bottom N
results with a negative integer. If None or 0, all results are shown.
kwargs : dict
Keyword arguments that are passed to the base class and may influence
the visualization as defined in other Visualizers.
Returns
-------
viz : FeatureImportances
The feature importances visualizer, fitted and finalized.
"""
# Instantiate the visualizer
visualizer = FeatureImportances(
estimator,
ax=ax,
labels=labels,
relative=relative,
absolute=absolute,
xlabel=xlabel,
stack=stack,
colors=colors,
colormap=colormap,
is_fitted=is_fitted,
topn=topn,
**kwargs
)
# Fit and transform the visualizer (calls draw)
visualizer.fit(X, y)
if show:
visualizer.show()
else:
visualizer.finalize()
# Return the visualizer
return visualizer
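# ----------------------------------------------------------------------------
# Minimal usage sketch of the quick method above (not part of the library API);
# assumes scikit-learn and matplotlib are installed, and the synthetic dataset
# and RandomForestClassifier below are illustrative only.
if __name__ == "__main__":
    from sklearn.datasets import make_classification
    from sklearn.ensemble import RandomForestClassifier

    # Build a small synthetic classification problem
    X_demo, y_demo = make_classification(
        n_samples=200, n_features=6, n_informative=3, random_state=42
    )

    # show=False finalizes the figure without calling plt.show()
    viz = feature_importances(
        RandomForestClassifier(n_estimators=10, random_state=42),
        X_demo,
        y_demo,
        show=False,
    )
    print(viz.features_)
    print(viz.feature_importances_)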
|
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import timedelta
import json
import unittest
from urllib.parse import quote_plus
from airflow import configuration as conf
from airflow import settings
from airflow.api.common.experimental.trigger_dag import trigger_dag
from airflow.models import DagBag, DagRun, Pool, TaskInstance
from airflow.settings import Session
from airflow.utils.timezone import datetime, utcnow
from airflow.www import app as application
class TestBase(unittest.TestCase):
def setUp(self):
conf.load_test_config()
self.app, self.appbuilder = application.create_app(session=Session, testing=True)
self.app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///'
self.app.config['SECRET_KEY'] = 'secret_key'
self.app.config['CSRF_ENABLED'] = False
self.app.config['WTF_CSRF_ENABLED'] = False
self.client = self.app.test_client()
settings.configure_orm()
self.session = Session
class TestApiExperimental(TestBase):
@classmethod
def setUpClass(cls):
super(TestApiExperimental, cls).setUpClass()
session = Session()
session.query(DagRun).delete()
session.query(TaskInstance).delete()
session.commit()
session.close()
def setUp(self):
super(TestApiExperimental, self).setUp()
def tearDown(self):
session = Session()
session.query(DagRun).delete()
session.query(TaskInstance).delete()
session.commit()
session.close()
super(TestApiExperimental, self).tearDown()
def test_task_info(self):
url_template = '/api/experimental/dags/{}/tasks/{}'
response = self.client.get(
url_template.format('example_bash_operator', 'runme_0')
)
self.assertIn('"email"', response.data.decode('utf-8'))
self.assertNotIn('error', response.data.decode('utf-8'))
self.assertEqual(200, response.status_code)
response = self.client.get(
url_template.format('example_bash_operator', 'DNE')
)
self.assertIn('error', response.data.decode('utf-8'))
self.assertEqual(404, response.status_code)
response = self.client.get(
url_template.format('DNE', 'DNE')
)
self.assertIn('error', response.data.decode('utf-8'))
self.assertEqual(404, response.status_code)
def test_get_dag_code(self):
url_template = '/api/experimental/dags/{}/code'
response = self.client.get(
url_template.format('example_bash_operator')
)
self.assertIn('BashOperator(', response.data.decode('utf-8'))
self.assertEqual(200, response.status_code)
response = self.client.get(
url_template.format('xyz')
)
self.assertEqual(404, response.status_code)
def test_task_paused(self):
url_template = '/api/experimental/dags/{}/paused/{}'
response = self.client.get(
url_template.format('example_bash_operator', 'true')
)
self.assertIn('ok', response.data.decode('utf-8'))
self.assertEqual(200, response.status_code)
url_template = '/api/experimental/dags/{}/paused/{}'
response = self.client.get(
url_template.format('example_bash_operator', 'false')
)
self.assertIn('ok', response.data.decode('utf-8'))
self.assertEqual(200, response.status_code)
def test_trigger_dag(self):
url_template = '/api/experimental/dags/{}/dag_runs'
response = self.client.post(
url_template.format('example_bash_operator'),
data=json.dumps({'run_id': 'my_run' + utcnow().isoformat()}),
content_type="application/json"
)
self.assertEqual(200, response.status_code)
response = self.client.post(
url_template.format('does_not_exist_dag'),
data=json.dumps({}),
content_type="application/json"
)
self.assertEqual(404, response.status_code)
def test_trigger_dag_for_date(self):
url_template = '/api/experimental/dags/{}/dag_runs'
dag_id = 'example_bash_operator'
hour_from_now = utcnow() + timedelta(hours=1)
execution_date = datetime(hour_from_now.year,
hour_from_now.month,
hour_from_now.day,
hour_from_now.hour)
datetime_string = execution_date.isoformat()
# Test Correct execution
response = self.client.post(
url_template.format(dag_id),
data=json.dumps({'execution_date': datetime_string}),
content_type="application/json"
)
self.assertEqual(200, response.status_code)
dagbag = DagBag()
dag = dagbag.get_dag(dag_id)
dag_run = dag.get_dagrun(execution_date)
self.assertTrue(dag_run,
'Dag Run not found for execution date {}'
.format(execution_date))
# Test error for nonexistent dag
response = self.client.post(
url_template.format('does_not_exist_dag'),
data=json.dumps({'execution_date': execution_date.isoformat()}),
content_type="application/json"
)
self.assertEqual(404, response.status_code)
# Test error for bad datetime format
response = self.client.post(
url_template.format(dag_id),
data=json.dumps({'execution_date': 'not_a_datetime'}),
content_type="application/json"
)
self.assertEqual(400, response.status_code)
def test_task_instance_info(self):
url_template = '/api/experimental/dags/{}/dag_runs/{}/tasks/{}'
dag_id = 'example_bash_operator'
task_id = 'also_run_this'
execution_date = utcnow().replace(microsecond=0)
datetime_string = quote_plus(execution_date.isoformat())
wrong_datetime_string = quote_plus(
datetime(1990, 1, 1, 1, 1, 1).isoformat()
)
# Create DagRun
trigger_dag(dag_id=dag_id,
run_id='test_task_instance_info_run',
execution_date=execution_date)
# Test Correct execution
response = self.client.get(
url_template.format(dag_id, datetime_string, task_id)
)
self.assertEqual(200, response.status_code)
self.assertIn('state', response.data.decode('utf-8'))
self.assertNotIn('error', response.data.decode('utf-8'))
# Test error for nonexistent dag
response = self.client.get(
url_template.format('does_not_exist_dag', datetime_string,
task_id),
)
self.assertEqual(404, response.status_code)
self.assertIn('error', response.data.decode('utf-8'))
# Test error for nonexistent task
response = self.client.get(
url_template.format(dag_id, datetime_string, 'does_not_exist_task')
)
self.assertEqual(404, response.status_code)
self.assertIn('error', response.data.decode('utf-8'))
# Test error for nonexistent dag run (wrong execution_date)
response = self.client.get(
url_template.format(dag_id, wrong_datetime_string, task_id)
)
self.assertEqual(404, response.status_code)
self.assertIn('error', response.data.decode('utf-8'))
# Test error for bad datetime format
response = self.client.get(
url_template.format(dag_id, 'not_a_datetime', task_id)
)
self.assertEqual(400, response.status_code)
self.assertIn('error', response.data.decode('utf-8'))
def test_dagrun_status(self):
url_template = '/api/experimental/dags/{}/dag_runs/{}'
dag_id = 'example_bash_operator'
execution_date = utcnow().replace(microsecond=0)
datetime_string = quote_plus(execution_date.isoformat())
wrong_datetime_string = quote_plus(
datetime(1990, 1, 1, 1, 1, 1).isoformat()
)
# Create DagRun
trigger_dag(dag_id=dag_id,
run_id='test_task_instance_info_run',
execution_date=execution_date)
# Test Correct execution
response = self.client.get(
url_template.format(dag_id, datetime_string)
)
self.assertEqual(200, response.status_code)
self.assertIn('state', response.data.decode('utf-8'))
self.assertNotIn('error', response.data.decode('utf-8'))
# Test error for nonexistent dag
response = self.client.get(
url_template.format('does_not_exist_dag', datetime_string),
)
self.assertEqual(404, response.status_code)
self.assertIn('error', response.data.decode('utf-8'))
# Test error for nonexistent dag run (wrong execution_date)
response = self.client.get(
url_template.format(dag_id, wrong_datetime_string)
)
self.assertEqual(404, response.status_code)
self.assertIn('error', response.data.decode('utf-8'))
# Test error for bad datetime format
response = self.client.get(
url_template.format(dag_id, 'not_a_datetime')
)
self.assertEqual(400, response.status_code)
self.assertIn('error', response.data.decode('utf-8'))
class TestPoolApiExperimental(TestBase):
@classmethod
def setUpClass(cls):
super(TestPoolApiExperimental, cls).setUpClass()
session = Session()
session.query(Pool).delete()
session.commit()
session.close()
def setUp(self):
super(TestPoolApiExperimental, self).setUp()
self.pools = []
for i in range(2):
name = 'experimental_%s' % (i + 1)
pool = Pool(
pool=name,
slots=i,
description=name,
)
self.session.add(pool)
self.pools.append(pool)
self.session.commit()
self.pool = self.pools[0]
def tearDown(self):
self.session.query(Pool).delete()
self.session.commit()
self.session.close()
super(TestPoolApiExperimental, self).tearDown()
def _get_pool_count(self):
response = self.client.get('/api/experimental/pools')
self.assertEqual(response.status_code, 200)
return len(json.loads(response.data.decode('utf-8')))
def test_get_pool(self):
response = self.client.get(
'/api/experimental/pools/{}'.format(self.pool.pool),
)
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.data.decode('utf-8')),
self.pool.to_json())
def test_get_pool_non_existing(self):
response = self.client.get('/api/experimental/pools/foo')
self.assertEqual(response.status_code, 404)
self.assertEqual(json.loads(response.data.decode('utf-8'))['error'],
"Pool 'foo' doesn't exist")
def test_get_pools(self):
response = self.client.get('/api/experimental/pools')
self.assertEqual(response.status_code, 200)
pools = json.loads(response.data.decode('utf-8'))
self.assertEqual(len(pools), 2)
for i, pool in enumerate(sorted(pools, key=lambda p: p['pool'])):
self.assertDictEqual(pool, self.pools[i].to_json())
def test_create_pool(self):
response = self.client.post(
'/api/experimental/pools',
data=json.dumps({
'name': 'foo',
'slots': 1,
'description': '',
}),
content_type='application/json',
)
self.assertEqual(response.status_code, 200)
pool = json.loads(response.data.decode('utf-8'))
self.assertEqual(pool['pool'], 'foo')
self.assertEqual(pool['slots'], 1)
self.assertEqual(pool['description'], '')
self.assertEqual(self._get_pool_count(), 3)
def test_create_pool_with_bad_name(self):
for name in ('', ' '):
response = self.client.post(
'/api/experimental/pools',
data=json.dumps({
'name': name,
'slots': 1,
'description': '',
}),
content_type='application/json',
)
self.assertEqual(response.status_code, 400)
self.assertEqual(
json.loads(response.data.decode('utf-8'))['error'],
"Pool name shouldn't be empty",
)
self.assertEqual(self._get_pool_count(), 2)
def test_delete_pool(self):
response = self.client.delete(
'/api/experimental/pools/{}'.format(self.pool.pool),
)
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.data.decode('utf-8')),
self.pool.to_json())
self.assertEqual(self._get_pool_count(), 1)
def test_delete_pool_non_existing(self):
response = self.client.delete(
'/api/experimental/pools/foo',
)
self.assertEqual(response.status_code, 404)
self.assertEqual(json.loads(response.data.decode('utf-8'))['error'],
"Pool 'foo' doesn't exist")
if __name__ == '__main__':
unittest.main()
|
|
from sympy.core import (S, symbols, Eq, pi, Catalan, EulerGamma, Lambda,
Dummy, Function)
from sympy.core.compatibility import StringIO
from sympy import erf, Integral, Piecewise
from sympy import Equality
from sympy.matrices import Matrix, MatrixSymbol
from sympy.printing.codeprinter import Assignment
from sympy.utilities.codegen import OctaveCodeGen, codegen, make_routine
from sympy.utilities.pytest import raises
from sympy.utilities.lambdify import implemented_function
from sympy.utilities.pytest import XFAIL
import sympy
x, y, z = symbols('x,y,z')
def test_empty_m_code():
code_gen = OctaveCodeGen()
output = StringIO()
code_gen.dump_m([], output, "file", header=False, empty=False)
source = output.getvalue()
assert source == ""
def test_m_simple_code():
name_expr = ("test", (x + y)*z)
result, = codegen(name_expr, "Octave", header=False, empty=False)
assert result[0] == "test.m"
source = result[1]
expected = (
"function out1 = test(x, y, z)\n"
" out1 = z.*(x + y);\n"
"end\n"
)
assert source == expected
def test_m_simple_code_with_header():
name_expr = ("test", (x + y)*z)
result, = codegen(name_expr, "Octave", header=True, empty=False)
assert result[0] == "test.m"
source = result[1]
expected = (
"function out1 = test(x, y, z)\n"
" %TEST Autogenerated by sympy\n"
" % Code generated with sympy " + sympy.__version__ + "\n"
" %\n"
" % See http://www.sympy.org/ for more information.\n"
" %\n"
" % This file is part of 'project'\n"
" out1 = z.*(x + y);\n"
"end\n"
)
assert source == expected
def test_m_simple_code_nameout():
expr = Equality(z, (x + y))
name_expr = ("test", expr)
result, = codegen(name_expr, "Octave", header=False, empty=False)
source = result[1]
expected = (
"function z = test(x, y)\n"
" z = x + y;\n"
"end\n"
)
assert source == expected
def test_m_numbersymbol():
name_expr = ("test", pi**Catalan)
result, = codegen(name_expr, "Octave", header=False, empty=False)
source = result[1]
expected = (
"function out1 = test()\n"
" out1 = pi^0.915965594177219;\n"
"end\n"
)
assert source == expected
@XFAIL
def test_m_numbersymbol_no_inline():
# FIXME: how to pass inline=False to the OctaveCodePrinter?
name_expr = ("test", [pi**Catalan, EulerGamma])
result, = codegen(name_expr, "Octave", header=False,
empty=False, inline=False)
source = result[1]
expected = (
"function [out1, out2] = test()\n"
" Catalan = 0.915965594177219; % constant\n"
" EulerGamma = 0.5772156649015329; % constant\n"
" out1 = pi^Catalan;\n"
" out2 = EulerGamma;\n"
"end\n"
)
assert source == expected
def test_m_code_argument_order():
expr = x + y
routine = make_routine("test", expr, argument_sequence=[z, x, y], language="octave")
code_gen = OctaveCodeGen()
output = StringIO()
code_gen.dump_m([routine], output, "test", header=False, empty=False)
source = output.getvalue()
expected = (
"function out1 = test(z, x, y)\n"
" out1 = x + y;\n"
"end\n"
)
assert source == expected
def test_multiple_results_m():
# Here the output order is the input order
expr1 = (x + y)*z
expr2 = (x - y)*z
name_expr = ("test", [expr1, expr2])
result, = codegen(name_expr, "Octave", header=False, empty=False)
source = result[1]
expected = (
"function [out1, out2] = test(x, y, z)\n"
" out1 = z.*(x + y);\n"
" out2 = z.*(x - y);\n"
"end\n"
)
assert source == expected
def test_results_named_unordered():
# Here output order is based on name_expr
A, B, C = symbols('A,B,C')
expr1 = Equality(C, (x + y)*z)
expr2 = Equality(A, (x - y)*z)
expr3 = Equality(B, 2*x)
name_expr = ("test", [expr1, expr2, expr3])
result, = codegen(name_expr, "Octave", header=False, empty=False)
source = result[1]
expected = (
"function [C, A, B] = test(x, y, z)\n"
" C = z.*(x + y);\n"
" A = z.*(x - y);\n"
" B = 2*x;\n"
"end\n"
)
assert source == expected
def test_results_named_ordered():
A, B, C = symbols('A,B,C')
expr1 = Equality(C, (x + y)*z)
expr2 = Equality(A, (x - y)*z)
expr3 = Equality(B, 2*x)
name_expr = ("test", [expr1, expr2, expr3])
result = codegen(name_expr, "Octave", header=False, empty=False,
argument_sequence=(x, z, y))
assert result[0][0] == "test.m"
source = result[0][1]
expected = (
"function [C, A, B] = test(x, z, y)\n"
" C = z.*(x + y);\n"
" A = z.*(x - y);\n"
" B = 2*x;\n"
"end\n"
)
assert source == expected
def test_complicated_m_codegen():
from sympy import sin, cos, tan
name_expr = ("testlong",
[ ((sin(x) + cos(y) + tan(z))**3).expand(),
cos(cos(cos(cos(cos(cos(cos(cos(x + y + z))))))))
])
result = codegen(name_expr, "Octave", header=False, empty=False)
assert result[0][0] == "testlong.m"
source = result[0][1]
expected = (
"function [out1, out2] = testlong(x, y, z)\n"
" out1 = sin(x).^3 + 3*sin(x).^2.*cos(y) + 3*sin(x).^2.*tan(z)"
" + 3*sin(x).*cos(y).^2 + 6*sin(x).*cos(y).*tan(z) + 3*sin(x).*tan(z).^2"
" + cos(y).^3 + 3*cos(y).^2.*tan(z) + 3*cos(y).*tan(z).^2 + tan(z).^3;\n"
" out2 = cos(cos(cos(cos(cos(cos(cos(cos(x + y + z))))))));\n"
"end\n"
)
assert source == expected
def test_m_output_arg_mixed_unordered():
# named outputs are alphabetical, unnamed output appear in the given order
from sympy import sin, cos, tan
a = symbols("a")
name_expr = ("foo", [cos(2*x), Equality(y, sin(x)), cos(x), Equality(a, sin(2*x))])
result, = codegen(name_expr, "Octave", header=False, empty=False)
assert result[0] == "foo.m"
    source = result[1]
expected = (
'function [out1, y, out3, a] = foo(x)\n'
' out1 = cos(2*x);\n'
' y = sin(x);\n'
' out3 = cos(x);\n'
' a = sin(2*x);\n'
'end\n'
)
assert source == expected
def test_m_piecewise_():
pw = Piecewise((0, x < -1), (x**2, x <= 1), (-x+2, x > 1), (1, True))
name_expr = ("pwtest", pw)
result, = codegen(name_expr, "Octave", header=False, empty=False)
source = result[1]
expected = (
"function out1 = pwtest(x)\n"
" out1 = ((x < -1).*(0) + (~(x < -1)).*( ...\n"
" (x <= 1).*(x.^2) + (~(x <= 1)).*( ...\n"
" (x > 1).*(-x + 2) + (~(x > 1)).*(1))));\n"
"end\n"
)
assert source == expected
@XFAIL
def test_m_piecewise_no_inline():
# FIXME: how to pass inline=False to the OctaveCodePrinter?
pw = Piecewise((0, x < -1), (x**2, x <= 1), (-x+2, x > 1), (1, True))
name_expr = ("pwtest", pw)
result, = codegen(name_expr, "Octave", header=False, empty=False,
inline=False)
source = result[1]
expected = (
"function out1 = pwtest(x)\n"
" if (x < -1)\n"
" out1 = 0;\n"
" elseif (x <= 1)\n"
" out1 = x.^2;\n"
" elseif (x > 1)\n"
" out1 = -x + 2;\n"
" else\n"
" out1 = 1;\n"
" end\n"
"end\n"
)
assert source == expected
def test_m_multifcns_per_file():
name_expr = [ ("foo", [2*x, 3*y]), ("bar", [y**2, 4*y]) ]
result = codegen(name_expr, "Octave", header=False, empty=False)
assert result[0][0] == "foo.m"
    source = result[0][1]
expected = (
"function [out1, out2] = foo(x, y)\n"
" out1 = 2*x;\n"
" out2 = 3*y;\n"
"end\n"
"function [out1, out2] = bar(y)\n"
" out1 = y.^2;\n"
" out2 = 4*y;\n"
"end\n"
)
assert source == expected
def test_m_multifcns_per_file_w_header():
name_expr = [ ("foo", [2*x, 3*y]), ("bar", [y**2, 4*y]) ]
result = codegen(name_expr, "Octave", header=True, empty=False)
assert result[0][0] == "foo.m"
    source = result[0][1]
expected = (
"function [out1, out2] = foo(x, y)\n"
" %FOO Autogenerated by sympy\n"
" % Code generated with sympy " + sympy.__version__ + "\n"
" %\n"
" % See http://www.sympy.org/ for more information.\n"
" %\n"
" % This file is part of 'project'\n"
" out1 = 2*x;\n"
" out2 = 3*y;\n"
"end\n"
"function [out1, out2] = bar(y)\n"
" out1 = y.^2;\n"
" out2 = 4*y;\n"
"end\n"
)
assert source == expected
def test_m_filename_match_first_fcn():
name_expr = [ ("foo", [2*x, 3*y]), ("bar", [y**2, 4*y]) ]
raises(ValueError, lambda: codegen(name_expr,
"Octave", prefix="bar", header=False, empty=False))
def test_m_matrix_named():
e2 = Matrix([[x, 2*y, pi*z]])
name_expr = ("test", Equality(MatrixSymbol('myout1', 1, 3), e2))
result = codegen(name_expr, "Octave", header=False, empty=False)
assert result[0][0] == "test.m"
source = result[0][1]
expected = (
"function myout1 = test(x, y, z)\n"
" myout1 = [x 2*y pi*z];\n"
"end\n"
)
assert source == expected
def test_m_matrix_named_matsym():
myout1 = MatrixSymbol('myout1', 1, 3)
e2 = Matrix([[x, 2*y, pi*z]])
name_expr = ("test", Equality(myout1, e2, evaluate=False))
result, = codegen(name_expr, "Octave", header=False, empty=False)
source = result[1]
expected = (
"function myout1 = test(x, y, z)\n"
" myout1 = [x 2*y pi*z];\n"
"end\n"
)
assert source == expected
def test_m_matrix_output_autoname():
expr = Matrix([[x, x+y, 3]])
name_expr = ("test", expr)
result, = codegen(name_expr, "Octave", header=False, empty=False)
source = result[1]
expected = (
"function out1 = test(x, y)\n"
" out1 = [x x + y 3];\n"
"end\n"
)
assert source == expected
def test_m_matrix_output_autoname_2():
e1 = (x + y)
e2 = Matrix([[2*x, 2*y, 2*z]])
e3 = Matrix([[x], [y], [z]])
e4 = Matrix([[x, y], [z, 16]])
name_expr = ("test", (e1, e2, e3, e4))
result, = codegen(name_expr, "Octave", header=False, empty=False)
source = result[1]
expected = (
"function [out1, out2, out3, out4] = test(x, y, z)\n"
" out1 = x + y;\n"
" out2 = [2*x 2*y 2*z];\n"
" out3 = [x; y; z];\n"
" out4 = [x y;\n"
" z 16];\n"
"end\n"
)
assert source == expected
def test_m_results_matrix_named_ordered():
B, C = symbols('B,C')
A = MatrixSymbol('A', 1, 3)
expr1 = Equality(C, (x + y)*z)
expr2 = Equality(A, Matrix([[1, 2, x]]))
expr3 = Equality(B, 2*x)
name_expr = ("test", [expr1, expr2, expr3])
result, = codegen(name_expr, "Octave", header=False, empty=False,
argument_sequence=(x, z, y))
source = result[1]
expected = (
"function [C, A, B] = test(x, z, y)\n"
" C = z.*(x + y);\n"
" A = [1 2 x];\n"
" B = 2*x;\n"
"end\n"
)
assert source == expected
def test_m_matrixsymbol_slice():
A = MatrixSymbol('A', 2, 3)
B = MatrixSymbol('B', 1, 3)
C = MatrixSymbol('C', 1, 3)
D = MatrixSymbol('D', 2, 1)
name_expr = ("test", [Equality(B, A[0, :]),
Equality(C, A[1, :]),
Equality(D, A[:, 2])])
result, = codegen(name_expr, "Octave", header=False, empty=False)
source = result[1]
expected = (
"function [B, C, D] = test(A)\n"
" B = A(1, :);\n"
" C = A(2, :);\n"
" D = A(:, 3);\n"
"end\n"
)
assert source == expected
def test_m_matrixsymbol_slice2():
A = MatrixSymbol('A', 3, 4)
B = MatrixSymbol('B', 2, 2)
C = MatrixSymbol('C', 2, 2)
name_expr = ("test", [Equality(B, A[0:2, 0:2]),
Equality(C, A[0:2, 1:3])])
result, = codegen(name_expr, "Octave", header=False, empty=False)
source = result[1]
expected = (
"function [B, C] = test(A)\n"
" B = A(1:2, 1:2);\n"
" C = A(1:2, 2:3);\n"
"end\n"
)
assert source == expected
def test_m_matrixsymbol_slice3():
A = MatrixSymbol('A', 8, 7)
B = MatrixSymbol('B', 2, 2)
C = MatrixSymbol('C', 4, 2)
name_expr = ("test", [Equality(B, A[6:, 1::3]),
Equality(C, A[::2, ::3])])
result, = codegen(name_expr, "Octave", header=False, empty=False)
source = result[1]
expected = (
"function [B, C] = test(A)\n"
" B = A(7:end, 2:3:end);\n"
" C = A(1:2:end, 1:3:end);\n"
"end\n"
)
assert source == expected
def test_m_matrixsymbol_slice_autoname():
A = MatrixSymbol('A', 2, 3)
B = MatrixSymbol('B', 1, 3)
name_expr = ("test", [Equality(B, A[0,:]), A[1,:], A[:,0], A[:,1]])
result, = codegen(name_expr, "Octave", header=False, empty=False)
source = result[1]
expected = (
"function [B, out2, out3, out4] = test(A)\n"
" B = A(1, :);\n"
" out2 = A(2, :);\n"
" out3 = A(:, 1);\n"
" out4 = A(:, 2);\n"
"end\n"
)
assert source == expected
def test_m_loops():
# Note: an Octave programmer would probably vectorize this across one or
# more dimensions. Also, size(A) would be used rather than passing in m
# and n. Perhaps users would expect us to vectorize automatically here?
# Or is it possible to represent such things using IndexedBase?
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
n, m = symbols('n m', integer=True)
A = IndexedBase('A')
x = IndexedBase('x')
y = IndexedBase('y')
i = Idx('i', m)
j = Idx('j', n)
result, = codegen(('mat_vec_mult', Eq(y[i], A[i, j]*x[j])), "Octave",
header=False, empty=False)
source = result[1]
expected = (
'function y = mat_vec_mult(A, m, n, x)\n'
' for i = 1:m\n'
' y(i) = 0;\n'
' end\n'
' for i = 1:m\n'
' for j = 1:n\n'
' y(i) = %(rhs)s + y(i);\n'
' end\n'
' end\n'
'end\n'
)
assert (source == expected % {'rhs': 'A(%s, %s).*x(j)' % (i, j)} or
source == expected % {'rhs': 'x(j).*A(%s, %s)' % (i, j)})
def test_m_tensor_loops_multiple_contractions():
# see comments in previous test about vectorizing
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
n, m, o, p = symbols('n m o p', integer=True)
A = IndexedBase('A')
B = IndexedBase('B')
y = IndexedBase('y')
i = Idx('i', m)
j = Idx('j', n)
k = Idx('k', o)
l = Idx('l', p)
result, = codegen(('tensorthing', Eq(y[i], B[j, k, l]*A[i, j, k, l])),
"Octave", header=False, empty=False)
source = result[1]
expected = (
'function y = tensorthing(A, B, m, n, o, p)\n'
' for i = 1:m\n'
' y(i) = 0;\n'
' end\n'
' for i = 1:m\n'
' for j = 1:n\n'
' for k = 1:o\n'
' for l = 1:p\n'
' y(i) = y(i) + B(j, k, l).*A(i, j, k, l);\n'
' end\n'
' end\n'
' end\n'
' end\n'
'end\n'
)
assert source == expected
def test_m_InOutArgument():
expr = Equality(x, x**2)
name_expr = ("mysqr", expr)
result, = codegen(name_expr, "Octave", header=False, empty=False)
source = result[1]
expected = (
"function x = mysqr(x)\n"
" x = x.^2;\n"
"end\n"
)
assert source == expected
def test_m_InOutArgument_order():
# can specify the order as (x, y)
expr = Equality(x, x**2 + y)
name_expr = ("test", expr)
result, = codegen(name_expr, "Octave", header=False,
empty=False, argument_sequence=(x,y))
source = result[1]
expected = (
"function x = test(x, y)\n"
" x = x.^2 + y;\n"
"end\n"
)
assert source == expected
# make sure it gives (x, y) not (y, x)
expr = Equality(x, x**2 + y)
name_expr = ("test", expr)
result, = codegen(name_expr, "Octave", header=False, empty=False)
source = result[1]
expected = (
"function x = test(x, y)\n"
" x = x.^2 + y;\n"
"end\n"
)
assert source == expected
def test_m_not_supported():
f = Function('f')
name_expr = ("test", [f(x).diff(x), S.ComplexInfinity])
result, = codegen(name_expr, "Octave", header=False, empty=False)
source = result[1]
expected = (
"function [out1, out2] = test(x)\n"
" % unsupported: Derivative(f(x), x)\n"
" % unsupported: zoo\n"
" out1 = Derivative(f(x), x);\n"
" out2 = zoo;\n"
"end\n"
)
assert source == expected
def test_global_vars_octave():
x, y, z, t = symbols("x y z t")
result = codegen(('f', x*y), "Octave", header=False, empty=False,
global_vars=(y,))
source = result[0][1]
expected = (
"function out1 = f(x)\n"
" out1 = x.*y;\n"
"end\n"
)
assert source == expected
result = codegen(('f', x*y+z), "Octave", header=False, empty=False,
argument_sequence=(x, y), global_vars=(z, t))
source = result[0][1]
expected = (
"function out1 = f(x, y)\n"
" out1 = x.*y + z;\n"
"end\n"
)
assert source == expected
|
|
import click
def validate_value(ctx, param, value):
"""
    Check to make sure the arg is formatted correctly...
"""
#TODO: Write this function
toreturn = []
for v in value:
toreturn.append((str(v[0]), int(v[1])))
return toreturn
@click.group()
def cli():
pass
def find_fit_prompt(ctx, param, value):
if not value or ctx.resilient_parsing:
return
ctx.abort()
@click.command()
@click.option('-i', '--image', type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True,
resolve_path=True),
required=True, help="Path to the multidate image file.")
@click.option('-j', '--signaturedirectory', type=click.Path(exists=True, file_okay=False, dir_okay=True, readable=True,
resolve_path=True),
required=True, help="Path to the directory containing the temporal signature files to be used.")
@click.option('--vi', type=click.STRING, default="", help="Name of the VI being used. Default is a blank string.")
@click.option('-o', '--outputdir', type=click.Path(exists=True, file_okay=False, dir_okay=True, writable=True,
readable=True, resolve_path=True),
default=None, help="Path to the output directory. Default is to use the directory containing the image.",)
@click.option('-f', '--outputfoldername', type=click.STRING, default='fit_images',
help="Name of the folder to be created with the output files. Default is 'fit_images'.")
@click.option('-s', '--startDOY', type=click.INT, help="The start DOY for the multidate image.", required=True)
@click.option('-d', '--DOYinterval', type=click.INT, help="The interval of the imagery in the multidate image.",
              required=True)
@click.option('-t', '--temporalshift', type=click.INT, default=0,
help="A guess to the temporal shift of the multidate image from the temporal signatures. Default is 0 days.")
@click.option('-T', '--threshold', type=click.INT, default=None,
help="A value beneath which all VI values will be ignored. Default is none.")
@click.option('-n', '--ndvalue', type=click.INT, default=-3000,
help="The value for NODATA in the multidate image and output fit images. Default is -3000.")
@click.option('-S', '--subset', type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True,
resolve_path=True),
default=None, help="A shapefile of points for each pixel to be fit. Used to eliminate mixels. Default is none.")
@click.option('-m', '--meantype', type=click.Choice(['arithmetic', 'geometric']), default='arithmetic',
help="The type of mean (arithmetic or geometric) used in the RMSE fitting of the signatures to the pixels. Default is arithmetic.")
@click.option('-p', '--numberofprocesses', type=click.INT, default=4,
help="The max number of processes to spawn at any time. Default is 4. Set lower or higher depending on number of processors/cores in your machine.")
@click.option('-c', '--cliptoshapeextent', type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True,
resolve_path=True), default=None,
help="Path to a shapefile to clip the raster. If omitted, entire raster extent will be processed.")
@click.option('-C', '--cliptopixelextent', nargs=4, type=int, default=None,
help="Pixel coordinates and number of pixels to clip raster. For example, entering \"2482, 1089, 100, 100\" will create a 100px square image going right and down from pixel 2482, 1089 in the original image.")
@click.option('--timebounds', nargs=2, type=int, default=None,
help="Number of days to allow curve shifting before and after: -10, 10 is default and allows the curve to be shifted 10 days in either direction.")
@click.option('--xbounds', nargs=2, type=float, default=None,
help="Bonds of allowable x-scaling: default is 0.6 and 1.4, allowing the curve to be stretched horizontally between 60% and 140% of initial width.")
@click.option('--ybounds', nargs=2, type=float, default=None,
help="Bonds of allowable y-scaling: default is 0.6 and 1.4, allowing the curve to be stretched vertically between 60% and 140% of initial height.")
#TODO Add an option to use geographic or pixel extent (done) to clip raster in addition to clip to shape option
@click.option('-P', '--prompt-mode', is_flag=True, is_eager=True, expose_value=False, callback=find_fit_prompt,
help="**CURRENTLY DISABLED** Enable prompt mode. This will prompt you for each of the arguments to the function. Use if you aren't good at the command line.")
def find_fit(vi, signaturedirectory, image, outputdir, outputfoldername, startdoy, doyinterval, temporalshift,
threshold, ndvalue, subset, meantype, numberofprocesses, cliptopixelextent, cliptoshapeextent, timebounds,
xbounds, ybounds):
"""
    Fit reference temporal signatures to the pixels of a multidate image.
"""
#TODO Docstring
#TODO Add Parameter Validation Callbacks as necessary
# validate clip options
if cliptopixelextent and cliptoshapeextent:
click.BadParameter("Cannot clip the image to both a shapefile and pixel extent. Choose one or the other.")
# import required modules
import os
from signatureFunctions import get_sigs_in_dir
from utils import create_output_dir
from imageFunctions import clip_raster_to_extent, clip_and_mask_raster_with_shapefile
from fitting import fit_refs_to_image
signatures = get_sigs_in_dir(signaturedirectory, viname=vi)
if outputdir is None:
outputdir = os.path.dirname(image)
outdir = create_output_dir(outputdir, outputfoldername)
if cliptoshapeextent:
imagename, ext = os.path.splitext(os.path.basename(image))
outimage = os.path.join(outdir, imagename + "_clip" + ext)
imagetoprocess = clip_and_mask_raster_with_shapefile(image, cliptoshapeextent, outimage)
elif cliptopixelextent:
imagename, ext = os.path.splitext(os.path.basename(image))
outimage = os.path.join(outdir, imagename + "_clip" + ext)
imagetoprocess = clip_raster_to_extent(image, outimage, cliptopixelextent[0], cliptopixelextent[1],
cliptopixelextent[2], cliptopixelextent[3])
else:
imagetoprocess = image
fit_refs_to_image(imagetoprocess, outdir, signatures, startdoy, doyinterval,
temporalshift, threshold=threshold, ndvalue=ndvalue, subset=subset, meantype=meantype,
workers=numberofprocesses, timebounds=timebounds, xbounds=xbounds, ybounds=ybounds)
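# Example invocation (hypothetical paths and values; assumes find_fit is
# registered on the cli group elsewhere, e.g. via cli.add_command(find_fit),
# and that the registered command name matches the function name):
#   python cli.py find_fit -i composite.tif -j ./signatures -s 1 -d 16 -o ./out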
@click.command()
@click.option('-d', '--imagedirectory', type=click.Path(exists=True, file_okay=False, dir_okay=True, readable=True,
resolve_path=True),
required=True, help="Path to the directory containing the .hdf image files to be used.")
@click.option('-n', '--outputimagename', type=click.STRING, default='multidate_image.tif',
help="Name of the image to be created with the file extension. Default is 'multidate_image.tif'.")
@click.option('--vi', type=click.STRING, default="NDVI", help="Name of the VI to be used. Default is NDVI.")
@click.option('-o', '--outputdir', type=click.Path(exists=True, file_okay=False, dir_okay=True, writable=True,
readable=True, resolve_path=True),
default=None, help="Path to the output directory. Default is to use the directory containing the image.",)
@click.option('-f', '--outputfoldername', type=click.STRING, default='multidate_image',
help="Name of the folder to be created for the output file. Default is 'multidate_image'.")
@click.option('-N', '--ndvalue', type=click.INT, default=-3000,
help="The value for NODATA in the multidate image and output fit images. Default is -3000.")
@click.option('-D', '--drivercode', type=click.STRING, default='GTiff',
help="GDAL driver code for output image format. Default is GeoTIFF. Ensure output name extension is correct if using a different format.")
def build_multidate_image(imagedirectory, outputimagename, outputdir, outputfoldername, vi, drivercode, ndvalue):
"""
    Search a directory for HDF MODIS files, get a VI from each HDF, and build the single-date VI images into a
    multidate composite image.
"""
from imageFunctions import build_multiband_image
build_multiband_image(imagedirectory, outputimagename, outputfoldername, vi, str(drivercode), ndvalue,
outputdir=outputdir)
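# Illustrative invocation (a hedged sketch: the console-script name `vicli` and the exact
# subcommand spelling are hypothetical and depend on how the `cli` group is installed;
# the option flags themselves come from the decorators above):
#   vicli build_multidate_image -d /data/modis_hdf -n ndvi_stack.tif --vi NDVI -D GTiff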
@click.command()
@click.option('-i', '--image', type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True,
resolve_path=True),
required=True, help="Path to the multidate image file.")
@click.option('-v', '--shapefiledirectory', type=click.Path(exists=True, file_okay=False, dir_okay=True, readable=True,
resolve_path=True),
required=True, help="Path to the directory containing point .shp files for each of the classes.")
@click.option('-o', '--outputdir', type=click.Path(exists=True, file_okay=False, dir_okay=True, writable=True,
readable=True, resolve_path=True),
default=None, help="Path to the output directory. Default is to create a directory in the folder containing the image.")
@click.option('-s', '--startDOY', type=click.INT, help="The start DOY for the multidate image.", required=True)
@click.option('-d', '--DOYinterval', type=click.INT, help="The interval of the imagery in the multidate image.",
required=True)
@click.option('-l', '--filelabel', type=click.STRING, default="",
help="A label to postfix on each of the .ref file names")
@click.option('-p', '--plotsigs', is_flag=True,
help="Create a pdf plot of all the generated signatures.")
def extract_signatures(image, shapefiledirectory, startdoy, doyinterval, outputdir, filelabel, plotsigs):
"""
Extracts temporal signatures for a set of point geometry shapefiles in a specified directory and outputs them to a
set of .ref files in an output directory.
"""
import os
from plotting import SignaturePlot
from utils import find_files, create_output_dir, unique_name
from signatureFunctions import get_sigs_in_dir, get_reference_curves
if outputdir is None:
outputdir = create_output_dir(os.path.dirname(image), "signatures", usetime=True)
shapefiles = find_files(shapefiledirectory, ".shp", recursive=False)
#TODO: Need a method to find only valid shapefiles in the directory
get_reference_curves(image, shapefiles, startdoy, doyinterval, outdir=outputdir, filepostfix=filelabel)
if plotsigs:
path = unique_name(outputdir, "signaturePlot", ext=".pdf")
sigs = get_sigs_in_dir(outputdir)
plot = SignaturePlot(outputdir, os.path.basename(path))
plot.plot_collection(sigs)
@click.command()
@click.option('-i', '--fitimagedirectory', type=click.Path(exists=True, file_okay=False, dir_okay=True, writable=True,
readable=True, resolve_path=True),
required=True, help="Path to the directory containing the crop fit images.")
@click.option('-c', '--cropimage', type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True,
resolve_path=True),
required=True, help="Path to the crop ground truth image file.")
@click.option('-o', '--outputdirectory', type=click.Path(exists=True, file_okay=False, dir_okay=True, writable=True,
readable=True, resolve_path=True),
default=None, help="Path to the output directory. Default is to use the directory containing the image.")
@click.option('-v', '--valueofcropinimage', multiple=True, nargs=2, callback=validate_value,
help="The class name and its value in the crop image used for the accuracy assessment. E.g. \"Corn 1\"")
@click.option('-t', '--thresholds', default=[], type=click.STRING,
help="A list of threshold values to use. Format each entry as a tuple in a python list with no spaces e.g. [(800,500,1200)]. Cannot be used with threshold stepping.")
@click.option('-n', '--ndvalue', type=click.INT, default=-3000,
help="The value for NODATA in the multidate image and output fit images. Default is -3000.")
@click.option('-O', '--outputimagename', type=click.STRING, default=None,
help="Name of the image to be created with the file extension. Default is the date and crop image name.")
@click.option('--tstart', type=click.INT,
help="The threshold start value.")
@click.option('--tstep', type=click.INT,
help="The threshold step value.")
@click.option('--tstepcount', type=click.INT,
help="The number of threshold steps.")
@click.option('--nocombo', is_flag=True,
help="Does not find combination of threshold steps, but steps through a single threshold value applied to all fit images.")
@click.option('-p', '--numberofprocesses', type=click.INT, default=1,
help="The max number of processes to spawn at any time. Default is 4. Set lower or higher depending on number of processors/cores in your machine.")
@click.option('-q', '--chunksize', type=click.INT, default=10000,
help="The max number of thresholds to be assigned to a given process.")
def classify(fitimagedirectory, cropimage, outputdirectory, ndvalue, outputimagename, valueofcropinimage, tstart, tstep,
tstepcount, nocombo, thresholds, numberofprocesses, chunksize):
"""
Classify a multidate image and assess the accuracy of said classification.
"""
# import required functions
import os
from utils import create_output_dir
from classify import classify_and_assess_accuracy, generate_thresholds, get_fit_rasters, chunks
import multiprocessing
# get the fit rasters to use
filevallist = get_fit_rasters(fitimagedirectory, valueofcropinimage)
# validate threshold parameters
if (tstart or tstep or tstepcount or nocombo) and thresholds:
raise click.BadParameter("Cannot use both a threshold list and stepping threshold options.")
elif thresholds:
thresholds = eval(thresholds)
for thresh in thresholds:
if len(thresh) != len(filevallist):
raise click.BadParameter("Length of threshold in threshold value list is not the same as the number of fit rasters. Counts must be equal.")
thresholds = (thresholds, len(thresholds))
elif tstart and tstepcount and tstep:
# create threshold generator
if nocombo:
thresholds = []
for val in range(tstart, (tstepcount * tstep + tstart), tstep):
threshtemp = [val for item in filevallist]
thresholds.append(threshtemp)
thresholds = (thresholds, len(thresholds))
else:
thresholds = (generate_thresholds(tstart, tstep, tstepcount, len(filevallist)),
tstepcount**len(filevallist))
else:
raise click.BadParameter("Threshold options incomplete or otherwise incorrectly used.")
if outputdirectory is None:
outputdirectory = create_output_dir(os.path.dirname(fitimagedirectory), "classification", usetime=True)
if numberofprocesses == 1:
classify_and_assess_accuracy(outputdirectory, cropimage, valueofcropinimage, filevallist, ndvalue, thresholds,
classifiedimagename=outputimagename)
elif numberofprocesses > 1:
processes = []
threshlength = thresholds[1]
i = 0
for chunk in chunks(thresholds[0], size=chunksize):
if threshlength - chunksize >= 0:
threshlength -= chunksize
chunk = (chunk, chunksize)
else:
chunk = (chunk, threshlength)
processoutput = create_output_dir(outputdirectory, "process_" + str(i))
i += 1
p = multiprocessing.Process(target=classify_and_assess_accuracy,
args=(processoutput, cropimage, valueofcropinimage, filevallist, ndvalue, chunk),
kwargs={"classifiedimagename": outputimagename})
p.start()
processes.append(p)
if len(processes) == numberofprocesses:
for p in processes:
p.join()
processes.remove(p)
for p in processes:
p.join()
processes.remove(p)
else:
click.BadParameter("Number of worker processes must be greater than zero.")
@click.command()
@click.option('-i', '--multidateraster', type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True,
resolve_path=True),
required=True, help="Path to the multidate raster file.")
@click.option('-p', '--pointfile', type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True,
resolve_path=True), default=None, required=True,
help="Path to a point shapefile containing the points to be plotted")
@click.option('-s', '--startDOY', type=click.INT, help="The start DOY for the multidate image.", required=True)
@click.option('-d', '--DOYinterval', type=click.INT, help="The interval of the imagery in the multidate image.",
required=True)
def plot_points(multidateraster, pointfile, startdoy, doyinterval):
"""
"""
import os
from utils import unique_name
from plotting import PixelPlot
from core import pixel as pixelObject
from vectorFunctions import get_px_coords_from_shapefile
from imageFunctions import openImage
outpath = unique_name(os.path.dirname(multidateraster), "plots", ext=".pdf", usetime=True)
coords = get_px_coords_from_shapefile(multidateraster, pointfile)
plot = PixelPlot(os.path.dirname(outpath), os.path.basename(outpath))
raster = openImage(multidateraster)
for coord in coords:
pixel = pixelObject(coord[0], coord[1])
pixel.get_pixel_values(raster, startdoy, doyinterval)
plot.add_pixel(pixel, closefigure=True)
plot.close_plot()
raster = None
@click.command()
@click.option('-j', '--signaturedirectory', type=click.Path(exists=True, file_okay=False, dir_okay=True, readable=True,
resolve_path=True),
required=True, help="Path to the directory containing the temporal signature files to be used.")
@click.option('-o', '--outputdirectory', type=click.Path(exists=True, file_okay=False, dir_okay=True, writable=True,
readable=True, resolve_path=True),
default=None, help="Path to the output directory. Default is to use the directory containing the signatures.")
@click.option('-n', '--name', type=click.STRING, default='signatures.pdf',
help="Name of the plot pdf to be created with the file extension. Default is 'signatures.pdf'.")
@click.option('-s', '--signaturename', multiple=True,
help="The signature name or some other string to search for in the signature directory. If omitted, all mean signatures in the directory will be plotted. This parameter can be used multiple times for multiple sreach strings.")
def plot_sigs(signaturedirectory, outputdirectory, name, signaturename):
"""
"""
import os
from utils import find_files, unique_name
from core import signatureCollection
from plotting import SignaturePlot
if not outputdirectory:
outputdirectory = signaturedirectory
sigs = find_files(signaturedirectory, "mean.ref")
if not sigs:
click.BadParameter("Did not find any signature files in the specified directory.")
if signaturename:
filteredsigs = []
for searchstring in signaturename:
for sig in sigs:
if searchstring.upper() in sig.upper():
filteredsigs.append(sig)
sigs = filteredsigs
signatures = signatureCollection()
for sig in sigs:
try:
signatures.add(sig)
except Exception as e:
            print(e)
#TODO Fix core temporalSignature to use exceptions so they can be properly handled here
name, ext = os.path.splitext(name)
path = unique_name(outputdirectory, name, ext=ext)
print("Outputting to {0}".format(path))
plot = SignaturePlot(outputdirectory, os.path.basename(path))
plot.plot_collection(signatures)
cli.add_command(find_fit)
cli.add_command(build_multidate_image)
cli.add_command(extract_signatures)
cli.add_command(classify)
cli.add_command(plot_points)
cli.add_command(plot_sigs)
if __name__ == '__main__':
cli()
|
|
from nose.tools import * # flake8: noqa
from api.base.settings.defaults import API_BASE
from tests.base import ApiTestCase
from osf_tests.factories import SubjectFactory, PreprintProviderFactory
class TestPreprintProviderSubjects(ApiTestCase):
def create_subject_rules(self):
'''
Subject Hierarchy
+-----------------------------+
| |
| +-------->B+----->F |
| | |
| A+----------->C |
| | |
| +-------->D+----->G |
| |
| H+------>I+----->J |
| | |
| +----->K |
| |
| L+------>M+----->N |
| | |
| +------->E |
| |
| O |
+-----------------------------+
'''
self.subA = SubjectFactory(text='A')
self.subB = SubjectFactory(text='B', parent=self.subA)
self.subC = SubjectFactory(text='C', parent=self.subA)
self.subD = SubjectFactory(text='D', parent=self.subA)
self.subF = SubjectFactory(text='F', parent=self.subB)
self.subG = SubjectFactory(text='G', parent=self.subD)
self.subH = SubjectFactory(text='H')
self.subI = SubjectFactory(text='I', parent=self.subH)
self.subJ = SubjectFactory(text='J', parent=self.subI)
self.subK = SubjectFactory(text='K', parent=self.subI)
self.subL = SubjectFactory(text='L')
self.subM = SubjectFactory(text='M', parent=self.subL)
self.subE = SubjectFactory(text='E', parent=self.subM)
self.subN = SubjectFactory(text='N', parent=self.subM)
self.subO = SubjectFactory(text='O')
rules = [
([self.subA._id, self.subB._id], False),
([self.subA._id, self.subD._id], True),
([self.subH._id, self.subI._id, self.subJ._id], True),
([self.subL._id], True)
]
# This should allow: A, B, D, G, H, I, J, L, M, N and E
# This should not allow: C, F, K, O
return rules
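    # Each rule is a (subject-id path, include-descendants) pair: ([A, B], False)
    # admits A and B but not B's children, while ([L], True) admits L plus its whole
    # subtree (M, N and E). This reading is inferred from the expected totals above.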
def setUp(self):
super(TestPreprintProviderSubjects, self).setUp()
self.lawless_preprint_provider = PreprintProviderFactory()
self.ruled_preprint_provider = PreprintProviderFactory()
self.ruled_preprint_provider.subjects_acceptable = self.create_subject_rules()
self.ruled_preprint_provider.save()
self.lawless_url = '/{}preprint_providers/{}/taxonomies/?page[size]=15&'.format(
API_BASE, self.lawless_preprint_provider._id)
self.ruled_url = '/{}preprint_providers/{}/taxonomies/?page[size]=15&'.format(
API_BASE, self.ruled_preprint_provider._id)
def test_max_page_size(self):
base_url = '/{}preprint_providers/{}/taxonomies/'.format(
API_BASE, self.lawless_preprint_provider._id)
res = self.app.get(base_url)
assert_equal(res.status_code, 200)
assert_equal(res.json['links']['meta']['per_page'], 10)
res = self.app.get(base_url + '?page[size]=150')
assert_equal(res.status_code, 200)
assert_equal(res.json['links']['meta']['per_page'], 150)
res = self.app.get(base_url + '?page[size]=2018')
assert_equal(res.status_code, 200)
assert_equal(res.json['links']['meta']['per_page'], 1000)
def test_no_rules_grabs_all(self):
res = self.app.get(self.lawless_url)
assert_equal(res.status_code, 200)
assert_equal(res.json['links']['meta']['total'], 15)
def test_rules_only_grab_acceptable_subjects(self):
res = self.app.get(self.ruled_url)
assert_equal(res.status_code, 200)
assert_equal(res.json['links']['meta']['total'], 11)
def test_no_rules_with_null_parent_filter(self):
res = self.app.get(self.lawless_url + 'filter[parents]=null')
assert_equal(res.status_code, 200)
assert_equal(res.json['links']['meta']['total'], 4)
def test_rules_enforced_with_null_parent_filter(self):
res = self.app.get(self.ruled_url + 'filter[parents]=null')
assert_equal(res.status_code, 200)
assert_equal(res.json['links']['meta']['total'], 3)
texts = [item['attributes']['text'] for item in res.json['data']]
assert_in('A', texts)
assert_in('H', texts)
assert_in('L', texts)
assert_not_in('O', texts)
def test_no_rules_with_parents_filter(self):
res = self.app.get(
self.lawless_url +
'filter[parents]={}'.format(
self.subB._id))
assert_equal(res.status_code, 200)
assert_equal(res.json['links']['meta']['total'], 1)
assert_equal(res.json['data'][0]['attributes']['text'], 'F')
res = self.app.get(
self.lawless_url +
'filter[parents]={}'.format(
self.subI._id))
assert_equal(res.status_code, 200)
assert_equal(res.json['links']['meta']['total'], 2)
res = self.app.get(
self.lawless_url +
'filter[parents]={}'.format(
self.subM._id))
assert_equal(res.status_code, 200)
assert_equal(res.json['links']['meta']['total'], 2)
def test_rules_enforced_with_parents_filter(self):
res = self.app.get(
self.ruled_url +
'filter[parents]={}'.format(
self.subB._id))
assert_equal(res.status_code, 200)
assert_equal(res.json['links']['meta']['total'], 0)
texts = [item['attributes']['text'] for item in res.json['data']]
assert_not_in('F', texts)
res = self.app.get(
self.ruled_url +
'filter[parents]={}'.format(
self.subI._id))
assert_equal(res.status_code, 200)
assert_equal(res.json['links']['meta']['total'], 1)
texts = [item['attributes']['text'] for item in res.json['data']]
assert_in('J', texts)
assert_not_in('K', texts)
res = self.app.get(
self.ruled_url +
'filter[parents]={}'.format(
self.subM._id))
assert_equal(res.status_code, 200)
assert_equal(res.json['links']['meta']['total'], 2)
texts = [item['attributes']['text'] for item in res.json['data']]
assert_in('N', texts)
assert_in('E', texts)
def test_no_rules_with_parent_filter(self):
res = self.app.get(
self.lawless_url +
'filter[parent]={}'.format(
self.subB._id))
assert_equal(res.status_code, 200)
assert_equal(res.json['links']['meta']['total'], 1)
assert_equal(res.json['data'][0]['attributes']['text'], 'F')
res = self.app.get(
self.lawless_url +
'filter[parent]={}'.format(
self.subI._id))
assert_equal(res.status_code, 200)
assert_equal(res.json['links']['meta']['total'], 2)
res = self.app.get(
self.lawless_url +
'filter[parent]={}'.format(
self.subM._id))
assert_equal(res.status_code, 200)
assert_equal(res.json['links']['meta']['total'], 2)
def test_rules_enforced_with_parent_filter(self):
res = self.app.get(
self.ruled_url +
'filter[parent]={}'.format(
self.subB._id))
assert_equal(res.status_code, 200)
assert_equal(res.json['links']['meta']['total'], 0)
texts = [item['attributes']['text'] for item in res.json['data']]
assert_not_in('F', texts)
res = self.app.get(
self.ruled_url +
'filter[parent]={}'.format(
self.subI._id))
assert_equal(res.status_code, 200)
assert_equal(res.json['links']['meta']['total'], 1)
texts = [item['attributes']['text'] for item in res.json['data']]
assert_in('J', texts)
assert_not_in('K', texts)
res = self.app.get(
self.ruled_url +
'filter[parent]={}'.format(
self.subM._id))
assert_equal(res.status_code, 200)
assert_equal(res.json['links']['meta']['total'], 2)
texts = [item['attributes']['text'] for item in res.json['data']]
assert_in('N', texts)
assert_in('E', texts)
def test_no_rules_with_grandparent_filter(self):
res = self.app.get(
self.lawless_url +
'filter[parents]={}'.format(
self.subA._id))
assert_equal(res.status_code, 200)
assert_equal(res.json['links']['meta']['total'], 3)
def test_rules_enforced_with_grandparent_filter(self):
res = self.app.get(
self.ruled_url +
'filter[parents]={}'.format(
self.subA._id))
assert_equal(res.status_code, 200)
assert_equal(res.json['links']['meta']['total'], 2)
texts = [item['attributes']['text'] for item in res.json['data']]
assert_in('B', texts)
assert_in('D', texts)
assert_not_in('C', texts)
class TestPreprintProviderSpecificSubjects(ApiTestCase):
def setUp(self):
super(TestPreprintProviderSpecificSubjects, self).setUp()
self.provider_1 = PreprintProviderFactory()
self.provider_2 = PreprintProviderFactory()
self.root_subject_1 = SubjectFactory(
text='R1', provider=self.provider_1)
self.parent_subject_1 = SubjectFactory(
text='P1', provider=self.provider_1, parent=self.root_subject_1)
self.child_subject_1 = SubjectFactory(
text='C1', provider=self.provider_1, parent=self.parent_subject_1)
self.root_subject_2 = SubjectFactory(
text='R2', provider=self.provider_2)
self.parent_subject_2 = SubjectFactory(
text='P2', provider=self.provider_2, parent=self.root_subject_2)
self.child_subject_2 = SubjectFactory(
text='C2', provider=self.provider_2, parent=self.parent_subject_2)
self.url_1 = '/{}preprint_providers/{}/taxonomies/?page[size]=15&'.format(
API_BASE, self.provider_1._id)
self.url_2 = '/{}preprint_providers/{}/taxonomies/?page[size]=15&'.format(
API_BASE, self.provider_2._id)
def test_mapped_subjects_are_not_shared_list(self):
res_1 = self.app.get(self.url_1)
res_2 = self.app.get(self.url_2)
assert_equal(res_1.status_code, 200)
assert_equal(res_2.status_code, 200)
assert_equal(res_1.json['links']['meta']['total'], 3)
assert_equal(res_2.json['links']['meta']['total'], 3)
assert_equal(
len(set([d['attributes']['text'] for d in res_1.json['data']]) &
set([d['attributes']['text'] for d in res_2.json['data']])),
0
)
assert_equal(
len(set([d['attributes']['text'] for d in res_1.json['data']]) |
set([d['attributes']['text'] for d in res_2.json['data']])),
6
)
def test_mapped_subjects_are_not_shared_filter(self):
res_1 = self.app.get(
self.url_1 +
'filter[parent]={}'.format(
self.root_subject_1._id))
res_2 = self.app.get(
self.url_2 +
'filter[parent]={}'.format(
self.root_subject_2._id))
assert_equal(res_1.status_code, 200)
assert_equal(res_2.status_code, 200)
assert_equal(res_1.json['links']['meta']['total'], 1)
assert_equal(res_2.json['links']['meta']['total'], 1)
assert_equal(
len(set([d['attributes']['text'] for d in res_1.json['data']]) &
set([d['attributes']['text'] for d in res_2.json['data']])),
0
)
assert_equal(
len(set([d['attributes']['text'] for d in res_1.json['data']]) |
set([d['attributes']['text'] for d in res_2.json['data']])),
2
)
def test_mapped_subjects_filter_wrong_provider(self):
res_1 = self.app.get(
self.url_1 +
'filter[parent]={}'.format(
                self.root_subject_2._id))
res_2 = self.app.get(
self.url_2 +
'filter[parent]={}'.format(
                self.root_subject_1._id))
assert_equal(res_1.status_code, 200)
assert_equal(res_2.status_code, 200)
assert_equal(res_1.json['links']['meta']['total'], 0)
assert_equal(res_2.json['links']['meta']['total'], 0)
class TestPreprintProviderHighlightedSubjects(ApiTestCase):
def setUp(self):
super(TestPreprintProviderHighlightedSubjects, self).setUp()
self.provider = PreprintProviderFactory()
self.subj_a = SubjectFactory(provider=self.provider, text='A')
self.subj_aa = SubjectFactory(
provider=self.provider,
text='AA',
parent=self.subj_a,
highlighted=True)
self.url = '/{}preprint_providers/{}/taxonomies/highlighted/'.format(
API_BASE, self.provider._id)
    def test_only_highlighted_subjects_returned(self):
res = self.app.get(self.url)
assert res.status_code == 200
assert len(res.json['data']) == 1
assert res.json['data'][0]['id'] == self.subj_aa._id
class TestCustomTaxonomy(ApiTestCase):
def setUp(self):
super(TestCustomTaxonomy, self).setUp()
self.osf_provider = PreprintProviderFactory(
_id='osf', share_title='bepress')
self.asdf_provider = PreprintProviderFactory(
_id='asdf', share_title='ASDF')
bepress_subj = SubjectFactory(
text='BePress Text',
provider=self.osf_provider)
other_subj = SubjectFactory(
text='Other Text',
bepress_subject=bepress_subj,
provider=self.asdf_provider)
self.url = '/{}preprint_providers/{}/taxonomies/'
def test_taxonomy_share_title(self):
bepress_res = self.app.get(
self.url.format(
API_BASE,
self.osf_provider._id))
asdf_res = self.app.get(
self.url.format(
API_BASE,
self.asdf_provider._id))
assert len(bepress_res.json['data']) == len(asdf_res.json['data']) == 1
assert bepress_res.json['data'][0]['attributes']['share_title'] == self.osf_provider.share_title
assert asdf_res.json['data'][0]['attributes']['share_title'] == self.asdf_provider.share_title
|
|
# moosic/client/cli/main.py - The client portion of the moosic jukebox system.
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
import sys, socket, os, os.path, getopt, xmlrpclib, errno, time, locale, re
from moosic import VERSION
from moosic.utilities import *
from moosic.client.factory import *
from moosic.client.cli.dispatcher import *
# Define the True and False constants if they don't already exist.
try: True
except NameError: True = 1
try: False
except NameError: False = 0
# Giving stderr a shorter name is convenient when printing error messages.
err = sys.stderr
# Set the locale to the user's default settings, if possible.
try: locale.setlocale(locale.LC_ALL, '')
except: pass
def main(argv):
COMMANDS = get_command_docs() \
+ center_text('This Moosic has Super Cow Powers.', pad_char=' ')
USAGE = "usage: " + os.path.basename(argv[0]) + \
" [options] <command>" + '''
Options:
-d, --shuffle-dir When a directory is named on the command line,
shuffle the result of recursing through the
directory before inserting it into the list.
-a, --shuffle-args Shuffle only the arguments explicitly specified
on the command line.
-g, --shuffle-global Shuffle the entire argument list after
directory arguments specified on the command
line have been replaced with their contents.
This is the default behavior.
-o, --inorder Don't shuffle the given filelist at all, and
maintain the order specified on the command
line.
-s, --sort Sort the filelist, regardless of the order
specified on the command line.
-i, --ignore-case Treat any given regular expressions as if they
were case-insensitive.
-r, --no-recurse Don't replace directories named on the command
line with their contents.
-n, --non-file-args Don't change any names given in a filelist.
Useful if your filelist consists of URLs or
other objects that aren't local files.
-f, --auto-find Replace each string in the given filelist with
the results of a "fuzzy" search for music files
which match that string. (Beware: this can
be slow with large music collections.)
-F, --auto-grep Replace each string in the given filelist with
the results of a regular-expression search for
music files which match that string. (Beware:
this can be slow with large music collections.)
-m, --music-dir <dir> Specifies the directory to search when using the
"auto-find" and "auto-grep" features.
(Default: ~/music/)
-c, --config-dir <dir> Specifies the directory where moosic should
find the files kept by moosicd.
(Default: ~/.moosic/)
-t, --tcp <host>:<port> Communicate with a Moosic server that is
listening to the specified TCP/IP port on the
specified host. (Not recommended.)
-N, --no-startserver Don't automatically start moosicd if it isn't
already running.
-U, --allow-unplayable Allow songs that the server doesn't know how to
play to be added into the song queue.
-C, --current-in-list Show the currently playing song in the "list"
and "plainlist" commands.
-S, --showcommands Print the list of possible commands and exit.
-h, --help Print this help text and exit.
-v, --version Print version information and exit.
This Moosic has Super Cow Powers.'''
# Option processing.
def process_options(arglist, opts):
opts = opts.copy()
try:
opt_spec = { 'd':'shuffle-dir',
'a':'shuffle-args',
'g':'shuffle-global',
'o':'inorder',
's':'sort',
'i':'ignore-case',
'r':'no-recurse',
'n':'non-file-args',
'':'no-file-munge',
'f':'auto-find',
'F':'auto-grep',
'm:':'music-dir=',
'c:':'config-dir=',
't:':'tcp=',
'N':'no-startserver',
'U':'allow-unplayable',
'C':'current-in-list',
'S':'showcommands',
'h':'help',
'v':'version', }
short_opts = ''.join(opt_spec.keys())
long_opts = opt_spec.values()
options, arglist = getopt.getopt(arglist, short_opts, long_opts)
except getopt.error, e:
sys.exit('Option processing error: %s' % e)
for option, val in options:
if option == '-d' or option == '--shuffle-dir':
opts['shuffle-dir'] = True
opts['shuffle-global'] = False
if option == '-a' or option == '--shuffle-args':
opts['shuffle-args'] = True
opts['shuffle-global'] = False
if option == '-g' or option == '--shuffle-global':
opts['shuffle-global'] = True
if option == '-o' or option == '--inorder':
opts['shuffle-global'] = False
opts['shuffle-args'] = False
opts['shuffle-dir'] = False
if option == '-s' or option == '--sort':
opts['shuffle-global'] = False
opts['shuffle-args'] = False
opts['shuffle-dir'] = False
opts['sort'] = True
if option == '-i' or option == '--ignore-case':
opts['ignore-case'] = True
if option == '-r' or option == '--no-recurse':
opts['dir-recurse'] = False
if option == '-n' or option == '--no-file-munge' or option == '--non-file-args':
opts['file-munge'] = False
opts['dir-recurse'] = False
if option == '-f' or option == '--auto-find':
opts['auto-find'] = True
opts['auto-grep'] = False
if option == '-F' or option == '--auto-grep':
opts['auto-grep'] = True
opts['auto-find'] = False
if option == '-m' or option == '--music-dir':
opts['music-dir'] = os.path.abspath(os.path.expanduser(val))
if option == '-c' or option == '--config-dir':
opts['config-dir'] = os.path.abspath(os.path.expanduser(val))
if option == '-t' or option == '--tcp':
if ":" not in val:
sys.exit(('Invalid address: %s\n' % val) +
'You must specify both a hostname and a port number.\n'
'For example, "example.com:123"')
host, port = val.split(':', 1)
try:
port = int(port)
except ValueError, e:
sys.exit("Invalid port number: %s" % port)
opts['tcp-address'] = (host, port)
if option == '-N' or option == '--no-startserver':
opts['start moosicd'] = False
if option == '-U' or option == '--allow-unplayable':
opts['no-unplayables'] = False
if option == '-C' or option == '--current-in-list':
opts['current-in-list'] = True
if option == '-S' or option == '--showcommands':
print COMMANDS
sys.exit(0)
if option == '-h' or option == '--help':
print USAGE
sys.exit(0)
if option == '-v' or option == '--version':
print "moosic", VERSION
print """
Copyright (C) 2001-2003 Daniel Pearson <[email protected]>
This is free software; see the source for copying conditions. There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."""
sys.exit(0)
return arglist, opts
home = os.getenv('HOME', '/tmp')
# Set the built-in default options.
opts = {'shuffle-global':True, 'shuffle-args':False, 'shuffle-dir':False,
'debug':False, 'file-munge':True, 'sort':False, 'dir-recurse':True,
'config-dir':os.path.join(home, '.moosic', ''), 'tcp-address':None,
'music-dir':os.path.join(home, 'music', ''), 'auto-find':False,
'auto-grep':False, 'start moosicd':True, 'ignore-case':False,
            'rc-filename':os.path.join(home, '.moosicrc'),
'no-unplayables':True, 'current-in-list':False}
# Gather options specified before the command.
arglist, opts = process_options(argv[1:], opts)
# Pluck the command out of the argument list.
if not arglist:
sys.exit("You must provide a command.\n"
"Use the --showcommands option to learn what commands are available.\n"
"Use the --help option to learn what options are available.\n"
"usage: %s [options] <command>" % os.path.basename(argv[0]))
command = arglist.pop(0)
command = command.lower()
command = re.sub(r'[\W_]', r'', command)
# Gather options specified after the command.
if command != 'startserver': # ...except for the startserver command.
arglist, opts = process_options(arglist, opts)
# TODO: Gather option values from a config file.
#file_opts = process_configfile(opts['rc-filename'], opts)
# Use the options from the command-line to override the options from the
# config file.
#file_opts.update(opts)
#opts = file_opts
if command not in dispatcher:
sys.exit(wrap(('Error: invalid command: "%s"\n' % command) +
"Use the --showcommands option or the 'help' command to see the "
"available commands. Use the --help option to learn about the "
"possible options.\n"
"usage: %s [options] <command>" % os.path.basename(argv[0]), 79))
# Check the number of arguments given to the command.
if not check_args(command, arglist):
sys.exit(2)
# Create a proxy object for speaking to the Moosic server.
if opts['tcp-address']:
host, port = opts['tcp-address']
moosic = InetMoosicProxy(host, port)
else:
server_address = os.path.join(opts['config-dir'], 'socket')
moosic = LocalMoosicProxy(server_address)
# Make sure that our connection to the server is working properly.
try:
if command != 'startserver': # unless the user is starting it explicitly
moosic.no_op()
except socket.error, e:
if e[0] in (errno.ECONNREFUSED, errno.ENOENT):
# The server doesn't seem to be running, so let's try to start it
# for ourselves.
if opts['tcp-address']:
failure_reason = "The target Moosic server is on a remote computer."
elif opts['start moosicd']:
print >>err, "Notice: The Moosic server isn't running, so it " \
"is being started automatically."
failure_reason = startServer('moosicd', '-c', opts['config-dir'])
else:
failure_reason = "Automatic launching of the server is disabled."
if failure_reason:
sys.exit(wrap("Error: The server (moosicd) doesn't seem to be "
"running, and it could not be started automatically "
"because:\n" + failure_reason, 79))
else:
                # Wait a bit to give moosicd time to start up.
time.sleep(0.25)
# Test the server connection again.
try:
moosic.no_op()
except Exception, e:
# We tried our best. Finally give up.
sys.exit("An attempt was made to start the Moosic "
"server, but it still can't be contacted.\n"
"%s: %s" % (str(e.__class__).split('.')[-1], e))
else:
sys.exit("Socket error: %s" % e)
try:
# Dispatch the command.
exit_status = dispatcher[command](moosic, arglist, opts)
except socket.error, e:
exit_status = "Socket error: %s" % e[1]
except xmlrpclib.Fault, e:
if ':' in e.faultString:
fault_type, fault_msg = e.faultString.split(':', 1)
fault_type = fault_type.split('.')[-1]
exit_status = "Error from Moosic server: [%s] %s" % (fault_type, fault_msg)
else:
exit_status = "Error from Moosic server: %s" % e.faultString
except xmlrpclib.ProtocolError, e:
exit_status = "RPC protocol error: %s %s." % (e.errcode, e.errmsg)
except ValueError, e:
exit_status = "Error: %s" % e
except Exception, e:
if isinstance(e, SystemExit):
raise e
else:
exit_status = "%s: %s" % (str(e.__class__).split('.')[-1], e)
sys.exit(exit_status)
if __name__ == '__main__':
main(sys.argv)
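# Illustrative invocations (hedged sketches; "list" and "plainlist" are the only command
# names confirmed by the option help above -- the full set lives in the dispatcher module):
#   moosic list                      # show the song queue on the local server
#   moosic -t example.com:8765 list  # talk to a Moosic server over TCP instead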
|
|
#!/usr/bin/env python3
import argparse
import logging
import os
import stat
import subprocess
import sys
import time
import yaml
class ConfigurationError(Exception):
pass
def configure_httpd(logger, run_dir, mgmt_ip):
sh_file = "{}/httpd_config-{}.sh".format(run_dir, time.strftime("%Y%m%d%H%M%S"))
logger.debug("Creating script file %s", sh_file)
with open(sh_file, "w") as f:
f.write(r'''#!/usr/bin/expect -f
set login "centos"
set addr {mgmt_ip}
set pw "centos"
set retry 0
set max 20
while {{ $retry < $max }} {{
sleep 5
spawn ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null $login@$addr
set timeout 10
expect "yes/no" {{
send "yes\r"
expect "*?assword:" {{ send "$pw\r"; break }}
}} "*?assword:" {{ send "$pw\r"; break }}
set retry [ expr $retry+1 ]
if {{ $retry == $max }} {{
puts "Configuration timed out."
exit 1
}}
}}
expect "]$ "
send "sudo su\r"
expect "]# "
# Move the default index.html so we get served the noindex page which
# has more content.
send "mv -f /var/www/html/index.html /var/www/html/index.html.bak\r"
expect "]# "
'''.format(mgmt_ip=mgmt_ip))
os.chmod(sh_file, stat.S_IRWXU)
rc = subprocess.call(sh_file, shell=True)
if rc != 0:
raise ConfigurationError("HAProxy add httpd config failed: {}".format(rc))
def configure_haproxy_add_httpd(logger, run_dir, haproxy_mgmt_ip, httpd_cp_ip, httpd_server_name):
sh_file = "{}/haproxy_add_httpd_config-{}.sh".format(run_dir, time.strftime("%Y%m%d%H%M%S"))
logger.debug("Creating script file %s", sh_file)
with open(sh_file, "w") as f:
f.write(r'''#!/usr/bin/expect -f
set login "centos"
set addr {mgmt_ip}
set pw "centos"
set retry 0
set max 20
while {{ $retry < $max }} {{
sleep 5
spawn ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null $login@$addr
set timeout 10
expect "yes/no" {{
send "yes\r"
expect "*?assword:" {{ send "$pw\r"; break }}
}} "*?assword:" {{ send "$pw\r"; break }}
set retry [ expr $retry+1 ]
if {{ $retry == $max }} {{
puts "Configuration timed out."
exit 1
}}
}}
expect "]$ "
send "sudo su\r"
expect "]# "
send "grep \"server {httpd_server_name} {httpd_cp_ip}\" /etc/haproxy/haproxy.cfg && echo \"Already configured\" && exit 0\r"
expect {{
"]$ " {{ exit }}
"]# "
}}
send "sed -i \'s/\\(.*Web server list.*\\)/\\1\\n server {httpd_server_name} {httpd_cp_ip}:80 check/g\' /etc/haproxy/haproxy.cfg\r"
expect "]# "
send "systemctl reload haproxy\r"
expect "]# "
set date [clock format [clock seconds] -format {{%Y-%m-%d %k:%M:%S}}]
send "echo '$date Added {httpd_server_name} {httpd_cp_ip}' >> /tmp/progress\r"
expect "]# "
'''.format(mgmt_ip=haproxy_mgmt_ip, httpd_cp_ip=httpd_cp_ip, httpd_server_name=httpd_server_name))
os.chmod(sh_file, stat.S_IRWXU)
rc = subprocess.call(sh_file, shell=True)
if rc != 0:
raise ConfigurationError("HAProxy add httpd config failed: {}".format(rc))
def configure_haproxy_remove_httpd(logger, run_dir, haproxy_mgmt_ip, httpd_server_name):
sh_file = "{}/haproxy_remove_httpd_config-{}.sh".format(run_dir, time.strftime("%Y%m%d%H%M%S"))
logger.debug("Creating script file %s", sh_file)
with open(sh_file, "w") as f:
f.write(r'''#!/usr/bin/expect -f
set login "centos"
set addr {mgmt_ip}
set pw "centos"
set retry 0
set max 20
while {{ $retry < $max }} {{
sleep 5
spawn ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null $login@$addr
set timeout 10
expect "yes/no" {{
send "yes\r"
expect "*?assword:" {{ send "$pw\r"; break }}
}} "*?assword:" {{ send "$pw\r"; break }}
set retry [ expr $retry+1 ]
if {{ $retry == $max }} {{
puts "Configuration timed out."
exit 1
}}
}}
expect "]$ "
send "sudo su\r"
expect "]# "
send "sed -i \'/server {httpd_server_name}/d\' /etc/haproxy/haproxy.cfg\r"
expect "]# "
send "systemctl reload haproxy\r"
expect "]# "
set date [clock format [clock seconds] -format {{%Y-%m-%d %k:%M:%S}}]
send "echo '$date Removed {httpd_server_name} ' >> /tmp/progress\r"
expect "]# "
'''.format(mgmt_ip=haproxy_mgmt_ip, httpd_server_name=httpd_server_name))
os.chmod(sh_file, stat.S_IRWXU)
rc = subprocess.call(sh_file, shell=True)
if rc != 0:
raise ConfigurationError("HAProxy remove httpd config failed: {}".format(rc))
def main(argv=sys.argv[1:]):
try:
parser = argparse.ArgumentParser()
parser.add_argument("yaml_cfg_file", type=argparse.FileType('r'))
parser.add_argument("--dry-run", action="store_true")
parser.add_argument("--quiet", "-q", dest="verbose", action="store_false")
args = parser.parse_args()
run_dir = os.path.join(os.environ['RIFT_INSTALL'], "var/run/rift")
if not os.path.exists(run_dir):
os.makedirs(run_dir)
log_file = "{}/rift_httpd_config-{}.log".format(run_dir, time.strftime("%Y%m%d%H%M%S"))
logging.basicConfig(filename=log_file, level=logging.DEBUG)
logger = logging.getLogger()
ch = logging.StreamHandler()
if args.verbose:
ch.setLevel(logging.DEBUG)
else:
ch.setLevel(logging.INFO)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
except Exception as e:
print("Got exception:{}".format(e))
raise
try:
yaml_str = args.yaml_cfg_file.read()
logger.debug("Input YAML file: %s", yaml_str)
yaml_cfg = yaml.load(yaml_str)
logger.debug("Input YAML cfg: %s", yaml_cfg)
# Check if this is post scale out trigger
def find_cp_ip(vnfr_list, vnfd_name, cp_name):
for vnfr in vnfr_list:
if vnfd_name in vnfr['name']:
for cp in vnfr['connection_points']:
logger.debug("Connection point: %s", format(cp))
if cp_name in cp['name']:
return cp['ip_address']
raise ValueError("Could not find vnfd %s connection point %s", vnfd_name, cp_name)
def find_mgmt_ip(vnfr_list, vnfd_name):
for vnfr in vnfr_list:
if vnfd_name in vnfr['name']:
return vnfr['rw_mgmt_ip']
raise ValueError("Could not find vnfd %s mgmt ip", vnfd_name)
def find_vnfr(vnfr_list, vnfd_name):
for vnfr in vnfr_list:
if vnfd_name in vnfr['name']:
return vnfr
raise ValueError("Could not find vnfd %s", vnfd_name)
haproxy_mgmt_ip = find_mgmt_ip(yaml_cfg['vnfrs_others'], "haproxy_vnfd")
httpd_cp_ip = find_cp_ip(yaml_cfg['vnfrs_in_group'], "httpd_vnfd", "cp0")
httpd_mgmt_ip = find_mgmt_ip(yaml_cfg['vnfrs_in_group'], "httpd_vnfd")
httpd_vnfr = find_vnfr(yaml_cfg['vnfrs_in_group'], "httpd_vnfd")
# HAProxy wants to use a name without .'s
httpd_server_name = httpd_vnfr["name"].replace(".", "__")
if yaml_cfg['trigger'] == 'post_scale_out':
logger.debug("Sleeping for 60 seconds to give VNFD mgmt VM a chance to boot up")
time.sleep(60)
configure_httpd(logger, run_dir, httpd_mgmt_ip)
logger.debug("HTTPD config done")
configure_haproxy_add_httpd(logger, run_dir, haproxy_mgmt_ip, httpd_cp_ip, httpd_server_name)
logger.debug("HA proxy add httpd done")
elif yaml_cfg['trigger'] == 'pre_scale_in':
configure_haproxy_remove_httpd(logger, run_dir, haproxy_mgmt_ip, httpd_server_name)
logger.debug("HA proxy remove httpd done")
else:
raise ValueError("Unexpected trigger {}".format(yaml_cfg['trigger']))
except Exception as e:
logger.exception(e)
raise
if __name__ == "__main__":
main()
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Time processing functions for use with GEOS-Chem/Data analysis
Use help(<name of function>) to get details on a particular function.
Notes
-----
- This module is under development; vestigial/inefficient code is being removed/updated.
- Where external code is used, credit is given.
"""
import logging
import numpy as np
import pandas as pd
import time
import calendar
import datetime as datetime
from datetime import datetime as datetime_
import sys
# Attempt to import ephem if installed
if sys.version_info.major < 3:
try:
import ephem
except ImportError:
print('ephem package not installed')
def get_day_fraction(date):
"""
Get day fraction from a datetime object
Notes
-----
- for working with numpy arrays of datetimes, instead of pandas dataframes
"""
secs = (date.hour * 60.*60.)+(date.minute*60.)+(date.second)
dsecs = 24.*60.*60.
return secs/dsecs
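# Worked example (illustrative values): half a day elapsed gives 0.5, three-quarters gives 0.75:
#   get_day_fraction(datetime.datetime(2019, 1, 1, 12, 0, 0))  ->  0.5
#   get_day_fraction(datetime.datetime(2019, 1, 1, 18, 0, 0))  ->  0.75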
def dt64_2_dt(dt64, RtnAsArray=True):
"""
    Convert numpy.datetime64 to datetime.datetime (assuming UTC)
Parameters
-----
dt64 (numpy.datetime64): datetime to convert
Notes
-----
    - TODO: Convert this to work as a lambda function for scalability
"""
ns = 1e-9 # number of seconds in a nanosecond
dt = [datetime_.utcfromtimestamp(i.astype(int) * ns) for i in dt64]
if RtnAsArray:
return np.array(dt)
else:
return dt
def nonISOdate2ISO(ds):
"""
    Convert a non-ISO date string to an ISO date string
Parameters
-----
ds(str): date string
"""
import re
logging.info('nonISOdate2ISO called')
    regex = re.compile(r'(\d\d\d\d-\d-\d\d)')
    regexII = re.compile(r'(.*\s\d:.*)')
print(ds)
for d in ds:
# print 0, d, len(d[0]), [ re.match(regexII, d[0]) ]
d = d[0]
# swap ' ?:00:00' for '00:00:00'
d = d.replace(' 0:', ' 00:')
if re.match(regexII, d):
d = d[:-7]+'0'+d[-7:]
# print 1, d, len(d)
if len(d) != 19:
# if one digit for day and month
if len(d) == 17:
d = d[:5]+'0'+d[5:7]+'0'+d[7:]
# print 1.1, d, len(d), [ re.match(regex, d) ]
# if one digit for day
if (re.match(regex, d)):
d = d[:5]+'0'+d[5:]
# print 1.2, d, len(d)
# if one digit for month
if len(d) != 19:
d = d[:8]+'0'+d[8:]
if len(d) != 19:
print((1.3, d, len(d[0])))
d = [d]
print((2, d, len(d[0])))
return ds
def nearest(ts, s):
"""
Find the nearest values (e.g. timestamp)
Parameters
-------
    ts (float, int, timestamp): the point for which the nearest value in 's' is sought
s (list): list of objects of the same type to be searched
Returns
-------
(timestamp)
Notes
-------
- Credit: Raymond Hettinger
http://stackoverflow.com/questions/8162379/python-locating-the-closest-timestamp
"""
    # Given a presorted list of timestamps: s = sorted(index)
    from bisect import bisect_left
    i = bisect_left(s, ts)
return min(s[max(0, i-1): i+2], key=lambda t: abs(ts - t))
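# Worked example (illustrative; the search list must already be sorted):
#   nearest(5, [1, 4, 7, 10])  ->  4
#   nearest(6, [1, 4, 7, 10])  ->  7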
def YYYYMMDD_HHMM_2_datetime(str1=None, str2=None, combined=False,
verbose=False, debug=False):
"""
Mappable converter of strings to datetime.
Parameters
-------
str1 (list): list of strings of times
str2 (list): list of strings of dates
combined (bool): if True, then a single list of strings is provided
debug (bool): print debugging options to screen
Returns
-------
(list)
"""
# Combined as one string
if combined:
dtime = str1
# Translate from str to datetime
dtime = [time.strptime(i, '%Y%m%d%H%M') for i in dtime]
dtime = [datetime_.fromtimestamp(time.mktime(i)) for i in dtime]
# Combine to one string
else:
# Make pandas dataframe
data = np.array([str1, str2])
if debug:
print((data.shape, data[:5, :], [type(i) for i in (str1, str2)]))
df = pd.DataFrame(data=data.T, columns=['YYYYMMDD', 'HHMM'])
# Convert to datetime
dtime = DF_YYYYMMDD_HHMM_2_dt(df=df)
dtime = dtime.index
return dtime
def add_months(sourcedate, months):
"""
Incremental increase of datetime by given months
"""
month = sourcedate.month - 1 + months
    year = sourcedate.year + month // 12
month = month % 12 + 1
day = min(sourcedate.day, calendar.monthrange(year, month)[1])
return datetime.datetime(year, month, day)
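# Worked example (illustrative; note the day is clamped to the end of the target month):
#   add_months(datetime.datetime(2020, 1, 31), 1)   ->  datetime.datetime(2020, 2, 29, 0, 0)
#   add_months(datetime.datetime(2019, 11, 15), 3)  ->  datetime.datetime(2020, 2, 15, 0, 0)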
def add_days(sourcedate, days_):
"""
Incremental increase of datetime by given days
"""
sourcedate += datetime.timedelta(days=float(days_))
return sourcedate
def add_hrs(sourcedate, hrs_, debug=False):
"""
Incremental increase of datetime by given hours
"""
if debug:
print((sourcedate, hrs_))
sourcedate += datetime.timedelta(hours=float(hrs_))
return sourcedate
def add_minutes(sourcedate, min_, debug=False):
"""
Incremental increase of datetime by given minutes
"""
sourcedate += datetime.timedelta(minutes=float(min_))
return sourcedate
def add_secs(sourcedate, secs_, debug=False):
"""
Incremental increase of datetime by given seconds
"""
sourcedate += datetime.timedelta(seconds=float(secs_))
return sourcedate
def secs_in_month(months=None, years=None):
"""
Get number of seconds in a specific month for a specific year (default=2009)
"""
    # Get generic months and years (default year: 2009) if not given
if not isinstance(months, list):
months = list(range(1, 13))
if not isinstance(years, list):
years = [2009] * len(months)
# Get number of seconds in specific month in year
# conversion: sec => min => hours => days => months
ars = []
for i, m_ in enumerate(months):
ars += [60*60*24*calendar.monthrange(int(years[i]), int(m_))[1]]
# Return as a np.array
return np.array(ars)
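# Worked example (illustrative):
#   secs_in_month(months=[2], years=[2016])  ->  array([2505600])   # 29 days * 86400 s
#   secs_in_month()                          ->  seconds in each month of 2009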
def get_dt4run(time_span='year', period=1, startyear=2005, endyear=2005,
endhour=23, a=None, b=None):
"""
Make list of datetimes for a given range or between two datetimes
Parameters
-------
a, b (datetime.datetime): dates to create list of dates between (a=first date)
endhour (int): last hour to use in list of dates
startyear, endyear (int): first and last year to output list of dates for
time_span (str): string of time period (e.g. days)
period (int): periodicity of returned list of dates (1= 1 hour)
Returns
-------
(list)
"""
# Set dates
if isinstance(a, type(None)):
a = datetime.datetime(startyear, 2, 1, 0, 0)
if time_span == '3days':
b = datetime.datetime(endyear, 2, 3, endhour, 0) # 3 day
if time_span == 'week':
b = datetime.datetime(endyear, 2, 7, endhour, 0) # week
if time_span == 'month':
b = datetime.datetime(endyear, 3, 1, endhour, 0) # one month
if time_span == '6months':
b = datetime.datetime(endyear, 8, 1, endhour, 0) # 6 month(s)
if time_span == 'year':
endyear = 2006 # Kludge as Data ran from Feb to Feb
b = datetime.datetime(endyear, 1, 31, endhour, 0) # full year
# Make list of dates to view (hourly intervals between a and b)
dates = dt_hrs_a2b(a, b)
return dates
def dt_hrs_a2b(a, b, period=1, debug=False):
"""
Returns list of hour spaced datetimes between two given datetimes
Parameters
-------
a, b (datetime.datetime): dates to create list of dates between (a=first date)
period (int): periodicity of returned list of dates (1= 1 hour)
Returns
-------
(list)
"""
dates = [a]
if debug:
print((dates, a, b, period))
while dates[-1] < b:
dates += [add_hrs(dates[-1], period)]
if debug:
print((dates[0], dates[-1]))
return dates
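# Worked example (illustrative; the end point is included when the interval divides evenly):
#   dt_hrs_a2b(datetime.datetime(2019, 1, 1, 0), datetime.datetime(2019, 1, 1, 3))
#   ->  four hourly datetimes: 00:00, 01:00, 02:00 and 03:00 on 2019-01-01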
# def normalise2dailymax(dates, data, debug=False):
# """
# Normalise data to daily maximiun.
#
# ARGUMENTS:
# - list of dates as datetime.datetime objects.
# - list of of
# """
# logging.info('normalise2dailymax called')
# if debug:
# logging.debug([(type(i), i.shape) for i in (data, dates)])
#
# # Get list of unique dates & remove mean from dates
# dates = np.ma.array([datetime.datetime(*i.timetuple()[:3]) for i in dates])
# idates = np.ma.array((sorted(set(dates))))
#
# if debug:
# logging.debug([(np.min(i), np.max(i), np.mean(i)) for i in [data]])
# for s in idates:
# # print s, len(data[np.ma.where( dates == s) ]), np.ma.max(data[np.ma.where( dates == s )] )
# data[np.ma.where(dates == s)] = data[np.ma.where(
# dates == s)] - np.ma.max(data[np.ma.where(dates == s)])
# if debug:
# logging.debug([(np.min(i), np.max(i), np.mean(i)) for i in [data]])
# return data
def time2datetime(dates):
"""
Convert time object to datetime object
"""
    assert type(dates) == list, 'Please provide a list of time objects to convert'
return [datetime_.fromtimestamp(time.mktime(i)) for i in dates]
def num2month(input=None, reverse=False, rtn_dict=False):
"""
Convert number (1-12) to abbreviated name of month
Parameters
-------
reverse (bool): invert dictionary if reverse==True.
rtn_dict (bool): return the entire dictionary instead of a value for a key
Notes
-------
- input is either a 3 character month string or an integer 1=>12
"""
d = {
1: 'Jan',
2: 'Feb',
3: 'Mar',
4: 'Apr',
5: 'May',
6: 'Jun',
7: 'Jul',
8: 'Aug',
9: 'Sep',
10: 'Oct',
11: 'Nov',
12: 'Dec'
}
if reverse:
d = {v: k for k, v in list(d.items())}
if rtn_dict:
return d
else:
return d[input]
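# Worked example:
#   num2month(3)                    ->  'Mar'
#   num2month('Mar', reverse=True)  ->  3
#   num2month(rtn_dict=True)        ->  the full {1: 'Jan', ..., 12: 'Dec'} mapping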
def DF_YYYYMMDD_HHMM_2_dt(df, date_header='YYYYMMDD', time_header='HHMM',
rmvars=None, epoch=False):
"""
Convert times to datetime from time strings of HHMM and YYYYMMDD
Parameters
-------
df (pd.DataFrame): dataframe containing columns of datetimes in string format
time_header, date_header (str): column titles for time and date (?_header)
rmvars (list): list of variables to remove from dataframe
epoch (bool): return the values in terms of epoch (unix) time
Returns
-------
(pd.DataFrame)
"""
# Function to map integer to 4 char str
def format(x): return '{:0>4}'.format(int(x))
# Use mapped function for speed.
df[time_header] = df[time_header].map(format)
# Combine to make datetime.
# ( go via integer for dates, to ensure no floating zeros appear )
df['Datetime'] = df[date_header].astype(int).astype(str) + \
df[time_header].astype(str)
    logging.debug('1st 10 dates: {}'.format(df['Datetime'][:10]))
df['Datetime'] = pd.to_datetime(df['Datetime'], format='%Y%m%d%H%M')
# Remove variables if list provided as "rmvars"
if isinstance(rmvars, list):
        df = df.drop(rmvars, axis=1)
# Convert to Epoch if requested
if epoch:
def format(x): return unix_time(x)
df['Epoch'] = df['Datetime'].map(format).astype('i8')
del df['Datetime']
else:
df.index = df['Datetime']
return df
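# Minimal usage sketch (illustrative values; column names follow the defaults above):
#   df = pd.DataFrame({'YYYYMMDD': ['20190101', '20190101'], 'HHMM': ['0930', '1800']})
#   df = DF_YYYYMMDD_HHMM_2_dt(df)
#   df.index  ->  DatetimeIndex(['2019-01-01 09:30:00', '2019-01-01 18:00:00'], ...)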
def get_TZ4loc(lat=50, lon=0):
"""
Get the UTC offset (timezone/TZ) in hours for a given location
Parameters
-------
    lon (float): longitude in decimal degrees east
    lat (float): latitude in decimal degrees north
Notes
-------
- Original file with timezone boundaries can be found here http://ftp.as.harvard.edu/gcgrid/geos-chem/data/ExtData/HEMCO/TIMEZONES/v2015-02/
    - This may not include all locations (e.g. Cape Verde) and at some point should be updated to use the latest best data (linked below)
https://github.com/evansiroky/timezone-boundary-builder
    Returns
    -------
    (float)
"""
import os
import xarray as xr
import inspect
filename = inspect.getframeinfo(inspect.currentframe()).filename
path = os.path.dirname(os.path.abspath(filename))
folder = path+'/../data/'
filename = 'timezones_voronoi_1x1.nc'
folder = '/users/ts551/scratch/data/TIMEZONES/v2015-02/'
ds = xr.open_dataset(folder+filename)
UTC_offset = ds.sel(lat=lat, lon=lon, method='nearest').squeeze()
UTC_offset = UTC_offset['UTC_OFFSET'].values.astype('timedelta64[h]')
return UTC_offset.astype(float)
def unix_time(dt):
"""
Convert datetime to Unix time.
Parameters
-------
dt (datetime.datetime): Single datetime object
Notes
-------
- epoch is counted from a reference time of:
datetime.datetime(1970, 1, 1, 0, 0)
"""
epoch = datetime.datetime.utcfromtimestamp(0)
delta = dt - epoch
return delta.days*86400+delta.seconds+delta.microseconds/1e6
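# Worked example:
#   unix_time(datetime.datetime(1970, 1, 2))            ->  86400.0
#   unix_time(datetime.datetime(1970, 1, 1, 0, 0, 30))  ->  30.0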
def dt_days_a2b(a, b, period=1, debug=False):
"""
    Return a list of day-spaced datetimes between two datetime.datetime dates
Parameters
-------
a, b (datetime.datetime): dates to create list of dates between (a=first date)
    period (int): periodicity of returned list of dates (1 = 1 day)
Returns
-------
(list)
"""
dates = [a]
if debug:
print((dates, a, b, period))
while dates[-1] < b:
dates += [add_days(dates[-1], period)]
if debug:
print((dates[0], dates[-1]))
return dates
def get_nighttime_values(dates=None, data=None, select_nighttime=True,
select_daytime=False,
daybreak=datetime.datetime(1970, 1, 1, 6),
dayend=datetime.datetime(1970, 1, 1, 18)):
"""
Calculate nighttime values using dates array and pandas
"""
# use dataframe to map daytime boolean
df = pd.DataFrame(np.array(dates))
print(df)
df.columns = ['Datetime']
# function to generate boolean for daytime
def is_daytime(input, daybreak=daybreak, dayend=dayend):
"""
        Takes datetime.datetime and returns True (bool) if daytime
"""
daytime = False
# after daybreak
if (input.hour >= daybreak.hour):
daytime = True
        # ... but not after nightfall
if (input.hour > dayend.hour):
daytime = False
return daytime
df['ind'] = df.index.values
df['daytime'] = df['Datetime'].map(is_daytime)
# Just select nighttime or daytime
if select_nighttime:
df = df[df['daytime'] == False]
if select_daytime: # select daytime
df = df[df['daytime'] == True]
# Select just indexed values
data = np.array(data)[df['ind'].values, ...]
dates = np.array(dates)[df['ind'].values]
return data, dates
def get_daily_maximum(dates=None, data=None):
"""
Calculate daily maximum values using dates array and pandas
"""
# Use dataframe to hold dates and name column datetime
df = pd.DataFrame(np.array(dates))
df.columns = ['Datetime']
# Add column of index numbers to allow for later indexing...
df['ind'] = df.index.values
# Add column for days
def convert_datetime2days(input):
return datetime.datetime(*input.timetuple()[:3])
df['days'] = df['Datetime'].map(convert_datetime2days)
# - loop days
daily_max_data = []
# Make sure data is a numpy array
data = np.array(data)
for day in sorted(set(df['days'])):
print((day, df['days'][:5]))
# Select data for day
a_day_ind = df[df['days'] == day]
# Select data for day
a_day_data = data[a_day_ind['ind'].values, ...]
print([i.shape for i in (a_day_data, a_day_ind, data)])
# Get daily maximum
daily_max_data += [a_day_data.max(axis=0)]
# Get average daily maximum
avg_data = np.array(daily_max_data).mean(axis=0)
return avg_data
def get_8hr_rolling_mean(df, window=8):
"""
Get 8 hour rolling mean of pandas dataframe/series.
Parameters
-------
df (pd.DataFrame):
window (int): the window (hrs) over which to calculate mean (default=8 hrs)
Returns
-------
(pd.DataFrame)
"""
# loop columns if Dataframe
dfs = []
try:
for col in df.columns:
# apply mean
dfs += [df[col].rolling(window=window, center=False).mean()]
# Just process values if Series
except AttributeError:
df = df.rolling(window=window, center=False).mean()
# Combine dataframes
if len(dfs) > 1:
# concatenate
df = pd.concat(dfs, axis=1)
return df
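# Minimal usage sketch (illustrative; works for a Series via the AttributeError fallback):
#   s = pd.Series(range(24), index=pd.date_range('2019-01-01', periods=24, freq='H'))
#   get_8hr_rolling_mean(s).iloc[7]  ->  3.5   (mean of the first eight hourly values)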
def solartime(observer, sun=None):
"""
Get Solartime for location of 'observer' relative to 'sun'
Parameters
-------
observer (ephem observer object): Location of the observer
    sun (ephem sun object): Which sun to use? (default: our sun)
Returns
-------
(float)
Notes
-------
- Credit: J.F. Sebastian
http://stackoverflow.com/questions/13314626/local-solar-time-function-from-utc-and-longitude
"""
import ephem
if isinstance(sun, type(None)):
sun = ephem.Sun()
    # Astronomical math - compute the angle between the sun and observer
sun.compute(observer)
# sidereal time == ra (right ascension) is the highest point (noon)
hour_angle = observer.sidereal_time() - sun.ra
return ephem.hours(hour_angle + ephem.hours('12:00')).norm # norm for 24h
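# Minimal usage sketch (requires the optional ephem package; coordinates are illustrative):
#   obs = ephem.Observer()
#   obs.lon, obs.lat = '0', '51.5'                    # near Greenwich, degrees as strings
#   obs.date = datetime.datetime(2019, 6, 21, 12, 0)  # UTC
#   solartime(obs)  ->  local apparent solar time as an ephem angle, close to 12:00 here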
|
|
# Copyright (c) 2014 Alex Meade. All rights reserved.
# Copyright (c) 2015 Dustin Schoenbrun. All rights reserved.
# Copyright (c) 2015 Tom Barron. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from lxml import etree
import mock
import six
from cinder import exception
from cinder import test
from cinder.tests.unit.volume.drivers.netapp.dataontap.client import (
fakes as fake_client)
from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as fake
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp.dataontap.client import client_cmode
from cinder.volume.drivers.netapp import utils as netapp_utils
CONNECTION_INFO = {'hostname': 'hostname',
'transport_type': 'https',
'port': 443,
'username': 'admin',
'password': 'passw0rd',
'vserver': 'fake_vserver'}
class NetAppCmodeClientTestCase(test.TestCase):
def setUp(self):
super(NetAppCmodeClientTestCase, self).setUp()
with mock.patch.object(client_cmode.Client,
'get_ontapi_version',
return_value=(1, 20)):
self.client = client_cmode.Client(**CONNECTION_INFO)
self.client.connection = mock.MagicMock()
self.connection = self.client.connection
self.vserver = CONNECTION_INFO['vserver']
self.fake_volume = six.text_type(uuid.uuid4())
self.fake_lun = six.text_type(uuid.uuid4())
self.mock_send_request = self.mock_object(self.client, 'send_request')
def tearDown(self):
super(NetAppCmodeClientTestCase, self).tearDown()
def test_get_iscsi_target_details_no_targets(self):
response = netapp_api.NaElement(
etree.XML("""<results status="passed">
<num-records>1</num-records>
<attributes-list></attributes-list>
</results>"""))
self.connection.invoke_successfully.return_value = response
target_list = self.client.get_iscsi_target_details()
self.assertEqual([], target_list)
def test_get_iscsi_target_details(self):
expected_target = {
"address": "127.0.0.1",
"port": "1337",
"interface-enabled": "true",
"tpgroup-tag": "7777",
}
response = netapp_api.NaElement(
etree.XML("""<results status="passed">
<num-records>1</num-records>
<attributes-list>
<iscsi-interface-list-entry-info>
<ip-address>%(address)s</ip-address>
<ip-port>%(port)s</ip-port>
<is-interface-enabled>%(interface-enabled)s</is-interface-enabled>
<tpgroup-tag>%(tpgroup-tag)s</tpgroup-tag>
</iscsi-interface-list-entry-info>
</attributes-list>
</results>""" % expected_target))
self.connection.invoke_successfully.return_value = response
target_list = self.client.get_iscsi_target_details()
self.assertEqual([expected_target], target_list)
def test_get_iscsi_service_details_with_no_iscsi_service(self):
response = netapp_api.NaElement(
etree.XML("""<results status="passed">
<num-records>0</num-records>
</results>"""))
self.connection.invoke_successfully.return_value = response
iqn = self.client.get_iscsi_service_details()
self.assertEqual(None, iqn)
def test_get_iscsi_service_details(self):
expected_iqn = 'iqn.1998-01.org.openstack.iscsi:name1'
response = netapp_api.NaElement(
etree.XML("""<results status="passed">
<num-records>1</num-records>
<attributes-list>
<iscsi-service-info>
<node-name>%s</node-name>
</iscsi-service-info>
</attributes-list>
</results>""" % expected_iqn))
self.connection.invoke_successfully.return_value = response
iqn = self.client.get_iscsi_service_details()
self.assertEqual(expected_iqn, iqn)
def test_get_lun_list(self):
response = netapp_api.NaElement(
etree.XML("""<results status="passed">
<num-records>2</num-records>
<attributes-list>
<lun-info>
</lun-info>
<lun-info>
</lun-info>
</attributes-list>
</results>"""))
self.connection.invoke_successfully.return_value = response
luns = self.client.get_lun_list()
self.assertEqual(2, len(luns))
def test_get_lun_list_with_multiple_pages(self):
response = netapp_api.NaElement(
etree.XML("""<results status="passed">
<num-records>2</num-records>
<attributes-list>
<lun-info> </lun-info>
<lun-info> </lun-info>
</attributes-list>
<next-tag>fake-next</next-tag>
</results>"""))
response_2 = netapp_api.NaElement(
etree.XML("""<results status="passed">
<num-records>2</num-records>
<attributes-list>
<lun-info> </lun-info>
<lun-info> </lun-info>
</attributes-list>
</results>"""))
self.connection.invoke_successfully.side_effect = [response,
response_2]
luns = self.client.get_lun_list()
self.assertEqual(4, len(luns))
def test_get_lun_map_no_luns_mapped(self):
path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun)
response = netapp_api.NaElement(
etree.XML("""<results status="passed">
<num-records>0</num-records>
<attributes-list></attributes-list>
</results>"""))
self.connection.invoke_successfully.return_value = response
lun_map = self.client.get_lun_map(path)
self.assertEqual([], lun_map)
def test_get_lun_map(self):
path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun)
expected_lun_map = {
"initiator-group": "igroup",
"lun-id": "1337",
"vserver": "vserver",
}
response = netapp_api.NaElement(
etree.XML("""<results status="passed">
<num-records>1</num-records>
<attributes-list>
<lun-map-info>
<lun-id>%(lun-id)s</lun-id>
<initiator-group>%(initiator-group)s</initiator-group>
<vserver>%(vserver)s</vserver>
</lun-map-info>
</attributes-list>
</results>""" % expected_lun_map))
self.connection.invoke_successfully.return_value = response
lun_map = self.client.get_lun_map(path)
self.assertEqual([expected_lun_map], lun_map)
def test_get_lun_map_multiple_pages(self):
path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun)
expected_lun_map = {
"initiator-group": "igroup",
"lun-id": "1337",
"vserver": "vserver",
}
response = netapp_api.NaElement(
etree.XML("""<results status="passed">
<num-records>1</num-records>
<attributes-list>
<lun-map-info>
<lun-id>%(lun-id)s</lun-id>
<initiator-group>%(initiator-group)s</initiator-group>
<vserver>%(vserver)s</vserver>
</lun-map-info>
</attributes-list>
<next-tag>blah</next-tag>
</results>""" % expected_lun_map))
response_2 = netapp_api.NaElement(
etree.XML("""<results status="passed">
<num-records>1</num-records>
<attributes-list>
<lun-map-info>
<lun-id>%(lun-id)s</lun-id>
<initiator-group>%(initiator-group)s</initiator-group>
<vserver>%(vserver)s</vserver>
</lun-map-info>
</attributes-list>
</results>""" % expected_lun_map))
self.connection.invoke_successfully.side_effect = [response,
response_2]
lun_map = self.client.get_lun_map(path)
self.assertEqual([expected_lun_map, expected_lun_map], lun_map)
def test_get_igroup_by_initiator_none_found(self):
initiator = 'initiator'
response = netapp_api.NaElement(
etree.XML("""<results status="passed">
<num-records>0</num-records>
<attributes-list></attributes-list>
</results>"""))
self.connection.invoke_successfully.return_value = response
igroup = self.client.get_igroup_by_initiators([initiator])
self.assertEqual([], igroup)
def test_get_igroup_by_initiators(self):
initiators = ['11:22:33:44:55:66:77:88']
expected_igroup = {
'initiator-group-os-type': 'default',
'initiator-group-type': 'fcp',
'initiator-group-name': 'openstack-igroup1',
}
response = netapp_api.NaElement(
etree.XML("""<results status="passed">
<attributes-list>
<initiator-group-info>
<initiator-group-alua-enabled>true</initiator-group-alua-enabled>
<initiator-group-name>%(initiator-group-name)s</initiator-group-name>
<initiator-group-os-type>default</initiator-group-os-type>
<initiator-group-throttle-borrow>false</initiator-group-throttle-borrow>
<initiator-group-throttle-reserve>0</initiator-group-throttle-reserve>
<initiator-group-type>%(initiator-group-type)s</initiator-group-type>
<initiator-group-use-partner>true</initiator-group-use-partner>
<initiator-group-uuid>f8aa707a-57fa-11e4-ad08-123478563412
</initiator-group-uuid>
<initiator-group-vsa-enabled>false</initiator-group-vsa-enabled>
<initiators>
<initiator-info>
<initiator-name>11:22:33:44:55:66:77:88</initiator-name>
</initiator-info>
</initiators>
<vserver>cinder-iscsi</vserver>
</initiator-group-info>
</attributes-list>
<num-records>1</num-records>
</results>""" % expected_igroup))
self.connection.invoke_successfully.return_value = response
igroups = self.client.get_igroup_by_initiators(initiators)
# make these lists of dicts comparable using hashable dictionaries
igroups = set(
[netapp_utils.hashabledict(igroup) for igroup in igroups])
expected = set([netapp_utils.hashabledict(expected_igroup)])
self.assertSetEqual(igroups, expected)
def test_get_igroup_by_initiators_multiple(self):
initiators = ['11:22:33:44:55:66:77:88', '88:77:66:55:44:33:22:11']
expected_igroup = {
'initiator-group-os-type': 'default',
'initiator-group-type': 'fcp',
'initiator-group-name': 'openstack-igroup1',
}
response = netapp_api.NaElement(
etree.XML("""<results status="passed">
<attributes-list>
<initiator-group-info>
<initiator-group-alua-enabled>true</initiator-group-alua-enabled>
<initiator-group-name>%(initiator-group-name)s</initiator-group-name>
<initiator-group-os-type>default</initiator-group-os-type>
<initiator-group-throttle-borrow>false</initiator-group-throttle-borrow>
<initiator-group-throttle-reserve>0</initiator-group-throttle-reserve>
<initiator-group-type>%(initiator-group-type)s</initiator-group-type>
<initiator-group-use-partner>true</initiator-group-use-partner>
<initiator-group-uuid>f8aa707a-57fa-11e4-ad08-123478563412
</initiator-group-uuid>
<initiator-group-vsa-enabled>false</initiator-group-vsa-enabled>
<initiators>
<initiator-info>
<initiator-name>11:22:33:44:55:66:77:88</initiator-name>
</initiator-info>
<initiator-info>
<initiator-name>88:77:66:55:44:33:22:11</initiator-name>
</initiator-info>
</initiators>
<vserver>cinder-iscsi</vserver>
</initiator-group-info>
</attributes-list>
<num-records>1</num-records>
</results>""" % expected_igroup))
self.connection.invoke_successfully.return_value = response
igroups = self.client.get_igroup_by_initiators(initiators)
# make these lists of dicts comparable using hashable dictionaries
igroups = set(
[netapp_utils.hashabledict(igroup) for igroup in igroups])
expected = set([netapp_utils.hashabledict(expected_igroup)])
self.assertSetEqual(igroups, expected)
def test_get_igroup_by_initiators_multiple_pages(self):
initiator = '11:22:33:44:55:66:77:88'
expected_igroup1 = {
'initiator-group-os-type': 'default',
'initiator-group-type': 'fcp',
'initiator-group-name': 'openstack-igroup1',
}
expected_igroup2 = {
'initiator-group-os-type': 'default',
'initiator-group-type': 'fcp',
'initiator-group-name': 'openstack-igroup2',
}
response_1 = netapp_api.NaElement(
etree.XML("""<results status="passed">
<attributes-list>
<initiator-group-info>
<initiator-group-alua-enabled>true</initiator-group-alua-enabled>
<initiator-group-name>%(initiator-group-name)s</initiator-group-name>
<initiator-group-os-type>default</initiator-group-os-type>
<initiator-group-throttle-borrow>false</initiator-group-throttle-borrow>
<initiator-group-throttle-reserve>0</initiator-group-throttle-reserve>
<initiator-group-type>%(initiator-group-type)s</initiator-group-type>
<initiator-group-use-partner>true</initiator-group-use-partner>
<initiator-group-uuid>f8aa707a-57fa-11e4-ad08-123478563412
</initiator-group-uuid>
<initiator-group-vsa-enabled>false</initiator-group-vsa-enabled>
<initiators>
<initiator-info>
<initiator-name>11:22:33:44:55:66:77:88</initiator-name>
</initiator-info>
</initiators>
<vserver>cinder-iscsi</vserver>
</initiator-group-info>
</attributes-list>
<next-tag>12345</next-tag>
<num-records>1</num-records>
</results>""" % expected_igroup1))
response_2 = netapp_api.NaElement(
etree.XML("""<results status="passed">
<attributes-list>
<initiator-group-info>
<initiator-group-alua-enabled>true</initiator-group-alua-enabled>
<initiator-group-name>%(initiator-group-name)s</initiator-group-name>
<initiator-group-os-type>default</initiator-group-os-type>
<initiator-group-throttle-borrow>false</initiator-group-throttle-borrow>
<initiator-group-throttle-reserve>0</initiator-group-throttle-reserve>
<initiator-group-type>%(initiator-group-type)s</initiator-group-type>
<initiator-group-use-partner>true</initiator-group-use-partner>
<initiator-group-uuid>f8aa707a-57fa-11e4-ad08-123478563412
</initiator-group-uuid>
<initiator-group-vsa-enabled>false</initiator-group-vsa-enabled>
<initiators>
<initiator-info>
<initiator-name>11:22:33:44:55:66:77:88</initiator-name>
</initiator-info>
</initiators>
<vserver>cinder-iscsi</vserver>
</initiator-group-info>
</attributes-list>
<num-records>1</num-records>
</results>""" % expected_igroup2))
self.connection.invoke_successfully.side_effect = [response_1,
response_2]
igroups = self.client.get_igroup_by_initiators([initiator])
# make these lists of dicts comparable using hashable dictionaries
igroups = set(
[netapp_utils.hashabledict(igroup) for igroup in igroups])
expected = set([netapp_utils.hashabledict(expected_igroup1),
netapp_utils.hashabledict(expected_igroup2)])
self.assertSetEqual(igroups, expected)
def test_clone_lun(self):
self.client.clone_lun(
'volume', 'fakeLUN', 'newFakeLUN',
qos_policy_group_name=fake.QOS_POLICY_GROUP_NAME)
self.assertEqual(1, self.connection.invoke_successfully.call_count)
def test_clone_lun_multiple_zapi_calls(self):
"""Test for when lun clone requires more than one zapi call."""
# Max block-ranges per call = 32, max blocks per range = 2^24
# Force 2 calls
bc = 2 ** 24 * 32 * 2
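        # Arithmetic check (added note): one ZAPI call covers at most
        # 32 ranges * 2**24 blocks = 536,870,912 blocks, so this block count
        # (1,073,741,824) requires exactly two calls.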
self.client.clone_lun('volume', 'fakeLUN', 'newFakeLUN',
block_count=bc)
self.assertEqual(2, self.connection.invoke_successfully.call_count)
def test_get_lun_by_args(self):
response = netapp_api.NaElement(
etree.XML("""<results status="passed">
<num-records>2</num-records>
<attributes-list>
<lun-info>
</lun-info>
</attributes-list>
</results>"""))
self.connection.invoke_successfully.return_value = response
lun = self.client.get_lun_by_args()
self.assertEqual(1, len(lun))
def test_get_lun_by_args_no_lun_found(self):
response = netapp_api.NaElement(
etree.XML("""<results status="passed">
<num-records>2</num-records>
<attributes-list>
</attributes-list>
</results>"""))
self.connection.invoke_successfully.return_value = response
lun = self.client.get_lun_by_args()
self.assertEqual(0, len(lun))
def test_get_lun_by_args_with_args_specified(self):
path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun)
response = netapp_api.NaElement(
etree.XML("""<results status="passed">
<num-records>2</num-records>
<attributes-list>
<lun-info>
</lun-info>
</attributes-list>
</results>"""))
self.connection.invoke_successfully.return_value = response
lun = self.client.get_lun_by_args(path=path)
__, _args, __ = self.connection.invoke_successfully.mock_calls[0]
actual_request = _args[0]
query = actual_request.get_child_by_name('query')
lun_info_args = query.get_child_by_name('lun-info').get_children()
# Assert request is made with correct arguments
self.assertEqual('path', lun_info_args[0].get_name())
self.assertEqual(path, lun_info_args[0].get_content())
self.assertEqual(1, len(lun))
def test_file_assign_qos(self):
api_args = {
'volume': fake.FLEXVOL,
'qos-policy-group-name': fake.QOS_POLICY_GROUP_NAME,
'file': fake.NFS_FILE_PATH,
'vserver': self.vserver
}
self.client.file_assign_qos(
fake.FLEXVOL, fake.QOS_POLICY_GROUP_NAME, fake.NFS_FILE_PATH)
self.mock_send_request.assert_has_calls([
mock.call('file-assign-qos', api_args, False)])
def test_set_lun_qos_policy_group(self):
api_args = {
'path': fake.LUN_PATH,
'qos-policy-group': fake.QOS_POLICY_GROUP_NAME,
}
self.client.set_lun_qos_policy_group(
fake.LUN_PATH, fake.QOS_POLICY_GROUP_NAME)
self.mock_send_request.assert_has_calls([
mock.call('lun-set-qos-policy-group', api_args)])
def test_provision_qos_policy_group_no_qos_policy_group_info(self):
self.client.provision_qos_policy_group(qos_policy_group_info=None)
self.assertEqual(0, self.connection.qos_policy_group_create.call_count)
def test_provision_qos_policy_group_legacy_qos_policy_group_info(self):
self.client.provision_qos_policy_group(
qos_policy_group_info=fake.QOS_POLICY_GROUP_INFO_LEGACY)
self.assertEqual(0, self.connection.qos_policy_group_create.call_count)
def test_provision_qos_policy_group_with_qos_spec(self):
self.mock_object(self.client, 'qos_policy_group_create')
self.client.provision_qos_policy_group(fake.QOS_POLICY_GROUP_INFO)
self.client.qos_policy_group_create.assert_has_calls([
mock.call(fake.QOS_POLICY_GROUP_NAME, fake.MAX_THROUGHPUT)])
def test_qos_policy_group_create(self):
api_args = {
'policy-group': fake.QOS_POLICY_GROUP_NAME,
'max-throughput': fake.MAX_THROUGHPUT,
'vserver': self.vserver,
}
self.client.qos_policy_group_create(
fake.QOS_POLICY_GROUP_NAME, fake.MAX_THROUGHPUT)
self.mock_send_request.assert_has_calls([
mock.call('qos-policy-group-create', api_args, False)])
def test_qos_policy_group_delete(self):
api_args = {
'policy-group': fake.QOS_POLICY_GROUP_NAME
}
self.client.qos_policy_group_delete(
fake.QOS_POLICY_GROUP_NAME)
self.mock_send_request.assert_has_calls([
mock.call('qos-policy-group-delete', api_args, False)])
def test_qos_policy_group_rename(self):
new_name = 'new-' + fake.QOS_POLICY_GROUP_NAME
api_args = {
'policy-group-name': fake.QOS_POLICY_GROUP_NAME,
'new-name': new_name,
}
self.client.qos_policy_group_rename(
fake.QOS_POLICY_GROUP_NAME, new_name)
self.mock_send_request.assert_has_calls([
mock.call('qos-policy-group-rename', api_args, False)])
def test_mark_qos_policy_group_for_deletion_no_qos_policy_group_info(self):
mock_rename = self.mock_object(self.client, 'qos_policy_group_rename')
mock_remove = self.mock_object(self.client,
'remove_unused_qos_policy_groups')
self.client.mark_qos_policy_group_for_deletion(
qos_policy_group_info=None)
self.assertEqual(0, mock_rename.call_count)
self.assertEqual(0, mock_remove.call_count)
def test_mark_qos_policy_group_for_deletion_legacy_qos_policy(self):
mock_rename = self.mock_object(self.client, 'qos_policy_group_rename')
mock_remove = self.mock_object(self.client,
'remove_unused_qos_policy_groups')
self.client.mark_qos_policy_group_for_deletion(
qos_policy_group_info=fake.QOS_POLICY_GROUP_INFO_LEGACY)
self.assertEqual(0, mock_rename.call_count)
self.assertEqual(1, mock_remove.call_count)
def test_mark_qos_policy_group_for_deletion_w_qos_spec(self):
mock_rename = self.mock_object(self.client, 'qos_policy_group_rename')
mock_remove = self.mock_object(self.client,
'remove_unused_qos_policy_groups')
mock_log = self.mock_object(client_cmode.LOG, 'warning')
new_name = 'deleted_cinder_%s' % fake.QOS_POLICY_GROUP_NAME
self.client.mark_qos_policy_group_for_deletion(
qos_policy_group_info=fake.QOS_POLICY_GROUP_INFO)
mock_rename.assert_has_calls([
mock.call(fake.QOS_POLICY_GROUP_NAME, new_name)])
self.assertEqual(0, mock_log.call_count)
self.assertEqual(1, mock_remove.call_count)
def test_mark_qos_policy_group_for_deletion_exception_path(self):
mock_rename = self.mock_object(self.client, 'qos_policy_group_rename')
mock_rename.side_effect = netapp_api.NaApiError
mock_remove = self.mock_object(self.client,
'remove_unused_qos_policy_groups')
mock_log = self.mock_object(client_cmode.LOG, 'warning')
new_name = 'deleted_cinder_%s' % fake.QOS_POLICY_GROUP_NAME
self.client.mark_qos_policy_group_for_deletion(
qos_policy_group_info=fake.QOS_POLICY_GROUP_INFO)
mock_rename.assert_has_calls([
mock.call(fake.QOS_POLICY_GROUP_NAME, new_name)])
self.assertEqual(1, mock_log.call_count)
self.assertEqual(1, mock_remove.call_count)
def test_remove_unused_qos_policy_groups(self):
mock_log = self.mock_object(client_cmode.LOG, 'debug')
api_args = {
'query': {
'qos-policy-group-info': {
'policy-group': 'deleted_cinder_*',
'vserver': self.vserver,
}
},
'max-records': 3500,
'continue-on-failure': 'true',
'return-success-list': 'false',
'return-failure-list': 'false',
}
self.client.remove_unused_qos_policy_groups()
self.mock_send_request.assert_has_calls([
mock.call('qos-policy-group-delete-iter', api_args, False)])
self.assertEqual(0, mock_log.call_count)
def test_remove_unused_qos_policy_groups_api_error(self):
mock_log = self.mock_object(client_cmode.LOG, 'debug')
api_args = {
'query': {
'qos-policy-group-info': {
'policy-group': 'deleted_cinder_*',
'vserver': self.vserver,
}
},
'max-records': 3500,
'continue-on-failure': 'true',
'return-success-list': 'false',
'return-failure-list': 'false',
}
self.mock_send_request.side_effect = netapp_api.NaApiError
self.client.remove_unused_qos_policy_groups()
self.mock_send_request.assert_has_calls([
mock.call('qos-policy-group-delete-iter', api_args, False)])
self.assertEqual(1, mock_log.call_count)
@mock.patch('cinder.volume.drivers.netapp.utils.resolve_hostname',
return_value='192.168.1.101')
def test_get_if_info_by_ip_not_found(self, mock_resolve_hostname):
fake_ip = '192.168.1.101'
response = netapp_api.NaElement(
etree.XML("""<results status="passed">
<num-records>0</num-records>
<attributes-list>
</attributes-list>
</results>"""))
self.connection.invoke_successfully.return_value = response
self.assertRaises(exception.NotFound, self.client.get_if_info_by_ip,
fake_ip)
@mock.patch('cinder.volume.drivers.netapp.utils.resolve_hostname',
return_value='192.168.1.101')
def test_get_if_info_by_ip(self, mock_resolve_hostname):
fake_ip = '192.168.1.101'
response = netapp_api.NaElement(
etree.XML("""<results status="passed">
<num-records>1</num-records>
<attributes-list>
<net-interface-info>
</net-interface-info>
</attributes-list>
</results>"""))
self.connection.invoke_successfully.return_value = response
results = self.client.get_if_info_by_ip(fake_ip)
self.assertEqual(1, len(results))
def test_get_vol_by_junc_vserver_not_found(self):
fake_vserver = 'fake_vserver'
fake_junc = 'fake_junction_path'
response = netapp_api.NaElement(
etree.XML("""<results status="passed">
<num-records>0</num-records>
<attributes-list>
</attributes-list>
</results>"""))
self.connection.invoke_successfully.return_value = response
self.assertRaises(exception.NotFound,
self.client.get_vol_by_junc_vserver,
fake_vserver, fake_junc)
def test_get_vol_by_junc_vserver(self):
fake_vserver = 'fake_vserver'
fake_junc = 'fake_junction_path'
expected_flex_vol = 'fake_flex_vol'
response = netapp_api.NaElement(
etree.XML("""<results status="passed">
<num-records>1</num-records>
<attributes-list>
<volume-attributes>
<volume-id-attributes>
<name>%(flex_vol)s</name>
</volume-id-attributes>
</volume-attributes>
</attributes-list>
</results>""" % {'flex_vol': expected_flex_vol}))
self.connection.invoke_successfully.return_value = response
actual_flex_vol = self.client.get_vol_by_junc_vserver(fake_vserver,
fake_junc)
self.assertEqual(expected_flex_vol, actual_flex_vol)
def test_clone_file(self):
expected_flex_vol = "fake_flex_vol"
expected_src_path = "fake_src_path"
expected_dest_path = "fake_dest_path"
self.connection.get_api_version.return_value = (1, 20)
self.client.clone_file(expected_flex_vol, expected_src_path,
expected_dest_path, self.vserver)
__, _args, __ = self.connection.invoke_successfully.mock_calls[0]
actual_request = _args[0]
actual_flex_vol = actual_request.get_child_by_name('volume') \
.get_content()
actual_src_path = actual_request \
.get_child_by_name('source-path').get_content()
actual_dest_path = actual_request.get_child_by_name(
'destination-path').get_content()
self.assertEqual(expected_flex_vol, actual_flex_vol)
self.assertEqual(expected_src_path, actual_src_path)
self.assertEqual(expected_dest_path, actual_dest_path)
self.assertEqual(actual_request.get_child_by_name(
'destination-exists'), None)
def test_clone_file_when_destination_exists(self):
expected_flex_vol = "fake_flex_vol"
expected_src_path = "fake_src_path"
expected_dest_path = "fake_dest_path"
self.connection.get_api_version.return_value = (1, 20)
self.client.clone_file(expected_flex_vol, expected_src_path,
expected_dest_path, self.vserver,
dest_exists=True)
__, _args, __ = self.connection.invoke_successfully.mock_calls[0]
actual_request = _args[0]
actual_flex_vol = actual_request.get_child_by_name('volume') \
.get_content()
actual_src_path = actual_request \
.get_child_by_name('source-path').get_content()
actual_dest_path = actual_request.get_child_by_name(
'destination-path').get_content()
self.assertEqual(expected_flex_vol, actual_flex_vol)
self.assertEqual(expected_src_path, actual_src_path)
self.assertEqual(expected_dest_path, actual_dest_path)
self.assertEqual('true',
actual_request.get_child_by_name(
'destination-exists').get_content())
def test_clone_file_when_destination_exists_and_version_less_than_1_20(
self):
expected_flex_vol = "fake_flex_vol"
expected_src_path = "fake_src_path"
expected_dest_path = "fake_dest_path"
self.connection.get_api_version.return_value = (1, 19)
self.client.clone_file(expected_flex_vol, expected_src_path,
expected_dest_path, self.vserver,
dest_exists=True)
__, _args, __ = self.connection.invoke_successfully.mock_calls[0]
actual_request = _args[0]
actual_flex_vol = actual_request.get_child_by_name('volume') \
.get_content()
actual_src_path = actual_request \
.get_child_by_name('source-path').get_content()
actual_dest_path = actual_request.get_child_by_name(
'destination-path').get_content()
self.assertEqual(expected_flex_vol, actual_flex_vol)
self.assertEqual(expected_src_path, actual_src_path)
self.assertEqual(expected_dest_path, actual_dest_path)
self.assertEqual(None,
actual_request.get_child_by_name(
'destination-exists'))
def test_get_file_usage(self):
expected_bytes = "2048"
fake_vserver = 'fake_vserver'
fake_path = 'fake_path'
response = netapp_api.NaElement(
etree.XML("""<results status="passed">
<unique-bytes>%(unique-bytes)s</unique-bytes>
</results>""" % {'unique-bytes': expected_bytes}))
self.connection.invoke_successfully.return_value = response
actual_bytes = self.client.get_file_usage(fake_vserver, fake_path)
self.assertEqual(expected_bytes, actual_bytes)
def test_get_operational_network_interface_addresses(self):
expected_result = ['1.2.3.4', '99.98.97.96']
api_response = netapp_api.NaElement(
fake_client.GET_OPERATIONAL_NETWORK_INTERFACE_ADDRESSES_RESPONSE)
self.mock_send_request.return_value = api_response
address_list = (
self.client.get_operational_network_interface_addresses())
self.assertEqual(expected_result, address_list)
def test_get_flexvol_capacity(self):
expected_total_size = 1000
expected_available_size = 750
fake_flexvol_path = '/fake/vol'
api_response = netapp_api.NaElement(
etree.XML("""
<results status="passed">
<attributes-list>
<volume-attributes>
<volume-space-attributes>
<size-available>%(available_size)s</size-available>
<size-total>%(total_size)s</size-total>
</volume-space-attributes>
</volume-attributes>
</attributes-list>
</results>""" % {'available_size': expected_available_size,
'total_size': expected_total_size}))
self.mock_send_request.return_value = api_response
total_size, available_size = (
self.client.get_flexvol_capacity(fake_flexvol_path))
self.assertEqual(expected_total_size, total_size)
self.assertEqual(expected_available_size, available_size)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 19 March 2012
@author: tcezard
'''
import sys
import os
import logging
from optparse import OptionParser
from glob import glob
from utils import utils_logging
import command_runner
from RAD_merge_bam_files import merge_bam_files
#Get the path to the current script to infer the path to the bundled picard jar
RADmapper_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
path_to_picard = os.path.join(RADmapper_dir,"picard")
mergeSamFilesWithCat_jar=os.path.join(path_to_picard,'MergeSamFilesWithCat.jar')
##### Generic merging functions
def merge_bam_files_with_picard(list_of_file, output_file=None, **kwargs):
"""This is a generic merging function for bam files.
    It assumes that all the bam files come from mapping to independent contigs"""
if not output_file:
#Create a generic name and put it in the current working directory
working_directory=os.getcwd()
i=1
output_file_template=os.path.join(working_directory,'tmp_merge_bam_%s.bam')
output_file=output_file_template%i
while os.path.exists(output_file):
i+=1
output_file=output_file_template%i
command = 'java -jar -Xmx2G %s VALIDATION_STRINGENCY=SILENT CAT_SEQUENCE_DICTIONARIES=True USE_THREADING=True O=%s '%(mergeSamFilesWithCat_jar,output_file)
inputs=['I=%s'%file for file in list_of_file]
command += ' '.join(inputs)
return_code=command_runner.run_command(command)
if return_code==0:
return output_file
else:
return None
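# Illustrative example (hypothetical file names): for list_of_file=['a.bam', 'b.bam']
# the command assembled above looks roughly like
#   java -jar -Xmx2G <...>/MergeSamFilesWithCat.jar VALIDATION_STRINGENCY=SILENT \
#       CAT_SEQUENCE_DICTIONARIES=True USE_THREADING=True O=<output_file> I=a.bam I=b.bam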
def concatenate_file(list_of_file,output_file=None, **kwargs):
"""This is a generic merging function for concatenating text files.
It can take a filter keyword argument to grep out using the provided value"""
if not output_file:
#Create a generic name and put it in the current working directory
        if 'output_dir' in kwargs:
working_directory = kwargs.get('output_dir')
else:
working_directory = os.getcwd()
i=1
output_file_template=os.path.join(working_directory,'tmp_concatenate_%s')
output_file=output_file_template%i
while os.path.exists(output_file):
i+=1
output_file=output_file_template%i
    if 'filter' in kwargs:
filter_on = kwargs.get('filter')
command = 'cat %s | egrep -v %s > %s '%(' '.join(list_of_file), filter_on, output_file)
else:
command = 'cat %s > %s '%(' '.join(list_of_file), output_file)
return_code=command_runner.run_command(command)
if return_code==0:
return output_file
else:
return None
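# Illustrative example (hypothetical file names): concatenate_file(['a.vcf', 'b.vcf'],
# 'body.vcf', filter='^#') runs roughly
#   cat a.vcf b.vcf | egrep -v ^# > body.vcf
# i.e. the 'filter' keyword removes matching lines rather than keeping them.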
def merge_by_chunck(file_to_merge, function_to_merge, output_file=None, max_nb_file=100, **kwargs):
"""This function merge file using a generic merge function. It merges chunk of max_nb_file at one time"""
if len(file_to_merge) == 1:
if output_file:
os.rename(file_to_merge[0], output_file)
else:
output_file=file_to_merge[0]
elif len(file_to_merge)>max_nb_file:
new_file_to_merge=[]
for i in range(0,len(file_to_merge),max_nb_file):
tmp_merge_file = function_to_merge(file_to_merge[i:i+max_nb_file], **kwargs)
new_file_to_merge.append(tmp_merge_file)
output_file = merge_by_chunck(new_file_to_merge, function_to_merge, output_file, **kwargs)
for tmp_file in new_file_to_merge:
logging.info('Remove %s'%tmp_file)
os.remove(tmp_file)
else:
output_file= function_to_merge(file_to_merge, output_file, **kwargs)
return output_file
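# Worked example of the chunking above (illustrative numbers): with 250 input
# files and max_nb_file=100, three intermediate merges of 100, 100 and 50 files
# are produced, then the recursion merges those three intermediates into the
# final output and removes the temporary files.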
def merge_all_bam_files_from_directory(directory):
"""This function merge bam file from a single directory"""
directory=os.path.abspath(directory)
all_bam_files = glob(os.path.join(directory,'*_dir','*_corrected_sorted_mrk_dup_fixed.bam'))
output_file = os.path.join(directory,'%s_files.bam'%len(all_bam_files))
output_file = merge_bam_files_with_picard(all_bam_files, output_file)
if not output_file:
logging.error("Merging bam files in %s failed"%(directory))
#TODO do something about it
return output_file
def merge_contigs_files(directory):
"""This function merge bam file from a single directory"""
all_fasta_files = glob(os.path.join(directory,'*_dir','best_assembly.fa'))
output_file = os.path.join(directory,'%s_best_assembly.fa'%len(all_fasta_files))
output_file = concatenate_file(all_fasta_files,output_file)
if not output_file:
logging.error("Merging assemblies in %s failed"%(directory))
#TODO do something about it
return output_file
def merge_snps_files(directory):
"""This function merge snps files from a single directory"""
return_code=0
all_vcf_files = glob(os.path.join(directory,'*_dir','*samtools.vcf'))
output_file_body = os.path.join(directory,'%s_snps_files.vcf.body'%len(all_vcf_files))
output_file_body = concatenate_file(all_vcf_files, output_file_body, filter="^#")
if output_file_body:
return_code=0
output_file_header = os.path.join(directory,'%s_snps_files.vcf.header'%len(all_vcf_files))
command = 'grep "^#" %s > %s '%(all_vcf_files[0], output_file_header)
if return_code==0:
return_code = command_runner.run_command(command)
output_file = os.path.join(directory,'%s_snps_files.vcf'%len(all_vcf_files))
command = 'cat %s %s > %s '%(output_file_header, output_file_body, output_file)
if return_code==0:
return_code = command_runner.run_command(command)
command = 'rm %s %s'%(output_file_header, output_file_body)
if return_code==0:
return_code = command_runner.run_command(command)
return return_code
def merge_all_bam_files_from_directories(directory):
"""This function will merge the bam files across all the directories"""
directory=os.path.abspath(directory)
all_bam_files = glob(os.path.join(directory,'*_dir','*_files.bam'))
    #Need to sort as glob returns the files in arbitrary order
all_bam_files.sort()
output_file = os.path.join(directory,'all_consensus_merged.bam')
merge_bam_files(all_bam_files,output_file)
#output_file = merge_by_chunck(all_bam_files, merge_bam_files_with_picard, output_file)
if output_file:
return 0
def merge_all_contigs_files_from_directories(directory):
"""This function will merge the contigs files across all the directories"""
all_fasta_files = glob(os.path.join(directory,'*_dir','*_best_assembly.fa'))
    #Need to sort as glob returns the files in arbitrary order
all_fasta_files.sort()
output_file = os.path.join(directory,'all_consensus_assembly.fa')
output_file = merge_by_chunck(all_fasta_files, concatenate_file, output_file)
if output_file:
return 0
def merge_all_snps_files_from_directories(directory):
"""This function will merge the snps files across all the directories"""
return_code=0
all_vcf_files = glob(os.path.join(directory,'*_dir','*_snps_files.vcf'))
output_file_body = os.path.join(directory,'all_consensus_snps_files.vcf.body')
output_file_body = merge_by_chunck(all_vcf_files, concatenate_file, output_file_body, filter="^#")
if output_file_body:
return_code=0
output_file_header = os.path.join(directory,'all_consensus_snps_files.vcf.header')
command = 'grep "^#" %s > %s '%(all_vcf_files[0], output_file_header)
if return_code==0:
return_code = command_runner.run_command(command)
output_file = os.path.join(directory,'all_consensus_snps_files.vcf')
command = 'cat %s %s > %s '%(output_file_header, output_file_body, output_file)
if return_code==0:
return_code = command_runner.run_command(command)
command = 'rm %s %s'%(output_file_header, output_file_body)
if return_code==0:
return_code = command_runner.run_command(command)
return return_code
def merge_all_summary_files_from_directories(directory):
"""This function will merge the summary files across all the directories"""
return_code=0
all_summary_files = glob(os.path.join(directory,'*_dir','*summary_stat.txt'))
output_file_body = os.path.join(directory,'all_summary_stat.txt.body')
output_file_body = merge_by_chunck(all_summary_files, concatenate_file, output_file_body, filter="^name")
if output_file_body:
return_code=0
output_file_header = os.path.join(directory,'all_summary_stat.txt.header')
command = 'head -n 1 %s > %s '%(all_summary_files[0], output_file_header)
if return_code==0:
return_code = command_runner.run_command(command)
output_file = os.path.join(directory,'all_summary_stat.txt')
command = 'cat %s %s > %s '%(output_file_header, output_file_body, output_file)
if return_code==0:
return_code = command_runner.run_command(command)
command = 'rm %s %s'%(output_file_header, output_file_body)
if return_code==0:
return_code = command_runner.run_command(command)
return return_code
def merge_results(directory):
    """Merge the contig, bam and SNP files within a single consensus directory."""
    contig_file = merge_contigs_files(directory)
    bam_file = merge_all_bam_files_from_directory(directory)
    return_code = merge_snps_files(directory)
    # Propagate a failure from the contig or bam merge (they return None on error)
    if return_code == 0 and (not contig_file or not bam_file):
        return_code = 1
    return return_code
def merge_all_results(directory):
return_code=0
if return_code==0:
return_code = merge_all_contigs_files_from_directories(directory)
if return_code==0:
return_code = merge_all_snps_files_from_directories(directory)
if return_code==0:
return_code = merge_all_summary_files_from_directories(directory)
# if return_code==0:
# return_code = merge_all_bam_files_from_directories(directory)
return return_code
def main():
#initialize the logging
utils_logging.init_logging(logging.INFO)
#Setup options
optparser=_prepare_optparser()
(options,args) = optparser.parse_args()
#verify options
arg_pass=_verifyOption(options)
if not arg_pass:
logging.warning(optparser.get_usage())
logging.critical("Non valid arguments: exit")
sys.exit(1)
if not options.print_commands:
command_runner.set_command_to_run_localy()
if options.debug:
utils_logging.init_logging(logging.DEBUG)
if options.final_merge:
code = merge_all_results(options.consensus_dir)
else:
code = merge_results(options.consensus_dir)
sys.exit(code)
def _prepare_optparser():
"""Prepare optparser object. New options will be added in this
function first.
"""
usage = """usage: %prog <-b bam_file> [ -o output_file]"""
description = """This script will take aligned RAD read to the consensuses and calculate per consensus coverage."""
optparser = OptionParser(version="None",description=description,usage=usage,add_help_option=False)
optparser.add_option("-h","--help",action="help",help="show this help message and exit.")
optparser.add_option("-d","--consensus_dir",dest="consensus_dir",type="string",
help="Path to a directory containing fastq file (only extension .fastq will be processed). Default: %default")
optparser.add_option("--final_merge",dest="final_merge",action='store_true',default=False,
help="Merge the already merged file. Default: %default")
optparser.add_option("--print",dest="print_commands",action='store_true',default=False,
help="print commands instead of running them. Default: %default")
optparser.add_option("--debug",dest="debug",action='store_true',default=False,
help="Output debug statements. Default: %default")
return optparser
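# Typical invocation (sketch; the actual script file name is assumed here):
#   python RAD_merge_results.py -d /path/to/consensus_dir            # per-directory merge
#   python RAD_merge_results.py -d /path/to/run_dir --final_merge    # merge the merged files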
def _verifyOption(options):
"""Check if the mandatory option are present in the options objects.
@return False if any argument is wrong."""
arg_pass=True
return arg_pass
if __name__=="__main__":
main()
|
|
# coding=utf-8
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Common code to manage .isolate format.
"""
import logging
import os
import posixpath
import re
import sys
import trace_inputs
PATH_VARIABLES = ('DEPTH', 'PRODUCT_DIR')
# Files that should be 0-length when mapped.
KEY_TOUCHED = 'isolate_dependency_touched'
# Files that should be tracked by the build tool.
KEY_TRACKED = 'isolate_dependency_tracked'
# Files that should not be tracked by the build tool.
KEY_UNTRACKED = 'isolate_dependency_untracked'
_GIT_PATH = os.path.sep + '.git'
_SVN_PATH = os.path.sep + '.svn'
def posix_relpath(path, root):
"""posix.relpath() that keeps trailing slash."""
out = posixpath.relpath(path, root)
if path.endswith('/'):
out += '/'
return out
def cleanup_path(x):
"""Cleans up a relative path. Converts any os.path.sep to '/' on Windows."""
if x:
x = x.rstrip(os.path.sep).replace(os.path.sep, '/')
if x == '.':
x = ''
if x:
x += '/'
return x
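# For example (illustrative): cleanup_path('foo/bar') == 'foo/bar/',
# cleanup_path('.') == '' and, on Windows, cleanup_path('foo\\bar') == 'foo/bar/'.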
def get_flavor():
"""Returns the system default flavor. Copied from gyp/pylib/gyp/common.py."""
flavors = {
'cygwin': 'win',
'win32': 'win',
'darwin': 'mac',
'sunos5': 'solaris',
'freebsd7': 'freebsd',
'freebsd8': 'freebsd',
}
return flavors.get(sys.platform, 'linux')
def default_blacklist(f):
"""Filters unimportant files normally ignored."""
return (
f.endswith(('.pyc', 'testserver.log')) or
_GIT_PATH in f or
_SVN_PATH in f or
f in ('.git', '.svn'))
def classify_files(root_dir, tracked, untracked):
"""Converts the list of files into a .isolate 'variables' dictionary.
Arguments:
- tracked: list of files names to generate a dictionary out of that should
probably be tracked.
- untracked: list of files names that must not be tracked.
"""
# These directories are not guaranteed to be always present on every builder.
OPTIONAL_DIRECTORIES = (
'test/data/plugin',
'third_party/WebKit/LayoutTests',
)
new_tracked = []
new_untracked = list(untracked)
def should_be_tracked(filepath):
"""Returns True if it is a file without whitespace in a non-optional
directory that has no symlink in its path.
"""
if filepath.endswith('/'):
return False
if ' ' in filepath:
return False
if any(i in filepath for i in OPTIONAL_DIRECTORIES):
return False
# Look if any element in the path is a symlink.
split = filepath.split('/')
for i in range(len(split)):
if os.path.islink(os.path.join(root_dir, '/'.join(split[:i+1]))):
return False
return True
for filepath in sorted(tracked):
if should_be_tracked(filepath):
new_tracked.append(filepath)
else:
# Anything else.
new_untracked.append(filepath)
variables = {}
if new_tracked:
variables[KEY_TRACKED] = sorted(new_tracked)
if new_untracked:
variables[KEY_UNTRACKED] = sorted(new_untracked)
return variables
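# Illustrative example (hypothetical paths): classify_files(root, ['base/a.txt',
# 'test/data/plugin/x.bin'], ['out.log']) demotes the optional-directory entry to
# untracked and returns
#   {KEY_TRACKED: ['base/a.txt'], KEY_UNTRACKED: ['out.log', 'test/data/plugin/x.bin']}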
def generate_simplified(
tracked, untracked, touched, root_dir, variables, relative_cwd):
"""Generates a clean and complete .isolate 'variables' dictionary.
Cleans up and extracts only files from within root_dir then processes
variables and relative_cwd.
"""
logging.info(
'generate_simplified(%d files, %s, %s, %s)' %
(len(tracked) + len(untracked) + len(touched),
root_dir, variables, relative_cwd))
# Constants.
# Skip log in PRODUCT_DIR. Note that these are applied on '/' style path
# separator.
LOG_FILE = re.compile(r'^\<\(PRODUCT_DIR\)\/[^\/]+\.log$')
EXECUTABLE = re.compile(
r'^(\<\(PRODUCT_DIR\)\/[^\/\.]+)' +
re.escape(variables.get('EXECUTABLE_SUFFIX', '')) +
r'$')
# Preparation work.
relative_cwd = cleanup_path(relative_cwd)
# Creates the right set of variables here. We only care about PATH_VARIABLES.
variables = dict(
('<(%s)' % k, variables[k].replace(os.path.sep, '/'))
for k in PATH_VARIABLES if k in variables)
# Actual work: Process the files.
# TODO(maruel): if all the files in a directory are in part tracked and in
# part untracked, the directory will not be extracted. Tracked files should be
# 'promoted' to be untracked as needed.
tracked = trace_inputs.extract_directories(
root_dir, tracked, default_blacklist)
untracked = trace_inputs.extract_directories(
root_dir, untracked, default_blacklist)
# touched is not compressed, otherwise it would result in files to be archived
# that we don't need.
def fix(f):
"""Bases the file on the most restrictive variable."""
logging.debug('fix(%s)' % f)
# Important, GYP stores the files with / and not \.
f = f.replace(os.path.sep, '/')
# If it's not already a variable.
if not f.startswith('<'):
# relative_cwd is usually the directory containing the gyp file. It may be
# empty if the whole directory containing the gyp file is needed.
f = posix_relpath(f, relative_cwd) or './'
for variable, root_path in variables.iteritems():
if f.startswith(root_path):
f = variable + f[len(root_path):]
break
    # Now strip off known files we want to ignore and apply any specific
    # mangling as necessary. It's easier to do it here than generate a blacklist.
match = EXECUTABLE.match(f)
if match:
return match.group(1) + '<(EXECUTABLE_SUFFIX)'
if LOG_FILE.match(f):
return None
if sys.platform == 'darwin':
# On OSX, the name of the output is dependent on gyp define, it can be
# 'Google Chrome.app' or 'Chromium.app', same for 'XXX
# Framework.framework'. Furthermore, they are versioned with a gyp
# variable. To lower the complexity of the .isolate file, remove all the
# individual entries that show up under any of the 4 entries and replace
# them with the directory itself. Overall, this results in a bit more
# files than strictly necessary.
OSX_BUNDLES = (
'<(PRODUCT_DIR)/Chromium Framework.framework/',
'<(PRODUCT_DIR)/Chromium.app/',
'<(PRODUCT_DIR)/Google Chrome Framework.framework/',
'<(PRODUCT_DIR)/Google Chrome.app/',
)
for prefix in OSX_BUNDLES:
if f.startswith(prefix):
          # Note this results in duplicate values, so a set() must be used to
          # remove duplicates.
return prefix
return f
tracked = set(filter(None, (fix(f.path) for f in tracked)))
untracked = set(filter(None, (fix(f.path) for f in untracked)))
touched = set(filter(None, (fix(f.path) for f in touched)))
out = classify_files(root_dir, tracked, untracked)
if touched:
out[KEY_TOUCHED] = sorted(touched)
return out
def generate_isolate(
tracked, untracked, touched, root_dir, variables, relative_cwd):
"""Generates a clean and complete .isolate file."""
result = generate_simplified(
tracked, untracked, touched, root_dir, variables, relative_cwd)
return {
'conditions': [
['OS=="%s"' % get_flavor(), {
'variables': result,
}],
],
}
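# For example (illustrative), on a Linux host the returned structure has the shape
#   {'conditions': [['OS=="linux"', {'variables': {...}}]]}
# which can then be written out, e.g. via pretty_print() below.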
def split_touched(files):
"""Splits files that are touched vs files that are read."""
tracked = []
touched = []
for f in files:
if f.size:
tracked.append(f)
else:
touched.append(f)
return tracked, touched
def pretty_print(variables, stdout):
"""Outputs a gyp compatible list from the decoded variables.
  Similar to pprint.pprint() but with NIH syndrome.
"""
# Order the dictionary keys by these keys in priority.
ORDER = (
'variables', 'condition', 'command', 'relative_cwd', 'read_only',
KEY_TRACKED, KEY_UNTRACKED)
def sorting_key(x):
"""Gives priority to 'most important' keys before the others."""
if x in ORDER:
return str(ORDER.index(x))
return x
def loop_list(indent, items):
for item in items:
if isinstance(item, basestring):
stdout.write('%s\'%s\',\n' % (indent, item))
elif isinstance(item, dict):
stdout.write('%s{\n' % indent)
loop_dict(indent + ' ', item)
stdout.write('%s},\n' % indent)
elif isinstance(item, list):
# A list inside a list will write the first item embedded.
stdout.write('%s[' % indent)
for index, i in enumerate(item):
if isinstance(i, basestring):
stdout.write(
'\'%s\', ' % i.replace('\\', '\\\\').replace('\'', '\\\''))
elif isinstance(i, dict):
stdout.write('{\n')
loop_dict(indent + ' ', i)
if index != len(item) - 1:
x = ', '
else:
x = ''
stdout.write('%s}%s' % (indent, x))
else:
assert False
stdout.write('],\n')
else:
assert False
def loop_dict(indent, items):
for key in sorted(items, key=sorting_key):
item = items[key]
stdout.write("%s'%s': " % (indent, key))
if isinstance(item, dict):
stdout.write('{\n')
loop_dict(indent + ' ', item)
stdout.write(indent + '},\n')
elif isinstance(item, list):
stdout.write('[\n')
loop_list(indent + ' ', item)
stdout.write(indent + '],\n')
elif isinstance(item, basestring):
stdout.write(
'\'%s\',\n' % item.replace('\\', '\\\\').replace('\'', '\\\''))
elif item in (True, False, None):
stdout.write('%s\n' % item)
else:
assert False, item
stdout.write('{\n')
loop_dict(' ', variables)
stdout.write('}\n')
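# Illustrative example (hypothetical data): pretty_print(
#     {'variables': {KEY_TRACKED: ['a.txt']}}, sys.stdout)
# writes roughly:
#   {
#     'variables': {
#       'isolate_dependency_tracked': [
#         'a.txt',
#       ],
#     },
#   }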
|
|
"""Task runner"""
import sys
from multiprocessing import Process, Queue as MQueue
from threading import Thread
import pickle
import six
from six.moves import queue, xrange
import cloudpickle
from .exceptions import InvalidTask, CatchedException
from .exceptions import TaskFailed, SetupError, DependencyError, UnmetDependency
from .task import DelayedLoaded
# execution result.
SUCCESS = 0
FAILURE = 1
ERROR = 2
class Runner(object):
"""Task runner
run_all()
run_tasks():
for each task:
select_task()
execute_task()
process_task_result()
finish()
"""
def __init__(self, dep_manager, reporter, continue_=False,
always_execute=False, verbosity=0):
"""
@param dep_manager: DependencyBase
@param reporter: reporter object to be used
@param continue_: (bool) execute all tasks even after a task failure
@param always_execute: (bool) execute even if up-to-date or ignored
@param verbosity: (int) 0,1,2 see Task.execute
"""
self.dep_manager = dep_manager
self.reporter = reporter
self.continue_ = continue_
self.always_execute = always_execute
self.verbosity = verbosity
self.teardown_list = [] # list of tasks to be teardown
self.final_result = SUCCESS # until something fails
self._stop_running = False
def _handle_task_error(self, node, catched_excp):
"""handle all task failures/errors
called whenever there is an error before executing a task or
its execution is not successful.
"""
assert isinstance(catched_excp, CatchedException)
node.run_status = "failure"
self.dep_manager.remove_success(node.task)
self.reporter.add_failure(node.task, catched_excp)
# only return FAILURE if no errors happened.
if isinstance(catched_excp, TaskFailed) and self.final_result != ERROR:
self.final_result = FAILURE
else:
self.final_result = ERROR
if not self.continue_:
self._stop_running = True
def _get_task_args(self, task, tasks_dict):
"""get values from other tasks"""
task.init_options()
def get_value(task_id, key_name):
"""get single value or dict from task's saved values"""
if key_name is None:
return self.dep_manager.get_values(task_id)
return self.dep_manager.get_value(task_id, key_name)
# selected just need to get values from other tasks
for arg, value in six.iteritems(task.getargs):
task_id, key_name = value
if tasks_dict[task_id].has_subtask:
# if a group task, pass values from all sub-tasks
arg_value = {}
base_len = len(task_id) + 1 # length of base name string
for sub_id in tasks_dict[task_id].task_dep:
name = sub_id[base_len:]
arg_value[name] = get_value(sub_id, key_name)
else:
arg_value = get_value(task_id, key_name)
task.options[arg] = arg_value
def select_task(self, node, tasks_dict):
"""Returns bool, task should be executed
* side-effect: set task.options
Tasks should be executed if they are not up-to-date.
        Tasks that contain setup-tasks must be selected twice,
        so that their setup tasks get a chance to be executed after
        this task has been confirmed not up-to-date.
"""
task = node.task
# if run_status is not None, it was already calculated
if node.run_status is None:
self.reporter.get_status(task)
# check if task should be ignored (user controlled)
if node.ignored_deps or self.dep_manager.status_is_ignore(task):
node.run_status = 'ignore'
self.reporter.skip_ignore(task)
return False
# check task_deps
if node.bad_deps:
bad_str = " ".join(n.task.name for n in node.bad_deps)
self._handle_task_error(node, UnmetDependency(bad_str))
return False
# check if task is up-to-date
res = self.dep_manager.get_status(task, tasks_dict)
if res.status == 'error':
msg = "ERROR: Task '{}' checking dependencies: {}".format(
task.name, res.get_error_message())
self._handle_task_error(node, DependencyError(msg))
return False
# set node.run_status
if self.always_execute:
node.run_status = 'run'
else:
node.run_status = res.status
# if task is up-to-date skip it
if node.run_status == 'up-to-date':
self.reporter.skip_uptodate(task)
task.values = self.dep_manager.get_values(task.name)
return False
if task.setup_tasks:
                # don't execute now, execute setup tasks first...
return False
else:
# sanity checks
assert node.run_status == 'run', \
"%s:%s" % (task.name, node.run_status)
assert task.setup_tasks
try:
self._get_task_args(task, tasks_dict)
except Exception as exception:
msg = ("ERROR getting value for argument\n" + str(exception))
self._handle_task_error(node, DependencyError(msg))
return False
return True
def execute_task(self, task):
"""execute task's actions"""
# register cleanup/teardown
if task.teardown:
self.teardown_list.append(task)
# finally execute it!
self.reporter.execute_task(task)
return task.execute(sys.stdout, sys.stderr, self.verbosity)
def process_task_result(self, node, catched_excp):
"""handles result"""
task = node.task
# save execution successful
if catched_excp is None:
node.run_status = "successful"
task.save_extra_values()
self.dep_manager.save_success(task)
self.reporter.add_success(task)
# task error
else:
self._handle_task_error(node, catched_excp)
def run_tasks(self, task_dispatcher):
"""This will actually run/execute the tasks.
It will check file dependencies to decide if task should be executed
and save info on successful runs.
It also deals with output to stdout/stderr.
@param task_dispatcher: L{TaskDispacher}
"""
node = None
while True:
if self._stop_running:
break
try:
node = task_dispatcher.generator.send(node)
except StopIteration:
break
if not self.select_task(node, task_dispatcher.tasks):
continue
catched_excp = self.execute_task(node.task)
self.process_task_result(node, catched_excp)
def teardown(self):
"""run teardown from all tasks"""
for task in reversed(self.teardown_list):
self.reporter.teardown_task(task)
catched = task.execute_teardown(sys.stdout, sys.stderr,
self.verbosity)
if catched:
msg = "ERROR: task '%s' teardown action" % task.name
error = SetupError(msg, catched)
self.reporter.cleanup_error(error)
def finish(self):
"""finish running tasks"""
# flush update dependencies
self.dep_manager.close()
self.teardown()
# report final results
self.reporter.complete_run()
return self.final_result
def run_all(self, task_dispatcher):
"""entry point to run tasks
@ivar task_dispatcher (TaskDispatcher)
"""
try:
if hasattr(self.reporter, 'initialize'):
self.reporter.initialize(task_dispatcher.tasks)
self.run_tasks(task_dispatcher)
except InvalidTask as exception:
self.reporter.runtime_error(str(exception))
self.final_result = ERROR
finally:
self.finish()
return self.final_result
# JobXXX objects sent from main process to sub-process for execution
class JobHold(object):
"""Indicates there is no task ready to be executed"""
type = object()
class JobTask(object):
"""Contains a Task object"""
type = object()
def __init__(self, task):
self.name = task.name
try:
self.task_pickle = cloudpickle.dumps(task)
except pickle.PicklingError as excp:
msg = """Error on Task: `{}`.
Task created at execution time that has an attribute that can not be pickled,
so it is not feasible to use it with multi-processing. To fix this issue make sure
the task is picklable or just do not use multi-processing execution.
Original exception {}: {}
"""
raise InvalidTask(msg.format(self.name, excp.__class__, excp))
class JobTaskPickle(object):
"""dict of Task object excluding attributes that might be unpicklable"""
type = object()
def __init__(self, task):
self.task_dict = task.pickle_safe_dict() # actually a dict to be pickled
@property
def name(self):
return self.task_dict['name']
class MReporter(object):
"""send reported messages to master process
puts a dictionary {'name': <task-name>,
'reporter': <reporter-method-name>}
on runner's 'result_q'
"""
def __init__(self, runner, reporter_cls):
self.runner = runner
self.reporter_cls = reporter_cls
def __getattr__(self, method_name):
"""substitute any reporter method with a dispatching method"""
if not hasattr(self.reporter_cls, method_name):
raise AttributeError(method_name)
def rep_method(task):
self.runner.result_q.put({'name':task.name,
'reporter':method_name})
return rep_method
def complete_run(self):
"""ignore this on MReporter"""
pass
class MRunner(Runner):
"""MultiProcessing Runner """
Queue = staticmethod(MQueue)
Child = staticmethod(Process)
@staticmethod
def available():
"""check if multiprocessing module is available"""
# see: https://bitbucket.org/schettino72/doit/issue/17
# http://bugs.python.org/issue3770
        # not available on BSD systems
try:
import multiprocessing.synchronize
multiprocessing # pyflakes
except ImportError: # pragma: no cover
return False
else:
return True
def __init__(self, dep_manager, reporter,
continue_=False, always_execute=False,
verbosity=0, num_process=1):
Runner.__init__(self, dep_manager, reporter, continue_=continue_,
always_execute=always_execute, verbosity=verbosity)
self.num_process = num_process
        self.free_proc = 0 # number of free processes
self.task_dispatcher = None # TaskDispatcher retrieve tasks
self.tasks = None # dict of task instances by name
self.result_q = None
def __getstate__(self):
# multiprocessing on Windows will try to pickle self.
        # These attributes are not actually used by the spawned process,
        # so they are safe to remove.
pickle_dict = self.__dict__.copy()
pickle_dict['reporter'] = None
pickle_dict['task_dispatcher'] = None
pickle_dict['dep_manager'] = None
return pickle_dict
def get_next_job(self, completed):
"""get next task to be dispatched to sub-process
        On MP we need to check whether the dependencies have finished their execution
@returns : - None -> no more tasks to be executed
- JobXXX
"""
if self._stop_running:
return None # gentle stop
node = completed
while True:
# get next task from controller
try:
node = self.task_dispatcher.generator.send(node)
if node == "hold on":
self.free_proc += 1
return JobHold()
# no more tasks from controller...
except StopIteration:
# ... terminate one sub process if no other task waiting
return None
# send a task to be executed
if self.select_task(node, self.tasks):
# If sub-process already contains the Task object send
# only safe pickle data, otherwise send whole object.
task = node.task
if task.loader is DelayedLoaded and self.Child == Process:
return JobTask(task)
else:
return JobTaskPickle(task)
def _run_tasks_init(self, task_dispatcher):
"""initialization for run_tasks"""
self.task_dispatcher = task_dispatcher
self.tasks = task_dispatcher.tasks
def _run_start_processes(self, job_q, result_q):
"""create and start sub-processes
@param job_q: (multiprocessing.Queue) tasks to be executed
@param result_q: (multiprocessing.Queue) collect task results
@return list of Process
"""
# #### DEBUG PICKLE ERRORS
# # Python3 uses C implementation of pickle
# if six.PY2:
# Pickler = pickle.Pickler
# else: # pragma no cover
# Pickler = pickle._Pickler
# class MyPickler (Pickler):
# def save(self, obj):
# print('pickling object {} of type {}'.format(obj, type(obj)))
# try:
# Pickler.save(self, obj)
# except:
# print('error. skipping...')
# from six import BytesIO
# pickler = MyPickler(BytesIO())
# pickler.dump(self)
# ### END DEBUG
proc_list = []
for _ in xrange(self.num_process):
next_job = self.get_next_job(None)
if next_job is None:
break # do not start more processes than tasks
job_q.put(next_job)
process = self.Child(
target=self.execute_task_subprocess,
args=(job_q, result_q, self.reporter.__class__))
process.start()
proc_list.append(process)
return proc_list
def _process_result(self, node, task, result):
"""process result received from sub-process"""
if 'failure' in result:
catched_excp = result['failure']
else:
# success set values taken from subprocess result
catched_excp = None
task.update_from_pickle(result['task'])
for action, output in zip(task.actions, result['out']):
action.out = output
for action, output in zip(task.actions, result['err']):
action.err = output
self.process_task_result(node, catched_excp)
def run_tasks(self, task_dispatcher):
"""controls subprocesses task dispatching and result collection
"""
# result queue - result collected from sub-processes
result_q = self.Queue()
# task queue - tasks ready to be dispatched to sub-processes
job_q = self.Queue()
self._run_tasks_init(task_dispatcher)
proc_list = self._run_start_processes(job_q, result_q)
        # wait for all processes to terminate
proc_count = len(proc_list)
try:
while proc_count:
# wait until there is a result to be consumed
result = result_q.get()
if 'exit' in result:
raise result['exit'](result['exception'])
node = task_dispatcher.nodes[result['name']]
task = node.task
if 'reporter' in result:
getattr(self.reporter, result['reporter'])(task)
continue
self._process_result(node, task, result)
                # update the number of free processes
free_proc = self.free_proc + 1
self.free_proc = 0
                # try to dispatch as many tasks as there are free processes
completed = node
for _ in range(free_proc):
next_job = self.get_next_job(completed)
completed = None
if next_job is None:
proc_count -= 1
job_q.put(next_job)
# check for cyclic dependencies
assert len(proc_list) > self.free_proc
except (SystemExit, KeyboardInterrupt, Exception):
if self.Child == Process:
for proc in proc_list:
proc.terminate()
raise
# we are done, join all process
for proc in proc_list:
proc.join()
# get teardown results
while not result_q.empty(): # safe because subprocess joined
result = result_q.get()
assert 'reporter' in result
task = task_dispatcher.tasks[result['name']]
getattr(self.reporter, result['reporter'])(task)
def execute_task_subprocess(self, job_q, result_q, reporter_class):
"""executed on child processes
@param job_q: task queue,
* None elements indicate process can terminate
* JobHold indicate process should wait for next task
* JobTask / JobTaskPickle task to be executed
"""
self.result_q = result_q
if self.Child == Process:
self.reporter = MReporter(self, reporter_class)
try:
while True:
job = job_q.get()
if job is None:
self.teardown()
return # no more tasks to execute finish this process
                # job is an incomplete Task obj when pickled; attributes
                # that might contain unpicklable data were removed,
                # so we need to get the task from this process and update it
                # to get the dynamic task attributes.
if job.type is JobTaskPickle.type:
task = self.tasks[job.name]
if self.Child == Process: # pragma: no cover ...
                        # ... actually covered but the subprocess doesn't get it.
task.update_from_pickle(job.task_dict)
elif job.type is JobTask.type:
task = pickle.loads(job.task_pickle)
                # JobHold: do nothing. It is used to start the sub-process even
                # if no task is available when the process is created.
else:
assert job.type is JobHold.type
continue # pragma: no cover
result = {'name': task.name}
t_result = self.execute_task(task)
if t_result is None:
result['task'] = task.pickle_safe_dict()
result['out'] = [a.out for a in task.actions]
result['err'] = [a.err for a in task.actions]
else:
result['failure'] = t_result
result_q.put(result)
except (SystemExit, KeyboardInterrupt, Exception) as exception:
# error, blow-up everything. send exception info to master process
result_q.put({
'exit': exception.__class__,
'exception': str(exception)})
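# Note on the master/worker queue protocol used above (summarised from the code
# in run_tasks() and execute_task_subprocess(); not part of the upstream
# docstrings): the master puts JobTask, JobTaskPickle, JobHold or None objects
# on job_q, and each worker replies on result_q with a dict of one of these
# shapes:
#   {'name': ..., 'task': <pickle-safe dict>, 'out': [...], 'err': [...]}  # success
#   {'name': ..., 'failure': <exception-like object>}                      # task failed
#   {'name': ..., 'reporter': <reporter method name>}                      # reporter event
#   {'exit': <exception class>, 'exception': <message>}                    # worker crashed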
class MThreadRunner(MRunner):
"""Parallel runner using threads"""
Queue = staticmethod(queue.Queue)
class DaemonThread(Thread):
"""daemon thread to make sure process is terminated if there is
        an uncaught exception and threads are not correctly joined.
"""
def __init__(self, *args, **kwargs):
Thread.__init__(self, *args, **kwargs)
self.daemon = True
Child = staticmethod(DaemonThread)
@staticmethod
def available():
return True
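# Illustrative runner selection (hypothetical caller code; doit wires this up
# elsewhere). MRunner requires working multiprocessing synchronization, so a
# caller might fall back to the thread-based runner when it is unavailable:
#
#     runner_cls = MRunner if MRunner.available() else MThreadRunner
#     runner = runner_cls(dep_manager, reporter, num_process=4)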
|
|
"""Support for Tibber sensors."""
import asyncio
from datetime import datetime, timedelta
import logging
from random import randrange
import aiohttp
from homeassistant.components.sensor import (
DEVICE_CLASS_CURRENT,
DEVICE_CLASS_ENERGY,
DEVICE_CLASS_POWER,
DEVICE_CLASS_SIGNAL_STRENGTH,
DEVICE_CLASS_VOLTAGE,
STATE_CLASS_MEASUREMENT,
SensorEntity,
)
from homeassistant.const import (
ELECTRICAL_CURRENT_AMPERE,
ENERGY_KILO_WATT_HOUR,
POWER_WATT,
SIGNAL_STRENGTH_DECIBELS,
VOLT,
)
from homeassistant.core import callback
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers.device_registry import async_get as async_get_dev_reg
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity_registry import async_get as async_get_entity_reg
from homeassistant.util import Throttle, dt as dt_util
from .const import DOMAIN as TIBBER_DOMAIN, MANUFACTURER
_LOGGER = logging.getLogger(__name__)
ICON = "mdi:currency-usd"
SCAN_INTERVAL = timedelta(minutes=1)
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=5)
PARALLEL_UPDATES = 0
SIGNAL_UPDATE_ENTITY = "tibber_rt_update_{}"
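# Each RT_SENSOR_MAP entry maps a Tibber live-measurement field to
# [sensor name, device class, unit of measurement, state class]; the list is
# unpacked in that order by TibberRtDataHandler.async_callback below.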
RT_SENSOR_MAP = {
"averagePower": ["average power", DEVICE_CLASS_POWER, POWER_WATT, None],
"power": ["power", DEVICE_CLASS_POWER, POWER_WATT, None],
"minPower": ["min power", DEVICE_CLASS_POWER, POWER_WATT, None],
"maxPower": ["max power", DEVICE_CLASS_POWER, POWER_WATT, None],
"accumulatedConsumption": [
"accumulated consumption",
DEVICE_CLASS_ENERGY,
ENERGY_KILO_WATT_HOUR,
STATE_CLASS_MEASUREMENT,
],
"accumulatedConsumptionLastHour": [
"accumulated consumption current hour",
DEVICE_CLASS_ENERGY,
ENERGY_KILO_WATT_HOUR,
STATE_CLASS_MEASUREMENT,
],
"accumulatedProduction": [
"accumulated production",
DEVICE_CLASS_ENERGY,
ENERGY_KILO_WATT_HOUR,
STATE_CLASS_MEASUREMENT,
],
"accumulatedProductionLastHour": [
"accumulated production current hour",
DEVICE_CLASS_ENERGY,
ENERGY_KILO_WATT_HOUR,
STATE_CLASS_MEASUREMENT,
],
"lastMeterConsumption": [
"last meter consumption",
DEVICE_CLASS_ENERGY,
ENERGY_KILO_WATT_HOUR,
STATE_CLASS_MEASUREMENT,
],
"lastMeterProduction": [
"last meter production",
DEVICE_CLASS_ENERGY,
ENERGY_KILO_WATT_HOUR,
STATE_CLASS_MEASUREMENT,
],
"voltagePhase1": [
"voltage phase1",
DEVICE_CLASS_VOLTAGE,
VOLT,
STATE_CLASS_MEASUREMENT,
],
"voltagePhase2": [
"voltage phase2",
DEVICE_CLASS_VOLTAGE,
VOLT,
STATE_CLASS_MEASUREMENT,
],
"voltagePhase3": [
"voltage phase3",
DEVICE_CLASS_VOLTAGE,
VOLT,
STATE_CLASS_MEASUREMENT,
],
"currentL1": [
"current L1",
DEVICE_CLASS_CURRENT,
ELECTRICAL_CURRENT_AMPERE,
STATE_CLASS_MEASUREMENT,
],
"currentL2": [
"current L2",
DEVICE_CLASS_CURRENT,
ELECTRICAL_CURRENT_AMPERE,
STATE_CLASS_MEASUREMENT,
],
"currentL3": [
"current L3",
DEVICE_CLASS_CURRENT,
ELECTRICAL_CURRENT_AMPERE,
STATE_CLASS_MEASUREMENT,
],
"signalStrength": [
"signal strength",
DEVICE_CLASS_SIGNAL_STRENGTH,
SIGNAL_STRENGTH_DECIBELS,
STATE_CLASS_MEASUREMENT,
],
"accumulatedCost": ["accumulated cost", None, None, STATE_CLASS_MEASUREMENT],
}
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up the Tibber sensor."""
tibber_connection = hass.data.get(TIBBER_DOMAIN)
entity_registry = async_get_entity_reg(hass)
device_registry = async_get_dev_reg(hass)
entities = []
for home in tibber_connection.get_homes(only_active=False):
try:
await home.update_info()
except asyncio.TimeoutError as err:
_LOGGER.error("Timeout connecting to Tibber home: %s ", err)
raise PlatformNotReady() from err
except aiohttp.ClientError as err:
_LOGGER.error("Error connecting to Tibber home: %s ", err)
raise PlatformNotReady() from err
if home.has_active_subscription:
entities.append(TibberSensorElPrice(home))
if home.has_real_time_consumption:
await home.rt_subscribe(
TibberRtDataHandler(async_add_entities, home, hass).async_callback
)
# migrate
old_id = home.info["viewer"]["home"]["meteringPointData"]["consumptionEan"]
if old_id is None:
continue
        # migrate entity unique ids away from the old consumption EAN
old_entity_id = entity_registry.async_get_entity_id(
"sensor", TIBBER_DOMAIN, old_id
)
if old_entity_id is not None:
entity_registry.async_update_entity(
old_entity_id, new_unique_id=home.home_id
)
        # migrate device identifiers to the new home id
device_entry = device_registry.async_get_device({(TIBBER_DOMAIN, old_id)})
if device_entry and entry.entry_id in device_entry.config_entries:
device_registry.async_update_device(
device_entry.id, new_identifiers={(TIBBER_DOMAIN, home.home_id)}
)
async_add_entities(entities, True)
class TibberSensor(SensorEntity):
"""Representation of a generic Tibber sensor."""
def __init__(self, tibber_home):
"""Initialize the sensor."""
self._tibber_home = tibber_home
self._home_name = tibber_home.info["viewer"]["home"]["appNickname"]
if self._home_name is None:
self._home_name = tibber_home.info["viewer"]["home"]["address"].get(
"address1", ""
)
self._model = None
@property
def device_id(self):
"""Return the ID of the physical device this sensor is part of."""
return self._tibber_home.home_id
@property
def device_info(self):
"""Return the device_info of the device."""
device_info = {
"identifiers": {(TIBBER_DOMAIN, self.device_id)},
"name": self.name,
"manufacturer": MANUFACTURER,
}
if self._model is not None:
device_info["model"] = self._model
return device_info
class TibberSensorElPrice(TibberSensor):
"""Representation of a Tibber sensor for el price."""
def __init__(self, tibber_home):
"""Initialize the sensor."""
super().__init__(tibber_home)
self._last_updated = None
self._spread_load_constant = randrange(5000)
self._attr_available = False
self._attr_extra_state_attributes = {
"app_nickname": None,
"grid_company": None,
"estimated_annual_consumption": None,
"price_level": None,
"max_price": None,
"avg_price": None,
"min_price": None,
"off_peak_1": None,
"peak": None,
"off_peak_2": None,
}
self._attr_icon = ICON
self._attr_name = f"Electricity price {self._home_name}"
self._attr_unique_id = f"{self._tibber_home.home_id}"
self._model = "Price Sensor"
async def async_update(self):
"""Get the latest data and updates the states."""
now = dt_util.now()
if (
not self._tibber_home.last_data_timestamp
or (self._tibber_home.last_data_timestamp - now).total_seconds()
< 5 * 3600 + self._spread_load_constant
or not self.available
):
_LOGGER.debug("Asking for new data")
await self._fetch_data()
elif (
self._tibber_home.current_price_total
and self._last_updated
and self._last_updated.hour == now.hour
and self._tibber_home.last_data_timestamp
):
return
res = self._tibber_home.current_price_data()
self._attr_state, price_level, self._last_updated = res
self._attr_extra_state_attributes["price_level"] = price_level
attrs = self._tibber_home.current_attributes()
self._attr_extra_state_attributes.update(attrs)
self._attr_available = self._attr_state is not None
self._attr_unit_of_measurement = self._tibber_home.price_unit
@Throttle(MIN_TIME_BETWEEN_UPDATES)
async def _fetch_data(self):
_LOGGER.debug("Fetching data")
try:
await self._tibber_home.update_info_and_price_info()
except (asyncio.TimeoutError, aiohttp.ClientError):
return
data = self._tibber_home.info["viewer"]["home"]
self._attr_extra_state_attributes["app_nickname"] = data["appNickname"]
self._attr_extra_state_attributes["grid_company"] = data["meteringPointData"][
"gridCompany"
]
self._attr_extra_state_attributes["estimated_annual_consumption"] = data[
"meteringPointData"
]["estimatedAnnualConsumption"]
class TibberSensorRT(TibberSensor):
"""Representation of a Tibber sensor for real time consumption."""
_attr_should_poll = False
def __init__(
self, tibber_home, sensor_name, device_class, unit, initial_state, state_class
):
"""Initialize the sensor."""
super().__init__(tibber_home)
self._sensor_name = sensor_name
self._model = "Tibber Pulse"
self._attr_device_class = device_class
self._attr_name = f"{self._sensor_name} {self._home_name}"
self._attr_state = initial_state
self._attr_unique_id = f"{self._tibber_home.home_id}_rt_{self._sensor_name}"
self._attr_unit_of_measurement = unit
self._attr_state_class = state_class
if sensor_name in [
"last meter consumption",
"last meter production",
]:
self._attr_last_reset = datetime.fromtimestamp(0)
elif self._sensor_name in [
"accumulated consumption",
"accumulated production",
"accumulated cost",
]:
self._attr_last_reset = dt_util.as_utc(
dt_util.now().replace(hour=0, minute=0, second=0, microsecond=0)
)
elif self._sensor_name in [
"accumulated consumption current hour",
"accumulated production current hour",
]:
self._attr_last_reset = dt_util.as_utc(
dt_util.now().replace(minute=0, second=0, microsecond=0)
)
else:
self._attr_last_reset = None
async def async_added_to_hass(self):
"""Start listen for real time data."""
self.async_on_remove(
async_dispatcher_connect(
self.hass,
SIGNAL_UPDATE_ENTITY.format(self._sensor_name),
self._set_state,
)
)
@property
def available(self):
"""Return True if entity is available."""
return self._tibber_home.rt_subscription_running
@callback
def _set_state(self, state, timestamp):
"""Set sensor state."""
if state < self._attr_state and self._sensor_name in [
"accumulated consumption",
"accumulated production",
"accumulated cost",
]:
self._attr_last_reset = dt_util.as_utc(
timestamp.replace(hour=0, minute=0, second=0, microsecond=0)
)
if state < self._attr_state and self._sensor_name in [
"accumulated consumption current hour",
"accumulated production current hour",
]:
self._attr_last_reset = dt_util.as_utc(
timestamp.replace(minute=0, second=0, microsecond=0)
)
self._attr_state = state
self.async_write_ha_state()
class TibberRtDataHandler:
"""Handle Tibber realtime data."""
def __init__(self, async_add_entities, tibber_home, hass):
"""Initialize the data handler."""
self._async_add_entities = async_add_entities
self._tibber_home = tibber_home
self.hass = hass
self._entities = set()
async def async_callback(self, payload):
"""Handle received data."""
errors = payload.get("errors")
if errors:
_LOGGER.error(errors[0])
return
data = payload.get("data")
if data is None:
return
live_measurement = data.get("liveMeasurement")
if live_measurement is None:
return
timestamp = dt_util.parse_datetime(live_measurement.pop("timestamp"))
new_entities = []
for sensor_type, state in live_measurement.items():
if state is None or sensor_type not in RT_SENSOR_MAP:
continue
if sensor_type in self._entities:
async_dispatcher_send(
self.hass,
SIGNAL_UPDATE_ENTITY.format(RT_SENSOR_MAP[sensor_type][0]),
state,
timestamp,
)
else:
sensor_name, device_class, unit, state_class = RT_SENSOR_MAP[
sensor_type
]
if sensor_type == "accumulatedCost":
unit = self._tibber_home.currency
entity = TibberSensorRT(
self._tibber_home,
sensor_name,
device_class,
unit,
state,
state_class,
)
new_entities.append(entity)
self._entities.add(sensor_type)
if new_entities:
self._async_add_entities(new_entities)
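# Illustrative shape of a realtime payload handled by async_callback (the field
# names follow RT_SENSOR_MAP above; the values are made up):
#
#     {
#         "data": {
#             "liveMeasurement": {
#                 "timestamp": "2021-01-01T12:00:00+01:00",
#                 "power": 1234,
#                 "accumulatedConsumption": 15.3,
#             }
#         }
#     }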
|
|
#! /usr/bin/env python
#
# See README for usage instructions.
import glob
import os
import subprocess
import sys
import platform
# We must use setuptools, not distutils, because we need to use the
# namespace_packages option for the "google" package.
from setuptools import setup, Extension, find_packages
from distutils.command.clean import clean as _clean
if sys.version_info[0] == 3:
# Python 3
from distutils.command.build_py import build_py_2to3 as _build_py
else:
# Python 2
from distutils.command.build_py import build_py as _build_py
from distutils.spawn import find_executable
# Find the Protocol Compiler.
if 'PROTOC' in os.environ and os.path.exists(os.environ['PROTOC']):
protoc = os.environ['PROTOC']
elif os.path.exists("../src/protoc"):
protoc = "../src/protoc"
elif os.path.exists("../src/protoc.exe"):
protoc = "../src/protoc.exe"
elif os.path.exists("../vsprojects/Debug/protoc.exe"):
protoc = "../vsprojects/Debug/protoc.exe"
elif os.path.exists("../vsprojects/Release/protoc.exe"):
protoc = "../vsprojects/Release/protoc.exe"
else:
protoc = find_executable("protoc")
def GetVersion():
"""Gets the version from google/protobuf/__init__.py
Do not import google.protobuf.__init__ directly, because an installed
protobuf library may be loaded instead."""
with open(os.path.join('google', 'protobuf', '__init__.py')) as version_file:
exec(version_file.read(), globals())
return __version__
def generate_proto(source, require = True):
"""Invokes the Protocol Compiler to generate a _pb2.py from the given
.proto file. Does nothing if the output already exists and is newer than
the input."""
if not require and not os.path.exists(source):
return
output = source.replace(".proto", "_pb2.py").replace("../src/", "")
if (not os.path.exists(output) or
(os.path.exists(source) and
os.path.getmtime(source) > os.path.getmtime(output))):
print("Generating %s..." % output)
if not os.path.exists(source):
sys.stderr.write("Can't find required file: %s\n" % source)
sys.exit(-1)
if protoc is None:
sys.stderr.write(
"protoc is not installed nor found in ../src. Please compile it "
"or install the binary package.\n")
sys.exit(-1)
protoc_command = [ protoc, "-I../src", "-I.", "--python_out=.", source ]
if subprocess.call(protoc_command) != 0:
sys.exit(-1)
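# Example (illustrative): generate_proto("../src/google/protobuf/descriptor.proto")
# writes google/protobuf/descriptor_pb2.py next to this setup.py, and skips the
# protoc invocation when the output already exists and is newer than the input.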
def GenerateUnittestProtos():
generate_proto("../src/google/protobuf/any_test.proto", False)
generate_proto("../src/google/protobuf/map_proto2_unittest.proto", False)
generate_proto("../src/google/protobuf/map_unittest.proto", False)
generate_proto("../src/google/protobuf/test_messages_proto3.proto", False)
generate_proto("../src/google/protobuf/test_messages_proto2.proto", False)
generate_proto("../src/google/protobuf/unittest_arena.proto", False)
generate_proto("../src/google/protobuf/unittest_no_arena.proto", False)
generate_proto("../src/google/protobuf/unittest_no_arena_import.proto", False)
generate_proto("../src/google/protobuf/unittest.proto", False)
generate_proto("../src/google/protobuf/unittest_custom_options.proto", False)
generate_proto("../src/google/protobuf/unittest_import.proto", False)
generate_proto("../src/google/protobuf/unittest_import_public.proto", False)
generate_proto("../src/google/protobuf/unittest_mset.proto", False)
generate_proto("../src/google/protobuf/unittest_mset_wire_format.proto", False)
generate_proto("../src/google/protobuf/unittest_no_generic_services.proto", False)
generate_proto("../src/google/protobuf/unittest_proto3_arena.proto", False)
generate_proto("../src/google/protobuf/util/json_format_proto3.proto", False)
generate_proto("google/protobuf/internal/any_test.proto", False)
generate_proto("google/protobuf/internal/descriptor_pool_test1.proto", False)
generate_proto("google/protobuf/internal/descriptor_pool_test2.proto", False)
generate_proto("google/protobuf/internal/factory_test1.proto", False)
generate_proto("google/protobuf/internal/factory_test2.proto", False)
generate_proto("google/protobuf/internal/file_options_test.proto", False)
generate_proto("google/protobuf/internal/import_test_package/inner.proto", False)
generate_proto("google/protobuf/internal/import_test_package/outer.proto", False)
generate_proto("google/protobuf/internal/missing_enum_values.proto", False)
generate_proto("google/protobuf/internal/message_set_extensions.proto", False)
generate_proto("google/protobuf/internal/more_extensions.proto", False)
generate_proto("google/protobuf/internal/more_extensions_dynamic.proto", False)
generate_proto("google/protobuf/internal/more_messages.proto", False)
generate_proto("google/protobuf/internal/no_package.proto", False)
generate_proto("google/protobuf/internal/packed_field_test.proto", False)
generate_proto("google/protobuf/internal/test_bad_identifiers.proto", False)
generate_proto("google/protobuf/pyext/python.proto", False)
class clean(_clean):
def run(self):
# Delete generated files in the code tree.
for (dirpath, dirnames, filenames) in os.walk("."):
for filename in filenames:
filepath = os.path.join(dirpath, filename)
if filepath.endswith("_pb2.py") or filepath.endswith(".pyc") or \
filepath.endswith(".so") or filepath.endswith(".o"):
os.remove(filepath)
# _clean is an old-style class, so super() doesn't work.
_clean.run(self)
class build_py(_build_py):
def run(self):
# Generate necessary .proto file if it doesn't exist.
generate_proto("../src/google/protobuf/descriptor.proto")
generate_proto("../src/google/protobuf/compiler/plugin.proto")
generate_proto("../src/google/protobuf/any.proto")
generate_proto("../src/google/protobuf/api.proto")
generate_proto("../src/google/protobuf/duration.proto")
generate_proto("../src/google/protobuf/empty.proto")
generate_proto("../src/google/protobuf/field_mask.proto")
generate_proto("../src/google/protobuf/source_context.proto")
generate_proto("../src/google/protobuf/struct.proto")
generate_proto("../src/google/protobuf/timestamp.proto")
generate_proto("../src/google/protobuf/type.proto")
generate_proto("../src/google/protobuf/wrappers.proto")
GenerateUnittestProtos()
# _build_py is an old-style class, so super() doesn't work.
_build_py.run(self)
class test_conformance(_build_py):
target = 'test_python'
def run(self):
if sys.version_info >= (2, 7):
# Python 2.6 dodges these extra failures.
os.environ["CONFORMANCE_PYTHON_EXTRA_FAILURES"] = (
"--failure_list failure_list_python-post26.txt")
cmd = 'cd ../conformance && make %s' % (test_conformance.target)
status = subprocess.check_call(cmd, shell=True)
def get_option_from_sys_argv(option_str):
if option_str in sys.argv:
sys.argv.remove(option_str)
return True
return False
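# Example (illustrative): running `python setup.py build --cpp_implementation`
# makes get_option_from_sys_argv('--cpp_implementation') return True and
# removes the flag from sys.argv so setuptools never sees it.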
if __name__ == '__main__':
ext_module_list = []
warnings_as_errors = '--warnings_as_errors'
if get_option_from_sys_argv('--cpp_implementation'):
# Link libprotobuf.a and libprotobuf-lite.a statically with the
# extension. Note that those libraries have to be compiled with
# -fPIC for this to work.
compile_static_ext = get_option_from_sys_argv('--compile_static_extension')
libraries = ['protobuf']
extra_objects = None
if compile_static_ext:
libraries = None
extra_objects = ['../src/.libs/libprotobuf.a',
'../src/.libs/libprotobuf-lite.a']
test_conformance.target = 'test_python_cpp'
extra_compile_args = []
if sys.platform != 'win32':
extra_compile_args.append('-Wno-write-strings')
extra_compile_args.append('-Wno-invalid-offsetof')
extra_compile_args.append('-Wno-sign-compare')
# https://github.com/Theano/Theano/issues/4926
if sys.platform == 'win32':
extra_compile_args.append('-D_hypot=hypot')
# https://github.com/tpaviot/pythonocc-core/issues/48
if sys.platform == 'win32' and '64 bit' in sys.version:
extra_compile_args.append('-DMS_WIN64')
    # MSVS default runtime is dynamic (/MD); link the static runtime instead.
if (sys.platform == 'win32'):
extra_compile_args.append('/MT')
if "clang" in os.popen('$CC --version 2> /dev/null').read():
extra_compile_args.append('-Wno-shorten-64-to-32')
v, _, _ = platform.mac_ver()
if v:
extra_compile_args.append('-std=c++11')
elif os.getenv('KOKORO_BUILD_NUMBER') or os.getenv('KOKORO_BUILD_ID'):
extra_compile_args.append('-std=c++11')
if warnings_as_errors in sys.argv:
extra_compile_args.append('-Werror')
sys.argv.remove(warnings_as_errors)
# C++ implementation extension
ext_module_list.extend([
Extension(
"google.protobuf.pyext._message",
glob.glob('google/protobuf/pyext/*.cc'),
include_dirs=[".", "../src"],
libraries=libraries,
extra_objects=extra_objects,
library_dirs=['../src/.libs'],
extra_compile_args=extra_compile_args,
),
Extension(
"google.protobuf.internal._api_implementation",
glob.glob('google/protobuf/internal/api_implementation.cc'),
extra_compile_args=extra_compile_args + ['-DPYTHON_PROTO2_CPP_IMPL_V2'],
),
])
os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION'] = 'cpp'
# Keep this list of dependencies in sync with tox.ini.
install_requires = ['six>=1.9', 'setuptools']
if sys.version_info <= (2,7):
install_requires.append('ordereddict')
install_requires.append('unittest2')
setup(
name='protobuf',
version=GetVersion(),
description='Protocol Buffers',
download_url='https://github.com/google/protobuf/releases',
long_description="Protocol Buffers are Google's data interchange format",
url='https://developers.google.com/protocol-buffers/',
maintainer='[email protected]',
maintainer_email='[email protected]',
license='3-Clause BSD License',
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
],
namespace_packages=['google'],
packages=find_packages(
exclude=[
'import_test_package',
],
),
test_suite='google.protobuf.internal',
cmdclass={
'clean': clean,
'build_py': build_py,
'test_conformance': test_conformance,
},
install_requires=install_requires,
ext_modules=ext_module_list,
)
|
|
#!/usr/bin/env python3
import os
import pymssql
import pymysql.cursors
from contextlib import contextmanager
from enum import Enum
REDCAP_VERSION = '7.2.2'
REDCAP_PATH = 'redcap/redcap_v{}/'.format(REDCAP_VERSION)
REDCAP_UOL_PATH = 'redcap_v{}/'.format(REDCAP_VERSION)
REDCAP_INTERNAL_URL = 'https://briccs.xuhl-tr.nhs.uk/{}'.format(REDCAP_PATH)
REDCAP_EXTERNAL_URL = 'https://uhlbriccsext01.xuhl-tr.nhs.uk/{}'.format(
REDCAP_PATH)
REDCAP_UOL_CRF_URL = 'https://crf.lcbru.le.ac.uk/{}'.format(
REDCAP_UOL_PATH)
REDCAP_UOL_SURVEY_URL = 'https://redcap.lcbru.le.ac.uk/{}'.format(
REDCAP_UOL_PATH)
REDCAP_INTERNAL_DB = 'STG_redcap'
REDCAP_EXTERNAL_DB = 'STG_redcap_briccsext'
REDCAP_UOL_DB = 'redcap'
REDCAP_UOL_SURVEY_DB = 'redcap6170'
OS_UOL_DB = 'catissueplus30'
OPENSPECIMEN_URL = 'https://catissue-live.lcbru.le.ac.uk/openspecimen/'
def get_redcap_link(link_text, project_id, record):
REDCAP_RECORD_URL = (
'[{}]({}/DataEntry/record_home.php'
'?pid={}&id={})')
return (REDCAP_RECORD_URL.format(
link_text,
REDCAP_INTERNAL_URL,
project_id,
record))
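# Example (illustrative; the link text, project id and record name are
# hypothetical):
#
#     get_redcap_link('BRICCS0001', 12, 'BRICCS0001')
#     # -> a Markdown link of the form
#     # '[BRICCS0001](<REDCAP_INTERNAL_URL>/DataEntry/record_home.php?pid=12&id=BRICCS0001)'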
def get_redcap_external_link(link_text, project_id, record):
REDCAP_RECORD_URL = (
'[{}]({}/DataEntry/record_home.php'
'?pid={}&id={})')
return (REDCAP_RECORD_URL.format(
link_text,
REDCAP_EXTERNAL_URL,
project_id,
record))
def get_redcap_uol_crf_link(link_text, project_id, record):
REDCAP_RECORD_URL = (
'[{}]({}/DataEntry/record_home.php'
'?pid={}&id={})')
return (REDCAP_RECORD_URL.format(
link_text,
REDCAP_UOL_CRF_URL,
project_id,
record))
def get_redcap_uol_survey_link(link_text, project_id, record):
REDCAP_RECORD_URL = (
'[{}]({}/DataEntry/record_home.php'
'?pid={}&id={})')
return (REDCAP_RECORD_URL.format(
link_text,
REDCAP_UOL_SURVEY_URL,
project_id,
record))
class RedcapInstance(Enum):
@staticmethod
def internal():
return {
'staging_database': REDCAP_INTERNAL_DB + '.dbo',
'link_generator': get_redcap_link,
'base_url': REDCAP_INTERNAL_URL,
'connection': DatabaseConnection.redcap_internal,
}
@staticmethod
def external():
return {
'staging_database': REDCAP_EXTERNAL_DB + '.dbo',
'link_generator': get_redcap_external_link,
'base_url': REDCAP_EXTERNAL_URL,
'connection': DatabaseConnection.redcap_external,
}
@staticmethod
def uol_lamp():
return {
'staging_database': 'STG_uol_crf_redcap.dbo',
'link_generator': get_redcap_uol_crf_link,
'base_url': REDCAP_INTERNAL_URL,
'connection': DatabaseConnection.uol_lamp,
}
@staticmethod
def uol_survey():
return {
'staging_database': 'STG_redcap_Survey.dbo',
'link_generator': get_redcap_uol_survey_link,
'base_url': REDCAP_INTERNAL_URL,
'connection': DatabaseConnection.uol_survey,
}
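# Illustrative use of an instance configuration dict (hypothetical report code;
# the query and table name are assumptions, not part of this module):
#
#     instance = RedcapInstance.internal()
#     with instance['connection']() as cursor:
#         cursor.execute(
#             'SELECT project_id FROM {}.redcap_projects'.format(
#                 instance['staging_database']))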
def get_openspecimen_link(
link_text,
collection_protocol_id,
collection_protocol_reg_id,
):
OS_PARTICIPANT_URL = (
'[{}]({}#/cp-view/{}/participants/{}/detail/overview)'
)
return (OS_PARTICIPANT_URL.format(
link_text,
OPENSPECIMEN_URL,
collection_protocol_id,
collection_protocol_reg_id))
class OpenSpecimenInstance(Enum):
@staticmethod
def live():
return {
'link_generator': get_openspecimen_link,
'connection': DatabaseConnection.uol_os,
}
class DatabaseConnection(Enum):
@staticmethod
@contextmanager
def reporting():
conn = pymssql.connect(
host=os.environ["SQL_REPORTING_HOST"],
user=os.environ["SQL_REPORTING_USER"],
password=os.environ["SQL_REPORTING_PASSWORD"],
database=os.environ["SQL_REPORTING_DATABASE"],
)
try:
with conn.cursor(as_dict=True) as cursor:
yield cursor
finally:
conn.close()
@staticmethod
@contextmanager
def redcap_internal():
conn = pymssql.connect(
host=os.environ["SQL_REPORTING_HOST"],
user=os.environ["SQL_REPORTING_USER"],
password=os.environ["SQL_REPORTING_PASSWORD"],
database=REDCAP_INTERNAL_DB,
)
try:
with conn.cursor(as_dict=True) as cursor:
yield cursor
finally:
conn.close()
@staticmethod
@contextmanager
def redcap_external():
conn = pymssql.connect(
host=os.environ["SQL_REPORTING_HOST"],
user=os.environ["SQL_REPORTING_USER"],
password=os.environ["SQL_REPORTING_PASSWORD"],
database=REDCAP_EXTERNAL_DB,
)
try:
with conn.cursor(as_dict=True) as cursor:
yield cursor
finally:
conn.close()
@staticmethod
@contextmanager
def dwbriccs():
conn = pymssql.connect(
host=os.environ["SQL_DWBRICCS_HOST"],
user=os.environ["SQL_DWBRICCS_USER"],
password=os.environ["SQL_DWBRICCS_PASSWORD"],
database=os.environ["SQL_DWBRICCS_DATABASE"],
)
try:
with conn.cursor(as_dict=True) as cursor:
yield cursor
finally:
conn.close()
@staticmethod
@contextmanager
def uol_lamp():
conn = pymysql.connect(
host=os.environ["SQL_REPORTING_HOST"],
port=int(os.environ.get("SQL_REPORTING_PORT", '3306')),
user=os.environ["SQL_REPORTING_USER"],
database=REDCAP_UOL_DB,
password=os.environ["SQL_REPORTING_PASSWORD"],
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor,
)
try:
with conn.cursor() as cursor:
yield cursor
finally:
conn.close()
@staticmethod
@contextmanager
def uol_survey():
conn = pymysql.connect(
host=os.environ["SQL_REPORTING_HOST"],
port=int(os.environ.get("SQL_REPORTING_PORT", '3306')),
user=os.environ["SQL_REPORTING_USER"],
database=REDCAP_UOL_SURVEY_DB,
password=os.environ["SQL_REPORTING_PASSWORD"],
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor,
)
try:
with conn.cursor() as cursor:
yield cursor
finally:
conn.close()
@staticmethod
@contextmanager
def uol_os():
conn = pymysql.connect(
host=os.environ["SQL_REPORTING_HOST"],
port=int(os.environ.get("SQL_REPORTING_PORT", '3306')),
user=os.environ["SQL_REPORTING_USER"],
database=OS_UOL_DB,
password=os.environ["SQL_REPORTING_PASSWORD"],
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor,
)
try:
with conn.cursor() as cursor:
yield cursor
finally:
conn.close()
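# Example (illustrative; the query is hypothetical). Each connection method is
# a context manager that yields a dict-style cursor and closes the connection
# afterwards:
#
#     with DatabaseConnection.reporting() as cursor:
#         cursor.execute('SELECT 1 AS x')
#         print(cursor.fetchall())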
|
|
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An OAuth 2.0 client.
Tools for interacting with OAuth 2.0 protected resources.
"""
__author__ = '[email protected] (Joe Gregorio)'
import base64
import clientsecrets
import copy
import datetime
import httplib2
import logging
import os
import sys
import time
import urllib
import urlparse
from oauth2client import util
from oauth2client.anyjson import simplejson
HAS_OPENSSL = False
try:
from oauth2client.crypt import Signer
from oauth2client.crypt import make_signed_jwt
from oauth2client.crypt import verify_signed_jwt_with_certs
HAS_OPENSSL = True
except ImportError:
pass
try:
from urlparse import parse_qsl
except ImportError:
from cgi import parse_qsl
logger = logging.getLogger(__name__)
# Expiry is stored in RFC3339 UTC format
EXPIRY_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
# Which certs to use to validate id_tokens received.
ID_TOKEN_VERIFICATON_CERTS = 'https://www.googleapis.com/oauth2/v1/certs'
# Constant to use for the out of band OAuth 2.0 flow.
OOB_CALLBACK_URN = 'urn:ietf:wg:oauth:2.0:oob'
class Error(Exception):
"""Base error for this module."""
pass
class FlowExchangeError(Error):
"""Error trying to exchange an authorization grant for an access token."""
pass
class AccessTokenRefreshError(Error):
"""Error trying to refresh an expired access token."""
pass
class UnknownClientSecretsFlowError(Error):
"""The client secrets file called for an unknown type of OAuth 2.0 flow. """
pass
class AccessTokenCredentialsError(Error):
"""Having only the access_token means no refresh is possible."""
pass
class VerifyJwtTokenError(Error):
"""Could on retrieve certificates for validation."""
pass
def _abstract():
raise NotImplementedError('You need to override this function')
class MemoryCache(object):
"""httplib2 Cache implementation which only caches locally."""
def __init__(self):
self.cache = {}
def get(self, key):
return self.cache.get(key)
def set(self, key, value):
self.cache[key] = value
def delete(self, key):
self.cache.pop(key, None)
class Credentials(object):
"""Base class for all Credentials objects.
Subclasses must define an authorize() method that applies the credentials to
an HTTP transport.
Subclasses must also specify a classmethod named 'from_json' that takes a JSON
  string as input and returns an instantiated Credentials object.
"""
NON_SERIALIZED_MEMBERS = ['store']
def authorize(self, http):
"""Take an httplib2.Http instance (or equivalent) and
authorizes it for the set of credentials, usually by
replacing http.request() with a method that adds in
the appropriate headers and then delegates to the original
Http.request() method.
"""
_abstract()
def refresh(self, http):
"""Forces a refresh of the access_token.
Args:
http: httplib2.Http, an http object to be used to make the refresh
request.
"""
_abstract()
def apply(self, headers):
"""Add the authorization to the headers.
Args:
headers: dict, the headers to add the Authorization header to.
"""
_abstract()
def _to_json(self, strip):
"""Utility function for creating a JSON representation of an instance of Credentials.
Args:
strip: array, An array of names of members to not include in the JSON.
Returns:
string, a JSON representation of this instance, suitable to pass to
from_json().
"""
t = type(self)
d = copy.copy(self.__dict__)
for member in strip:
if member in d:
del d[member]
if 'token_expiry' in d and isinstance(d['token_expiry'], datetime.datetime):
d['token_expiry'] = d['token_expiry'].strftime(EXPIRY_FORMAT)
    # Add in information we will need later to reconstitute this instance.
d['_class'] = t.__name__
d['_module'] = t.__module__
return simplejson.dumps(d)
def to_json(self):
"""Creating a JSON representation of an instance of Credentials.
Returns:
string, a JSON representation of this instance, suitable to pass to
from_json().
"""
return self._to_json(Credentials.NON_SERIALIZED_MEMBERS)
@classmethod
def new_from_json(cls, s):
"""Utility class method to instantiate a Credentials subclass from a JSON
representation produced by to_json().
Args:
s: string, JSON from to_json().
Returns:
An instance of the subclass of Credentials that was serialized with
to_json().
"""
data = simplejson.loads(s)
# Find and call the right classmethod from_json() to restore the object.
module = data['_module']
try:
m = __import__(module)
except ImportError:
# In case there's an object from the old package structure, update it
module = module.replace('.apiclient', '')
m = __import__(module)
m = __import__(module, fromlist=module.split('.')[:-1])
kls = getattr(m, data['_class'])
from_json = getattr(kls, 'from_json')
return from_json(s)
@classmethod
def from_json(cls, s):
"""Instantiate a Credentials object from a JSON description of it.
The JSON should have been produced by calling .to_json() on the object.
Args:
data: dict, A deserialized JSON object.
Returns:
An instance of a Credentials subclass.
"""
return Credentials()
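# Illustrative round trip (hypothetical caller code): serialise credentials and
# restore them via the subclass-aware factory above.
#
#     json_blob = credentials.to_json()
#     restored = Credentials.new_from_json(json_blob)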
class Flow(object):
"""Base class for all Flow objects."""
pass
class Storage(object):
"""Base class for all Storage objects.
Store and retrieve a single credential. This class supports locking
such that multiple processes and threads can operate on a single
store.
"""
def acquire_lock(self):
"""Acquires any lock necessary to access this Storage.
This lock is not reentrant.
"""
pass
def release_lock(self):
"""Release the Storage lock.
Trying to release a lock that isn't held will result in a
RuntimeError.
"""
pass
def locked_get(self):
"""Retrieve credential.
The Storage lock must be held when this is called.
Returns:
oauth2client.client.Credentials
"""
_abstract()
def locked_put(self, credentials):
"""Write a credential.
The Storage lock must be held when this is called.
Args:
credentials: Credentials, the credentials to store.
"""
_abstract()
def locked_delete(self):
"""Delete a credential.
The Storage lock must be held when this is called.
"""
_abstract()
def get(self):
"""Retrieve credential.
The Storage lock must *not* be held when this is called.
Returns:
oauth2client.client.Credentials
"""
self.acquire_lock()
try:
return self.locked_get()
finally:
self.release_lock()
def put(self, credentials):
"""Write a credential.
    The Storage lock must *not* be held when this is called.
Args:
credentials: Credentials, the credentials to store.
"""
self.acquire_lock()
try:
self.locked_put(credentials)
finally:
self.release_lock()
def delete(self):
"""Delete credential.
Frees any resources associated with storing the credential.
The Storage lock must *not* be held when this is called.
Returns:
None
"""
self.acquire_lock()
try:
return self.locked_delete()
finally:
self.release_lock()
class OAuth2Credentials(Credentials):
"""Credentials object for OAuth 2.0.
Credentials can be applied to an httplib2.Http object using the authorize()
method, which then adds the OAuth 2.0 access token to each request.
OAuth2Credentials objects may be safely pickled and unpickled.
"""
@util.positional(8)
def __init__(self, access_token, client_id, client_secret, refresh_token,
token_expiry, token_uri, user_agent, id_token=None):
"""Create an instance of OAuth2Credentials.
This constructor is not usually called by the user, instead
OAuth2Credentials objects are instantiated by the OAuth2WebServerFlow.
Args:
access_token: string, access token.
client_id: string, client identifier.
client_secret: string, client secret.
refresh_token: string, refresh token.
token_expiry: datetime, when the access_token expires.
token_uri: string, URI of token endpoint.
user_agent: string, The HTTP User-Agent to provide for this application.
id_token: object, The identity of the resource owner.
Notes:
store: callable, A callable that when passed a Credential
will store the credential back to where it came from.
This is needed to store the latest access_token if it
has expired and been refreshed.
"""
self.access_token = access_token
self.client_id = client_id
self.client_secret = client_secret
self.refresh_token = refresh_token
self.store = None
self.token_expiry = token_expiry
self.token_uri = token_uri
self.user_agent = user_agent
self.id_token = id_token
# True if the credentials have been revoked or expired and can't be
# refreshed.
self.invalid = False
def authorize(self, http):
"""Authorize an httplib2.Http instance with these credentials.
The modified http.request method will add authentication headers to each
request and will refresh access_tokens when a 401 is received on a
request. In addition the http.request method has a credentials property,
http.request.credentials, which is the Credentials object that authorized
it.
Args:
http: An instance of httplib2.Http
or something that acts like it.
Returns:
A modified instance of http that was passed in.
Example:
h = httplib2.Http()
h = credentials.authorize(h)
    You can't create a new OAuth subclass of httplib2.Authentication
because it never gets passed the absolute URI, which is needed for
signing. So instead we have to overload 'request' with a closure
that adds in the Authorization header and then calls the original
version of 'request()'.
"""
request_orig = http.request
# The closure that will replace 'httplib2.Http.request'.
@util.positional(1)
def new_request(uri, method='GET', body=None, headers=None,
redirections=httplib2.DEFAULT_MAX_REDIRECTS,
connection_type=None):
if not self.access_token:
logger.info('Attempting refresh to obtain initial access_token')
self._refresh(request_orig)
# Modify the request headers to add the appropriate
# Authorization header.
if headers is None:
headers = {}
self.apply(headers)
if self.user_agent is not None:
if 'user-agent' in headers:
headers['user-agent'] = self.user_agent + ' ' + headers['user-agent']
else:
headers['user-agent'] = self.user_agent
resp, content = request_orig(uri, method, body, headers,
redirections, connection_type)
# Older API (GData) respond with 403
if resp.status in [401, 403]:
logger.info('Refreshing due to a %s' % str(resp.status))
self._refresh(request_orig)
self.apply(headers)
return request_orig(uri, method, body, headers,
redirections, connection_type)
else:
return (resp, content)
# Replace the request method with our own closure.
http.request = new_request
# Set credentials as a property of the request method.
setattr(http.request, 'credentials', self)
return http
def refresh(self, http):
"""Forces a refresh of the access_token.
Args:
http: httplib2.Http, an http object to be used to make the refresh
request.
"""
self._refresh(http.request)
def apply(self, headers):
"""Add the authorization to the headers.
Args:
headers: dict, the headers to add the Authorization header to.
"""
headers['Authorization'] = 'Bearer ' + self.access_token
def to_json(self):
return self._to_json(Credentials.NON_SERIALIZED_MEMBERS)
@classmethod
def from_json(cls, s):
"""Instantiate a Credentials object from a JSON description of it. The JSON
should have been produced by calling .to_json() on the object.
Args:
data: dict, A deserialized JSON object.
Returns:
An instance of a Credentials subclass.
"""
data = simplejson.loads(s)
if 'token_expiry' in data and not isinstance(data['token_expiry'],
datetime.datetime):
try:
data['token_expiry'] = datetime.datetime.strptime(
data['token_expiry'], EXPIRY_FORMAT)
except:
data['token_expiry'] = None
retval = OAuth2Credentials(
data['access_token'],
data['client_id'],
data['client_secret'],
data['refresh_token'],
data['token_expiry'],
data['token_uri'],
data['user_agent'],
id_token=data.get('id_token', None))
retval.invalid = data['invalid']
return retval
@property
def access_token_expired(self):
"""True if the credential is expired or invalid.
If the token_expiry isn't set, we assume the token doesn't expire.
"""
if self.invalid:
return True
if not self.token_expiry:
return False
now = datetime.datetime.utcnow()
if now >= self.token_expiry:
logger.info('access_token is expired. Now: %s, token_expiry: %s',
now, self.token_expiry)
return True
return False
def set_store(self, store):
"""Set the Storage for the credential.
Args:
      store: Storage, an implementation of the Storage object.
This is needed to store the latest access_token if it
has expired and been refreshed. This implementation uses
locking to check for updates before updating the
access_token.
"""
self.store = store
def _updateFromCredential(self, other):
"""Update this Credential from another instance."""
self.__dict__.update(other.__getstate__())
def __getstate__(self):
"""Trim the state down to something that can be pickled."""
d = copy.copy(self.__dict__)
del d['store']
return d
def __setstate__(self, state):
"""Reconstitute the state of the object from being pickled."""
self.__dict__.update(state)
self.store = None
def _generate_refresh_request_body(self):
"""Generate the body that will be used in the refresh request."""
body = urllib.urlencode({
'grant_type': 'refresh_token',
'client_id': self.client_id,
'client_secret': self.client_secret,
'refresh_token': self.refresh_token,
})
return body
def _generate_refresh_request_headers(self):
"""Generate the headers that will be used in the refresh request."""
headers = {
'content-type': 'application/x-www-form-urlencoded',
}
if self.user_agent is not None:
headers['user-agent'] = self.user_agent
return headers
def _refresh(self, http_request):
"""Refreshes the access_token.
This method first checks by reading the Storage object if available.
If a refresh is still needed, it holds the Storage lock until the
refresh is completed.
Args:
http_request: callable, a callable that matches the method signature of
httplib2.Http.request, used to make the refresh request.
Raises:
AccessTokenRefreshError: When the refresh fails.
"""
if not self.store:
self._do_refresh_request(http_request)
else:
self.store.acquire_lock()
try:
new_cred = self.store.locked_get()
if (new_cred and not new_cred.invalid and
new_cred.access_token != self.access_token):
logger.info('Updated access_token read from Storage')
self._updateFromCredential(new_cred)
else:
self._do_refresh_request(http_request)
finally:
self.store.release_lock()
def _do_refresh_request(self, http_request):
"""Refresh the access_token using the refresh_token.
Args:
http_request: callable, a callable that matches the method signature of
httplib2.Http.request, used to make the refresh request.
Raises:
AccessTokenRefreshError: When the refresh fails.
"""
body = self._generate_refresh_request_body()
headers = self._generate_refresh_request_headers()
logger.info('Refreshing access_token')
resp, content = http_request(
self.token_uri, method='POST', body=body, headers=headers)
if resp.status == 200:
# TODO(jcgregorio) Raise an error if loads fails?
d = simplejson.loads(content)
self.access_token = d['access_token']
self.refresh_token = d.get('refresh_token', self.refresh_token)
if 'expires_in' in d:
self.token_expiry = datetime.timedelta(
seconds=int(d['expires_in'])) + datetime.datetime.utcnow()
else:
self.token_expiry = None
if self.store:
self.store.locked_put(self)
else:
# An {'error':...} response body means the token is expired or revoked,
# so we flag the credentials as such.
logger.info('Failed to retrieve access token: %s' % content)
error_msg = 'Invalid response %s.' % resp['status']
try:
d = simplejson.loads(content)
if 'error' in d:
error_msg = d['error']
self.invalid = True
if self.store:
self.store.locked_put(self)
except StandardError:
pass
raise AccessTokenRefreshError(error_msg)
class AccessTokenCredentials(OAuth2Credentials):
"""Credentials object for OAuth 2.0.
Credentials can be applied to an httplib2.Http object using the
authorize() method, which then signs each request from that object
with the OAuth 2.0 access token. This set of credentials is for the
use case where you have acquired an OAuth 2.0 access_token from
another place such as a JavaScript client or another web
application, and wish to use it from Python. Because only the
access_token is present it can not be refreshed and will in time
expire.
AccessTokenCredentials objects may be safely pickled and unpickled.
Usage:
credentials = AccessTokenCredentials('<an access token>',
'my-user-agent/1.0')
http = httplib2.Http()
http = credentials.authorize(http)
Exceptions:
    AccessTokenCredentialsError: raised when the access_token expires or is
revoked.
"""
def __init__(self, access_token, user_agent):
"""Create an instance of OAuth2Credentials
This is one of the few types if Credentials that you should contrust,
Credentials objects are usually instantiated by a Flow.
Args:
access_token: string, access token.
user_agent: string, The HTTP User-Agent to provide for this application.
Notes:
store: callable, a callable that when passed a Credential
will store the credential back to where it came from.
"""
super(AccessTokenCredentials, self).__init__(
access_token,
None,
None,
None,
None,
None,
user_agent)
@classmethod
def from_json(cls, s):
data = simplejson.loads(s)
retval = AccessTokenCredentials(
data['access_token'],
data['user_agent'])
return retval
def _refresh(self, http_request):
raise AccessTokenCredentialsError(
"The access_token is expired or invalid and can't be refreshed.")
class AssertionCredentials(OAuth2Credentials):
"""Abstract Credentials object used for OAuth 2.0 assertion grants.
This credential does not require a flow to instantiate because it
represents a two legged flow, and therefore has all of the required
information to generate and refresh its own access tokens. It must
be subclassed to generate the appropriate assertion string.
AssertionCredentials objects may be safely pickled and unpickled.
"""
@util.positional(2)
def __init__(self, assertion_type, user_agent=None,
token_uri='https://accounts.google.com/o/oauth2/token',
**unused_kwargs):
"""Constructor for AssertionFlowCredentials.
Args:
assertion_type: string, assertion type that will be declared to the auth
server
user_agent: string, The HTTP User-Agent to provide for this application.
token_uri: string, URI for token endpoint. For convenience
defaults to Google's endpoints but any OAuth 2.0 provider can be used.
"""
super(AssertionCredentials, self).__init__(
None,
None,
None,
None,
None,
token_uri,
user_agent)
self.assertion_type = assertion_type
def _generate_refresh_request_body(self):
assertion = self._generate_assertion()
body = urllib.urlencode({
'assertion_type': self.assertion_type,
'assertion': assertion,
'grant_type': 'assertion',
})
return body
def _generate_assertion(self):
"""Generate the assertion string that will be used in the access token
request.
"""
_abstract()
if HAS_OPENSSL:
# PyOpenSSL is not a prerequisite for oauth2client, so if it is missing then
# don't create the SignedJwtAssertionCredentials or the verify_id_token()
# method.
class SignedJwtAssertionCredentials(AssertionCredentials):
"""Credentials object used for OAuth 2.0 Signed JWT assertion grants.
This credential does not require a flow to instantiate because it represents
a two legged flow, and therefore has all of the required information to
generate and refresh its own access tokens.
SignedJwtAssertionCredentials requires PyOpenSSL and because of that it does
not work on App Engine. For App Engine you may consider using
AppAssertionCredentials.
"""
MAX_TOKEN_LIFETIME_SECS = 3600 # 1 hour in seconds
@util.positional(4)
def __init__(self,
service_account_name,
private_key,
scope,
private_key_password='notasecret',
user_agent=None,
token_uri='https://accounts.google.com/o/oauth2/token',
**kwargs):
"""Constructor for SignedJwtAssertionCredentials.
Args:
service_account_name: string, id for account, usually an email address.
private_key: string, private key in P12 format.
scope: string or list of strings, scope(s) of the credentials being
requested.
private_key_password: string, password for private_key.
user_agent: string, HTTP User-Agent to provide for this application.
token_uri: string, URI for token endpoint. For convenience
defaults to Google's endpoints but any OAuth 2.0 provider can be used.
kwargs: kwargs, Additional parameters to add to the JWT token, for
example [email protected]."""
super(SignedJwtAssertionCredentials, self).__init__(
'http://oauth.net/grant_type/jwt/1.0/bearer',
user_agent=user_agent,
token_uri=token_uri,
)
if type(scope) is list:
scope = ' '.join(scope)
self.scope = scope
# Keep base64 encoded so it can be stored in JSON.
self.private_key = base64.b64encode(private_key)
self.private_key_password = private_key_password
self.service_account_name = service_account_name
self.kwargs = kwargs
@classmethod
def from_json(cls, s):
data = simplejson.loads(s)
retval = SignedJwtAssertionCredentials(
data['service_account_name'],
base64.b64decode(data['private_key']),
data['scope'],
private_key_password=data['private_key_password'],
user_agent=data['user_agent'],
token_uri=data['token_uri'],
**data['kwargs']
)
retval.invalid = data['invalid']
retval.access_token = data['access_token']
return retval
def _generate_assertion(self):
"""Generate the assertion that will be used in the request."""
now = long(time.time())
payload = {
'aud': self.token_uri,
'scope': self.scope,
'iat': now,
'exp': now + SignedJwtAssertionCredentials.MAX_TOKEN_LIFETIME_SECS,
'iss': self.service_account_name
}
payload.update(self.kwargs)
logger.debug(str(payload))
private_key = base64.b64decode(self.private_key)
return make_signed_jwt(
Signer.from_string(private_key, self.private_key_password), payload)
# Only used in verify_id_token(), which is always calling to the same URI
# for the certs.
_cached_http = httplib2.Http(MemoryCache())
@util.positional(2)
def verify_id_token(id_token, audience, http=None,
cert_uri=ID_TOKEN_VERIFICATON_CERTS):
"""Verifies a signed JWT id_token.
This function requires PyOpenSSL and because of that it does not work on
App Engine. For App Engine you may consider using AppAssertionCredentials.
Args:
id_token: string, A Signed JWT.
audience: string, The audience 'aud' that the token should be for.
http: httplib2.Http, instance to use to make the HTTP request. Callers
should supply an instance that has caching enabled.
cert_uri: string, URI of the certificates in JSON format to
verify the JWT against.
Returns:
The deserialized JSON in the JWT.
Raises:
oauth2client.crypt.AppIdentityError if the JWT fails to verify.
"""
if http is None:
http = _cached_http
resp, content = http.request(cert_uri)
if resp.status == 200:
certs = simplejson.loads(content)
return verify_signed_jwt_with_certs(id_token, certs, audience)
else:
raise VerifyJwtTokenError('Status code: %d' % resp.status)
def _urlsafe_b64decode(b64string):
# Guard against unicode strings, which base64 can't handle.
b64string = b64string.encode('ascii')
padded = b64string + '=' * (4 - len(b64string) % 4)
return base64.urlsafe_b64decode(padded)
def _extract_id_token(id_token):
"""Extract the JSON payload from a JWT.
Does the extraction w/o checking the signature.
Args:
id_token: string, OAuth 2.0 id_token.
Returns:
object, The deserialized JSON payload.
"""
segments = id_token.split('.')
if (len(segments) != 3):
raise VerifyJwtTokenError(
'Wrong number of segments in token: %s' % id_token)
return simplejson.loads(_urlsafe_b64decode(segments[1]))
def _parse_exchange_token_response(content):
"""Parses response of an exchange token request.
Most providers return JSON but some (e.g. Facebook) return a
url-encoded string.
Args:
content: The body of a response
Returns:
Content as a dictionary object. Note that the dict could be empty,
i.e. {}. That basically indicates a failure.
"""
resp = {}
try:
resp = simplejson.loads(content)
except StandardError:
# different JSON libs raise different exceptions,
# so we just do a catch-all here
resp = dict(parse_qsl(content))
# some providers respond with 'expires', others with 'expires_in'
if resp and 'expires' in resp:
resp['expires_in'] = resp.pop('expires')
return resp
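# Example (illustrative): a url-encoded body such as
# 'access_token=abc&expires=3600' parses to
# {'access_token': 'abc', 'expires_in': '3600'}, while a JSON body is decoded
# with simplejson (and an 'expires' key is likewise renamed to 'expires_in').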
@util.positional(4)
def credentials_from_code(client_id, client_secret, scope, code,
redirect_uri='postmessage', http=None, user_agent=None,
token_uri='https://accounts.google.com/o/oauth2/token'):
"""Exchanges an authorization code for an OAuth2Credentials object.
Args:
client_id: string, client identifier.
client_secret: string, client secret.
scope: string or list of strings, scope(s) to request.
    code: string, An authorization code, most likely passed down from
the client
redirect_uri: string, this is generally set to 'postmessage' to match the
redirect_uri that the client specified
http: httplib2.Http, optional http instance to use to do the fetch
token_uri: string, URI for token endpoint. For convenience
defaults to Google's endpoints but any OAuth 2.0 provider can be used.
Returns:
An OAuth2Credentials object.
Raises:
FlowExchangeError if the authorization code cannot be exchanged for an
access token
"""
flow = OAuth2WebServerFlow(client_id, client_secret, scope,
redirect_uri=redirect_uri, user_agent=user_agent,
auth_uri='https://accounts.google.com/o/oauth2/auth',
token_uri=token_uri)
credentials = flow.step2_exchange(code, http=http)
return credentials
@util.positional(3)
def credentials_from_clientsecrets_and_code(filename, scope, code,
message = None,
redirect_uri='postmessage',
http=None,
cache=None):
"""Returns OAuth2Credentials from a clientsecrets file and an auth code.
Will create the right kind of Flow based on the contents of the clientsecrets
file or will raise InvalidClientSecretsError for unknown types of Flows.
Args:
filename: string, File name of clientsecrets.
scope: string or list of strings, scope(s) to request.
code: string, An authorization code, most likely passed down from
the client
message: string, A friendly string to display to the user if the
clientsecrets file is missing or invalid. If message is provided then
sys.exit will be called in the case of an error. If message in not
provided then clientsecrets.InvalidClientSecretsError will be raised.
redirect_uri: string, this is generally set to 'postmessage' to match the
redirect_uri that the client specified
http: httplib2.Http, optional http instance to use to do the fetch
cache: An optional cache service client that implements get() and set()
methods. See clientsecrets.loadfile() for details.
Returns:
An OAuth2Credentials object.
Raises:
FlowExchangeError if the authorization code cannot be exchanged for an
access token
UnknownClientSecretsFlowError if the file describes an unknown kind of Flow.
clientsecrets.InvalidClientSecretsError if the clientsecrets file is
invalid.
"""
flow = flow_from_clientsecrets(filename, scope, message=message, cache=cache,
redirect_uri=redirect_uri)
credentials = flow.step2_exchange(code, http=http)
return credentials
class OAuth2WebServerFlow(Flow):
"""Does the Web Server Flow for OAuth 2.0.
OAuth2WebServerFlow objects may be safely pickled and unpickled.
"""
@util.positional(4)
def __init__(self, client_id, client_secret, scope,
redirect_uri=None,
user_agent=None,
auth_uri='https://accounts.google.com/o/oauth2/auth',
token_uri='https://accounts.google.com/o/oauth2/token',
**kwargs):
"""Constructor for OAuth2WebServerFlow.
Args:
client_id: string, client identifier.
client_secret: string client secret.
scope: string or list of strings, scope(s) of the credentials being
requested.
redirect_uri: string, Either the string 'urn:ietf:wg:oauth:2.0:oob' for
a non-web-based application, or a URI that handles the callback from
the authorization server.
user_agent: string, HTTP User-Agent to provide for this application.
auth_uri: string, URI for authorization endpoint. For convenience
defaults to Google's endpoints but any OAuth 2.0 provider can be used.
token_uri: string, URI for token endpoint. For convenience
defaults to Google's endpoints but any OAuth 2.0 provider can be used.
**kwargs: dict, The keyword arguments are all optional and required
parameters for the OAuth calls.
"""
self.client_id = client_id
self.client_secret = client_secret
if type(scope) is list:
scope = ' '.join(scope)
self.scope = scope
self.redirect_uri = redirect_uri
self.user_agent = user_agent
self.auth_uri = auth_uri
self.token_uri = token_uri
self.params = {
'access_type': 'offline',
}
self.params.update(kwargs)
@util.positional(1)
def step1_get_authorize_url(self, redirect_uri=None):
"""Returns a URI to redirect to the provider.
Args:
redirect_uri: string, Either the string 'urn:ietf:wg:oauth:2.0:oob' for
a non-web-based application, or a URI that handles the callback from
the authorization server. This parameter is deprecated, please move to
passing the redirect_uri in via the constructor.
Returns:
A URI as a string to redirect the user to begin the authorization flow.
"""
if redirect_uri is not None:
      logger.warning(('The redirect_uri parameter for '
                      'OAuth2WebServerFlow.step1_get_authorize_url is deprecated. '
                      'Please move to passing the redirect_uri in via the '
                      'constructor.'))
self.redirect_uri = redirect_uri
if self.redirect_uri is None:
raise ValueError('The value of redirect_uri must not be None.')
query = {
'response_type': 'code',
'client_id': self.client_id,
'redirect_uri': self.redirect_uri,
'scope': self.scope,
}
query.update(self.params)
parts = list(urlparse.urlparse(self.auth_uri))
query.update(dict(parse_qsl(parts[4]))) # 4 is the index of the query part
parts[4] = urllib.urlencode(query)
return urlparse.urlunparse(parts)
@util.positional(2)
def step2_exchange(self, code, http=None):
"""Exhanges a code for OAuth2Credentials.
Args:
code: string or dict, either the code as a string, or a dictionary
of the query parameters to the redirect_uri, which contains
the code.
http: httplib2.Http, optional http instance to use to do the fetch
Returns:
An OAuth2Credentials object that can be used to authorize requests.
Raises:
      FlowExchangeError if a problem occurred exchanging the code for a
refresh_token.
"""
if not (isinstance(code, str) or isinstance(code, unicode)):
if 'code' not in code:
if 'error' in code:
error_msg = code['error']
else:
error_msg = 'No code was supplied in the query parameters.'
raise FlowExchangeError(error_msg)
else:
code = code['code']
body = urllib.urlencode({
'grant_type': 'authorization_code',
'client_id': self.client_id,
'client_secret': self.client_secret,
'code': code,
'redirect_uri': self.redirect_uri,
'scope': self.scope,
})
headers = {
'content-type': 'application/x-www-form-urlencoded',
}
if self.user_agent is not None:
headers['user-agent'] = self.user_agent
if http is None:
http = httplib2.Http()
resp, content = http.request(self.token_uri, method='POST', body=body,
headers=headers)
d = _parse_exchange_token_response(content)
if resp.status == 200 and 'access_token' in d:
access_token = d['access_token']
refresh_token = d.get('refresh_token', None)
token_expiry = None
if 'expires_in' in d:
token_expiry = datetime.datetime.utcnow() + datetime.timedelta(
seconds=int(d['expires_in']))
if 'id_token' in d:
d['id_token'] = _extract_id_token(d['id_token'])
logger.info('Successfully retrieved access token: %s' % content)
return OAuth2Credentials(access_token, self.client_id,
self.client_secret, refresh_token, token_expiry,
self.token_uri, self.user_agent,
id_token=d.get('id_token', None))
else:
logger.info('Failed to retrieve access token: %s' % content)
if 'error' in d:
# you never know what those providers got to say
error_msg = unicode(d['error'])
else:
error_msg = 'Invalid response: %s.' % str(resp.status)
raise FlowExchangeError(error_msg)
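# Typical three-legged usage of OAuth2WebServerFlow (illustrative sketch;
# the client credentials, scope and redirect URI are placeholders):
#
#   flow = OAuth2WebServerFlow(client_id='123.apps.googleusercontent.com',
#                              client_secret='client-secret',
#                              scope='https://www.googleapis.com/auth/userinfo.email',
#                              redirect_uri='https://example.com/oauth2callback')
#   auth_url = flow.step1_get_authorize_url()   # redirect the user here
#   # ...the provider redirects back with ?code=..., then:
#   credentials = flow.step2_exchange(code)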
@util.positional(2)
def flow_from_clientsecrets(filename, scope, redirect_uri=None, message=None, cache=None):
"""Create a Flow from a clientsecrets file.
  Will create the right kind of Flow based on the contents of the clientsecrets
  file, or will raise UnknownClientSecretsFlowError for unknown types of Flows.
Args:
filename: string, File name of client secrets.
scope: string or list of strings, scope(s) to request.
redirect_uri: string, Either the string 'urn:ietf:wg:oauth:2.0:oob' for
a non-web-based application, or a URI that handles the callback from
the authorization server.
message: string, A friendly string to display to the user if the
clientsecrets file is missing or invalid. If message is provided then
    sys.exit will be called in the case of an error. If message is not
provided then clientsecrets.InvalidClientSecretsError will be raised.
cache: An optional cache service client that implements get() and set()
methods. See clientsecrets.loadfile() for details.
Returns:
A Flow object.
Raises:
UnknownClientSecretsFlowError if the file describes an unknown kind of Flow.
clientsecrets.InvalidClientSecretsError if the clientsecrets file is
invalid.
"""
try:
client_type, client_info = clientsecrets.loadfile(filename, cache=cache)
if client_type in [clientsecrets.TYPE_WEB, clientsecrets.TYPE_INSTALLED]:
return OAuth2WebServerFlow(
client_info['client_id'],
client_info['client_secret'],
scope,
redirect_uri=redirect_uri,
user_agent=None,
auth_uri=client_info['auth_uri'],
token_uri=client_info['token_uri'])
except clientsecrets.InvalidClientSecretsError:
if message:
sys.exit(message)
else:
raise
else:
raise UnknownClientSecretsFlowError(
        'This OAuth 2.0 flow is unsupported: "%s"' % client_type)
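# Example usage (illustrative sketch; the clientsecrets path, scope and
# redirect URI are placeholders):
#
#   flow = flow_from_clientsecrets('client_secrets.json',
#                                  scope='https://www.googleapis.com/auth/userinfo.email',
#                                  redirect_uri='https://example.com/oauth2callback')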
|
|
import asyncio, discord, time
from discord.ext import commands
from Cogs import DisplayName, ReadableTime, Utils, Nullify
def setup(bot):
# Add the bot
settings = bot.get_cog("Settings")
bot.add_cog(Invite(bot, settings))
class Invite(commands.Cog):
# Init with the bot reference, and a reference to the settings var
def __init__(self, bot, settings):
self.bot = bot
self.settings = settings
self.current_requests = []
self.temp_allowed = []
self.approval_time = 3600 # 1 hour for an approval to roll off
self.request_time = 604800 # 7 x 24 x 3600 = 1 week for a request to roll off
global Utils, DisplayName
Utils = self.bot.get_cog("Utils")
DisplayName = self.bot.get_cog("DisplayName")
def _is_submodule(self, parent, child):
return parent == child or child.startswith(parent + ".")
async def onserverjoin(self, server):
# First verify if we're joining servers
if not self.settings.getGlobalStat("AllowServerJoin",True):
# Not joining - see if we have temp access to a server
temp = next((x for x in self.temp_allowed if x[0] == server.id),None)
if temp:
self.temp_allowed.remove(temp)
# Add to our whitelist
self._whitelist_server(temp[0])
return False
try:
await server.leave()
except:
pass
return True
return False
@commands.Cog.listener()
async def on_guild_remove(self, server):
# Remove from the whitelist if it exists
self._unwhitelist_server(server.id)
async def remove_request(self,user_server):
# Wait for the allotted time and remove the request if it still exists
await asyncio.sleep(self.request_time)
try:
self.current_requests.remove(user_server)
except ValueError:
pass
async def remove_allow(self,server_id):
# Wait for the allotted time and remove the temp_allowed value if it still exists
await asyncio.sleep(self.approval_time)
try:
self.temp_allowed.remove(server_id)
except ValueError:
pass
def _check_whitelist(self):
		# Helper method that rebuilds the whitelist from the "AllowServerJoin" setting - clears it when joins are allowed, otherwise whitelists every server we're currently in
self.settings.setGlobalStat("ServerWhitelist",None if self.settings.getGlobalStat("AllowServerJoin",True) else [x.id for x in self.bot.guilds])
def _whitelist_server(self, guild_id = None):
# Takes a guild id and ensures it's whitelisted
if not guild_id: return
current_whitelist = self.settings.getGlobalStat("ServerWhitelist",[])
current_whitelist = [] if not isinstance(current_whitelist,(list,tuple)) else current_whitelist
current_whitelist.append(guild_id)
self.settings.setGlobalStat("ServerWhitelist",current_whitelist)
def _unwhitelist_server(self, guild_id = None):
# Takes a guild id and removes it from the whitelist - if it finds it
if not guild_id: return
current_whitelist = self.settings.getGlobalStat("ServerWhitelist",[])
current_whitelist = [] if not isinstance(current_whitelist,(list,tuple)) else [x for x in current_whitelist if not x == guild_id]
self.settings.setGlobalStat("ServerWhitelist",current_whitelist if len(current_whitelist) else None)
@commands.Cog.listener()
async def on_loaded_extension(self, ext):
# See if we were loaded
if not self._is_submodule(ext.__name__, self.__module__):
return
await self.bot.wait_until_ready()
		# Check if we have the whitelist set up - and if not, auto-whitelist all joined servers
if self.settings.getGlobalStat("AllowServerJoin", True): return # No need to check - not restricting
print("Verifying server whitelist...")
current_whitelist = self.settings.getGlobalStat("ServerWhitelist",None)
if not current_whitelist:
print("No whitelist found - creating one with current servers.")
return self._check_whitelist() # If we don't have one saved - save one and bail
# Let's gather a list of any server we're on that's not in the whitelist
server_list = [x.id for x in self.bot.guilds]
bail_list = [x for x in server_list if not x in current_whitelist]
# Leave the unwhitelisted servers
t = time.time()
for x in bail_list:
server = self.bot.get_guild(x)
print(" - {} not in whitelist - leaving...".format(x))
try:
if server: await server.leave()
except: print(" --> I couldn't leave {} :(".format(x))
print("Whitelist verified - took {} seconds.".format(time.time() - t))
@commands.command()
async def invite(self, ctx, invite_url = None):
"""Outputs a url you can use to invite me to your server."""
if self.settings.getGlobalStat("AllowServerJoin", True):
return await ctx.send('Invite me to *your* server with this link: \n<{}>'.format(
discord.utils.oauth_url(self.bot.user.id, permissions=discord.Permissions(permissions=8))
))
# Check if we're temporarily allowing this server
server = ctx.guild
if invite_url:
try:
invite = await self.bot.fetch_invite(invite_url)
server = invite.guild
except:
pass
if server and any(x for x in self.temp_allowed if x[0] == server.id):
# Got an invite
return await ctx.send('Invite me to {} with this link: \n<{}>'.format(
Nullify.escape_all(ctx,server.name),
discord.utils.oauth_url(self.bot.user.id, permissions=discord.Permissions(permissions=8),guild=server)
))
return await ctx.send("You need approval from my owner{} to add me. You can request it with `{}requestjoin guild_invite_url`.".format(
"" if len(self.settings.getOwners()) == 1 else "s",
ctx.prefix
))
@commands.command()
async def requestjoin(self, ctx, invite_url = None):
"""Forwards the invite url to the bot's owners for review."""
if self.settings.getGlobalStat("AllowServerJoin", True):
return await ctx.invoke(self.invite)
# Get the list of owners - and account for any that have left
owners = self.settings.getOwners()
if not len(owners):
return await ctx.send("I have not been claimed, *yet*.")
if not invite_url:
return await ctx.send("Usage: `{}requestjoin discord.gg_invite_url`".format(ctx.prefix))
try:
invite = await self.bot.fetch_invite(invite_url)
except:
return await ctx.send("That invite url was not valid or expired.")
if invite.guild in self.bot.guilds:
return await ctx.send("I'm already in that server.")
temp = next((x for x in self.current_requests if x[1].id == invite.guild.id),None)
if temp:
return await ctx.send("I've already been requested for that server. Request rolls off in {}, or when approved.".format(
ReadableTime.getReadableTimeBetween(time.time(),temp[2])
))
temp = next((x for x in self.temp_allowed if x[0] == invite.guild.id),None)
if temp:
await ctx.invoke(self.invite,invite_url)
return await ctx.send("Valid for {}.".format(ReadableTime.getReadableTimeBetween(time.time(),temp[1])))
# Build a request to dm to up to the first 10 owners
msg = "{} ({} - {}#{} - {})\nhas requested the bot for: {} ({})\nvia the following invite: {}".format(
DisplayName.name(ctx.author),
ctx.author.mention,
ctx.author.name,
ctx.author.discriminator,
ctx.author.id,
invite.guild.name,
invite.guild.id,
invite
)
owners = owners if len(owners) < 11 else owners[:10]
for owner in owners:
target = self.bot.get_user(int(owner))
if not target:
continue
await target.send(msg)
request = (ctx.author,invite.guild,time.time()+self.request_time,ctx)
self.current_requests.append(request)
self.bot.loop.create_task(self.remove_request(request))
await ctx.send("I've forwarded the request to my owner{}. The request is valid for {}.".format(
"" if len(owners) == 1 else "s",
ReadableTime.getReadableTimeBetween(0,self.request_time)))
@commands.command()
async def approvejoin(self, ctx, server_id = None):
"""Temporarily allows the bot to join the passed server id or join url (owner-only)."""
# Only allow owner
isOwner = self.settings.isOwner(ctx.author)
if isOwner == None:
return await ctx.send('I have not been claimed, *yet*.')
elif isOwner == False:
return await ctx.send('You are not the *true* owner of me. Only the rightful owner can use this command.')
if server_id == None:
return await ctx.send("Usage: `{}approvejoin server_id`".format(ctx.prefix))
try:
server_id = int(server_id)
except:
try:
invite = await self.bot.fetch_invite(server_id)
server_id = invite.guild.id
except:
return await ctx.send("Invalid server id passed.")
guild_list = [x.id for x in self.bot.guilds]
# Check if we're already on that server, or if it's already been approved
if server_id in guild_list:
return await ctx.send("I'm already in that server.")
temp = next((x for x in self.temp_allowed if x[0] == server_id),None)
if temp:
# Let's remove the approval to allow it to re-add with a new time
try:
self.temp_allowed.remove(temp)
except:
pass
# Allow the guild
temp_allow = (server_id,time.time()+self.approval_time)
self.temp_allowed.append(temp_allow)
# Remove if it's been requested
		# Use the resolved server_id here - "invite" is only defined when an invite url was passed
		request = next((x for x in self.current_requests if x[1].id == server_id),None)
if request:
await request[3].send("{}, your request for me to join {} has been approved for the next {}. You can invite me with this link:\n<{}>".format(
request[0].mention,
Nullify.escape_all(request[1].name),
ReadableTime.getReadableTimeBetween(0,self.approval_time),
discord.utils.oauth_url(self.bot.user.id, permissions=discord.Permissions(permissions=8),guild=request[1])
),allowed_mentions=discord.AllowedMentions.all())
try:
self.current_requests.remove(request)
except:
pass
self.bot.loop.create_task(self.remove_allow(temp_allow))
await ctx.send("I've been approved to join {} for the next {}.".format(
server_id,
ReadableTime.getReadableTimeBetween(0,self.approval_time)))
@commands.command()
async def revokejoin(self, ctx, server_id = None):
"""Revokes a previously approved temporary join (owner-only)."""
# Only allow owner
isOwner = self.settings.isOwner(ctx.author)
if isOwner == None:
return await ctx.send('I have not been claimed, *yet*.')
elif isOwner == False:
return await ctx.send('You are not the *true* owner of me. Only the rightful owner can use this command.')
if server_id == None:
return await ctx.send("Usage: `{}revokejoin server_id`".format(ctx.prefix))
try:
server_id = int(server_id)
except:
try:
invite = await self.bot.fetch_invite(server_id)
server_id = invite.guild.id
except:
return await ctx.send("Invalid server id passed.")
guild_list = [x.id for x in self.bot.guilds]
# Check if we're already on that server, or if it's already been approved
if server_id in guild_list:
return await ctx.send("I'm already in that server.")
temp = next((x for x in self.temp_allowed if x[0] == server_id),None)
if not temp:
return await ctx.send("That server is not in my temp approved list.")
self.temp_allowed.remove(temp)
return await ctx.send("Approval to join guild id {} has been revoked.".format(server_id))
@commands.command()
async def canjoin(self, ctx, *, yes_no = None):
"""Sets whether the bot is allowed to join new servers (owner-only and enabled by default)."""
# Only allow owner
isOwner = self.settings.isOwner(ctx.author)
if isOwner == None:
return await ctx.send('I have not been claimed, *yet*.')
elif isOwner == False:
return await ctx.send('You are not the *true* owner of me. Only the rightful owner can use this command.')
setting_name = "Allow new server joins"
setting_val = "AllowServerJoin"
current = self.settings.getGlobalStat(setting_val, True)
if yes_no == None:
msg = "{} currently *{}.*".format(setting_name,"enabled" if current else "disabled")
elif yes_no.lower() in [ "yes", "on", "true", "enabled", "enable" ]:
yes_no = True
msg = "{} {} *enabled*.".format(setting_name,"remains" if current else "is now")
elif yes_no.lower() in [ "no", "off", "false", "disabled", "disable" ]:
yes_no = False
msg = "{} {} *disabled*.".format(setting_name,"remains" if not current else "is now")
else:
msg = "That's not a valid setting."
yes_no = current
if not yes_no == None and not yes_no == current:
self.settings.setGlobalStat(setting_val, yes_no)
# Force the whitelist update
self._check_whitelist()
await ctx.send(msg)
@commands.command()
async def block(self, ctx, *, server : str = None):
"""Blocks the bot from joining a server - takes either a name or an id (owner-only).
		Can also take the id or case-sensitive name + discriminator of the owner (e.g. Bob#1234)."""
# Check if we're suppressing @here and @everyone mentions
suppress = True if self.settings.getServerStat(ctx.guild,"SuppressMentions",True) else False
# Only allow owner
isOwner = self.settings.isOwner(ctx.author)
if isOwner == None:
return await ctx.send('I have not been claimed, *yet*.')
elif isOwner == False:
return await ctx.send('You are not the *true* owner of me. Only the rightful owner can use this command.')
if server == None:
# No server provided
return await ctx.send("Usage: `{}block [server name/id or owner name#desc/id]`".format(ctx.prefix))
serverList = self.settings.getGlobalStat('BlockedServers',[])
for serv in serverList:
if str(serv).lower() == server.lower():
# Found a match - already blocked.
msg = "*{}* is already blocked!".format(Nullify.escape_all(serv))
return await ctx.send(msg)
# Not blocked
serverList.append(server)
self.settings.setGlobalStat("BlockedServers",serverList)
msg = "*{}* now blocked!".format(Nullify.escape_all(server))
await ctx.send(msg)
@commands.command()
async def unblock(self, ctx, *, server : str = None):
"""Unblocks a server or owner (owner-only)."""
# Check if we're suppressing @here and @everyone mentions
suppress = True if self.settings.getServerStat(ctx.guild,"SuppressMentions",True) else False
# Only allow owner
isOwner = self.settings.isOwner(ctx.author)
if isOwner == None:
return await ctx.send('I have not been claimed, *yet*.')
elif isOwner == False:
return await ctx.send('You are not the *true* owner of me. Only the rightful owner can use this command.')
if server == None:
# No server provided
return await ctx.send("Usage: `{}unblock [server name/id or owner name#desc/id]`".format(ctx.prefix))
serverList = self.settings.getGlobalStat('BlockedServers',[])
serverTest = [x for x in serverList if not str(x).lower() == server.lower()]
if len(serverList) != len(serverTest):
# Something changed
self.settings.setGlobalStat("BlockedServers",serverTest)
msg = "*{}* unblocked!".format(Nullify.escape_all(server))
return await ctx.send(msg)
# Not found
msg = "I couldn't find *{}* in my blocked list.".format(Nullify.escape_all(server))
await ctx.send(msg)
@commands.command()
async def unblockall(self, ctx):
"""Unblocks all blocked servers and owners (owner-only)."""
# Check if we're suppressing @here and @everyone mentions
suppress = True if self.settings.getServerStat(ctx.guild,"SuppressMentions",True) else False
# Only allow owner
isOwner = self.settings.isOwner(ctx.author)
if isOwner == None:
return await ctx.send('I have not been claimed, *yet*.')
elif isOwner == False:
return await ctx.send('You are not the *true* owner of me. Only the rightful owner can use this command.')
self.settings.setGlobalStat('BlockedServers',[])
await ctx.send("*All* servers and owners unblocked!")
@commands.command()
async def blocked(self, ctx):
"""Lists all blocked servers and owners (owner-only)."""
# Check if we're suppressing @here and @everyone mentions
suppress = True if self.settings.getServerStat(ctx.guild,"SuppressMentions",True) else False
# Only allow owner
isOwner = self.settings.isOwner(ctx.author)
if isOwner == None:
return await ctx.send('I have not been claimed, *yet*.')
elif isOwner == False:
return await ctx.send('You are not the *true* owner of me. Only the rightful owner can use this command.')
serverList = self.settings.getGlobalStat('BlockedServers',[])
if not len(serverList):
msg = "There are no blocked servers or owners!"
else:
msg = "__Currently Blocked:__\n\n{}".format(', '.join([Nullify.escape_all(x) for x in serverList]))
await ctx.send(msg)
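# Rough flow of the join-approval feature (summary comment, not code):
#   1. With "AllowServerJoin" disabled, a user runs `requestjoin <invite_url>`;
#      the request is DM'd to up to 10 owners and rolls off after a week.
#   2. An owner runs `approvejoin <server_id or invite_url>`, which adds the
#      guild to temp_allowed for one hour and notifies the requester.
#   3. When the bot joins, onserverjoin() consumes the temp_allowed entry and
#      whitelists the guild; joins without an approval are left immediately.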
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import six
import swiftclient.client as sc
from heat.common import exception
from heat.common import template_format
from heat.engine.resources.openstack.swift import swift
from heat.engine import scheduler
from heat.tests import common
from heat.tests import utils
SWIFT_TEMPLATE = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "Template to test OS::Swift::Container resources",
"Resources" : {
"SwiftContainerWebsite" : {
"Type" : "OS::Swift::Container",
"DeletionPolicy" : "Delete",
"Properties" : {
"X-Container-Read" : ".r:*",
"X-Container-Meta" : {
"Web-Index" : "index.html",
"Web-Error" : "error.html"
}
}
},
"SwiftAccountMetadata" : {
"Type" : "OS::Swift::Container",
"DeletionPolicy" : "Delete",
"Properties" : {
"X-Account-Meta" : {
"Temp-Url-Key" : "secret"
}
}
},
"S3Bucket" : {
"Type" : "AWS::S3::Bucket",
"Properties" : {
"SwiftContainer" : {"Ref" : "SwiftContainer"}
}
},
"SwiftContainer" : {
"Type" : "OS::Swift::Container",
"Properties" : {
}
}
}
}
'''
class SwiftTest(common.HeatTestCase):
def setUp(self):
super(SwiftTest, self).setUp()
self.t = template_format.parse(SWIFT_TEMPLATE)
def _create_container(self, stack, definition_name='SwiftContainer'):
resource_defns = stack.t.resource_definitions(stack)
container = swift.SwiftContainer('test_resource',
resource_defns[definition_name],
stack)
runner = scheduler.TaskRunner(container.create)
runner()
self.assertEqual((container.CREATE, container.COMPLETE),
container.state)
return container
@mock.patch('swiftclient.client.Connection.put_container')
def test_create_container_name(self, mock_put):
# Setup
self.t['Resources']['SwiftContainer']['Properties']['name'] = \
'the_name'
stack = utils.parse_stack(self.t)
# Test
container = self._create_container(stack)
container_name = container.physical_resource_name()
# Verify
self.assertEqual('the_name', container_name)
mock_put.assert_called_once_with('the_name', {})
def test_build_meta_headers(self):
# Setup
headers = {'Web-Index': 'index.html', 'Web-Error': 'error.html'}
# Test
self.assertEqual({}, swift.SwiftContainer._build_meta_headers(
'container', {}))
self.assertEqual({}, swift.SwiftContainer._build_meta_headers(
'container', None))
built = swift.SwiftContainer._build_meta_headers('container', headers)
# Verify
expected = {
'X-Container-Meta-Web-Index': 'index.html',
'X-Container-Meta-Web-Error': 'error.html'
}
self.assertEqual(expected, built)
@mock.patch('swiftclient.client.Connection.head_container')
@mock.patch('swiftclient.client.Connection.put_container')
def test_attributes(self, mock_put, mock_head):
# Setup
headers = {'content-length': '0',
'x-container-object-count': '82',
'accept-ranges': 'bytes',
'x-trans-id': 'tx08ea48ef2fa24e6da3d2f5c188fd938b',
'date': 'Wed, 23 Jan 2013 22:48:05 GMT',
'x-timestamp': '1358980499.84298',
'x-container-read': '.r:*',
'x-container-bytes-used': '17680980',
'content-type': 'text/plain; charset=utf-8'}
mock_head.return_value = headers
container_name = utils.PhysName('test_stack', 'test_resource')
stack = utils.parse_stack(self.t)
# Test
container = self._create_container(stack)
# Verify Attributes
self.assertEqual(container_name, container.FnGetRefId())
self.assertEqual('82', container.FnGetAtt('ObjectCount'))
self.assertEqual('17680980', container.FnGetAtt('BytesUsed'))
self.assertEqual('server.test', container.FnGetAtt('DomainName'))
self.assertEqual(headers, container.FnGetAtt('HeadContainer'))
self.assertEqual(headers, container.FnGetAtt('show'))
expected_url = 'http://server.test:5000/v3/%s' % container.FnGetRefId()
self.assertEqual(expected_url, container.FnGetAtt('WebsiteURL'))
self.assertRaises(exception.InvalidTemplateAttribute,
container.FnGetAtt, 'Foo')
# Verify Expected Calls
mock_put.assert_called_once_with(container_name, {})
self.assertTrue(mock_head.call_count > 0)
@mock.patch('swiftclient.client.Connection.put_container')
def test_public_read(self, mock_put):
# Setup
container_name = utils.PhysName('test_stack', 'test_resource')
properties = self.t['Resources']['SwiftContainer']['Properties']
properties['X-Container-Read'] = '.r:*'
stack = utils.parse_stack(self.t)
# Test
self._create_container(stack)
# Verify
expected = {'X-Container-Read': '.r:*'}
mock_put.assert_called_once_with(container_name, expected)
@mock.patch('swiftclient.client.Connection.put_container')
def test_public_read_write(self, mock_put):
# Setup
container_name = utils.PhysName('test_stack', 'test_resource')
properties = self.t['Resources']['SwiftContainer']['Properties']
properties['X-Container-Read'] = '.r:*'
properties['X-Container-Write'] = '.r:*'
stack = utils.parse_stack(self.t)
# Test
self._create_container(stack)
# Verify
expected = {'X-Container-Write': '.r:*', 'X-Container-Read': '.r:*'}
mock_put.assert_called_once_with(container_name, expected)
@mock.patch('swiftclient.client.Connection.put_container')
def test_container_headers(self, mock_put):
# Setup
container_name = utils.PhysName('test_stack', 'test_resource')
stack = utils.parse_stack(self.t)
# Test
self._create_container(stack,
definition_name='SwiftContainerWebsite')
# Verify
expected = {'X-Container-Meta-Web-Error': 'error.html',
'X-Container-Meta-Web-Index': 'index.html',
'X-Container-Read': '.r:*'}
mock_put.assert_called_once_with(container_name, expected)
@mock.patch('swiftclient.client.Connection.post_account')
@mock.patch('swiftclient.client.Connection.put_container')
def test_account_headers(self, mock_put, mock_post):
# Setup
container_name = utils.PhysName('test_stack', 'test_resource')
stack = utils.parse_stack(self.t)
# Test
self._create_container(stack,
definition_name='SwiftAccountMetadata')
# Verify
mock_put.assert_called_once_with(container_name, {})
expected = {'X-Account-Meta-Temp-Url-Key': 'secret'}
mock_post.assert_called_once_with(expected)
@mock.patch('swiftclient.client.Connection.put_container')
def test_default_headers_not_none_empty_string(self, mock_put):
# Setup
container_name = utils.PhysName('test_stack', 'test_resource')
stack = utils.parse_stack(self.t)
# Test
container = self._create_container(stack)
# Verify
mock_put.assert_called_once_with(container_name, {})
self.assertEqual({}, container.metadata_get())
@mock.patch('swiftclient.client.Connection.delete_container')
@mock.patch('swiftclient.client.Connection.get_container')
@mock.patch('swiftclient.client.Connection.put_container')
def test_delete_exception(self, mock_put, mock_get, mock_delete):
# Setup
container_name = utils.PhysName('test_stack', 'test_resource')
stack = utils.parse_stack(self.t)
mock_delete.side_effect = sc.ClientException('test-delete-failure')
mock_get.return_value = ({'name': container_name}, [])
# Test
container = self._create_container(stack)
runner = scheduler.TaskRunner(container.delete)
self.assertRaises(exception.ResourceFailure, runner)
# Verify
self.assertEqual((container.DELETE, container.FAILED),
container.state)
mock_put.assert_called_once_with(container_name, {})
mock_get.assert_called_once_with(container_name)
mock_delete.assert_called_once_with(container_name)
@mock.patch('swiftclient.client.Connection.delete_container')
@mock.patch('swiftclient.client.Connection.get_container')
@mock.patch('swiftclient.client.Connection.put_container')
def test_delete_not_found(self, mock_put, mock_get, mock_delete):
# Setup
container_name = utils.PhysName('test_stack', 'test_resource')
stack = utils.parse_stack(self.t)
mock_delete.side_effect = sc.ClientException('missing',
http_status=404)
mock_get.return_value = ({'name': container_name}, [])
# Test
container = self._create_container(stack)
runner = scheduler.TaskRunner(container.delete)
runner()
# Verify
self.assertEqual((container.DELETE, container.COMPLETE),
container.state)
mock_put.assert_called_once_with(container_name, {})
mock_get.assert_called_once_with(container_name)
mock_delete.assert_called_once_with(container_name)
@mock.patch('swiftclient.client.Connection.get_container')
@mock.patch('swiftclient.client.Connection.put_container')
def test_delete_non_empty_not_allowed(self, mock_put, mock_get):
# Setup
container_name = utils.PhysName('test_stack', 'test_resource')
stack = utils.parse_stack(self.t)
mock_get.return_value = ({'name': container_name},
[{'name': 'test_object'}])
# Test
container = self._create_container(stack)
runner = scheduler.TaskRunner(container.delete)
ex = self.assertRaises(exception.ResourceFailure, runner)
# Verify
self.assertEqual((container.DELETE, container.FAILED),
container.state)
self.assertIn('ResourceActionNotSupported: resources.test_resource: '
'Deleting non-empty container',
six.text_type(ex))
mock_put.assert_called_once_with(container_name, {})
mock_get.assert_called_once_with(container_name)
@mock.patch('swiftclient.client.Connection.delete_container')
@mock.patch('swiftclient.client.Connection.delete_object')
@mock.patch('swiftclient.client.Connection.get_container')
@mock.patch('swiftclient.client.Connection.put_container')
def test_delete_non_empty_allowed(self, mock_put, mock_get,
mock_delete_object,
mock_delete_container):
# Setup
container_name = utils.PhysName('test_stack', 'test_resource')
self.t['Resources']['SwiftContainer']['Properties']['PurgeOnDelete'] \
= True
stack = utils.parse_stack(self.t)
get_return_values = [
({'name': container_name},
[{'name': 'test_object1'},
{'name': 'test_object2'}]),
({'name': container_name}, [{'name': 'test_object1'}]),
]
mock_get.side_effect = get_return_values
# Test
container = self._create_container(stack)
runner = scheduler.TaskRunner(container.delete)
runner()
# Verify
self.assertEqual((container.DELETE, container.COMPLETE),
container.state)
mock_put.assert_called_once_with(container_name, {})
mock_delete_container.assert_called_once_with(container_name)
self.assertEqual(2, mock_get.call_count)
self.assertEqual(2, mock_delete_object.call_count)
@mock.patch('swiftclient.client.Connection.delete_container')
@mock.patch('swiftclient.client.Connection.delete_object')
@mock.patch('swiftclient.client.Connection.get_container')
@mock.patch('swiftclient.client.Connection.put_container')
def test_delete_non_empty_allowed_not_found(self, mock_put, mock_get,
mock_delete_object,
mock_delete_container):
# Setup
container_name = utils.PhysName('test_stack', 'test_resource')
self.t['Resources']['SwiftContainer']['Properties']['PurgeOnDelete'] \
= True
stack = utils.parse_stack(self.t)
mock_get.return_value = ({'name': container_name},
[{'name': 'test_object'}])
mock_delete_object.side_effect =\
sc.ClientException('object-is-gone', http_status=404)
mock_delete_container.side_effect =\
sc.ClientException('container-is-gone', http_status=404)
# Test
container = self._create_container(stack)
runner = scheduler.TaskRunner(container.delete)
runner()
# Verify
self.assertEqual((container.DELETE, container.COMPLETE),
container.state)
mock_put.assert_called_once_with(container_name, {})
mock_get.assert_called_once_with(container_name)
mock_delete_object.assert_called_once_with(container_name,
'test_object')
mock_delete_container.assert_called_once_with(container_name)
@mock.patch('swiftclient.client.Connection.delete_object')
@mock.patch('swiftclient.client.Connection.get_container')
@mock.patch('swiftclient.client.Connection.put_container')
def test_delete_non_empty_fails_delete_object(self, mock_put, mock_get,
mock_delete_object):
# Setup
container_name = utils.PhysName('test_stack', 'test_resource')
self.t['Resources']['SwiftContainer']['Properties']['PurgeOnDelete'] \
= True
stack = utils.parse_stack(self.t)
mock_get.return_value = ({'name': container_name},
[{'name': 'test_object'}])
mock_delete_object.side_effect =\
sc.ClientException('object-delete-failure')
# Test
container = self._create_container(stack)
runner = scheduler.TaskRunner(container.delete)
self.assertRaises(exception.ResourceFailure, runner)
# Verify
self.assertEqual((container.DELETE, container.FAILED),
container.state)
mock_put.assert_called_once_with(container_name, {})
mock_get.assert_called_once_with(container_name)
mock_delete_object.assert_called_once_with(container_name,
'test_object')
@mock.patch('swiftclient.client.Connection.put_container')
def test_delete_retain(self, mock_put):
# Setup
container_name = utils.PhysName('test_stack', 'test_resource')
self.t['Resources']['SwiftContainer']['DeletionPolicy'] = 'Retain'
stack = utils.parse_stack(self.t)
# Test
container = self._create_container(stack)
runner = scheduler.TaskRunner(container.delete)
runner()
# Verify
self.assertEqual((container.DELETE, container.COMPLETE),
container.state)
mock_put.assert_called_once_with(container_name, {})
@mock.patch('swiftclient.client.Connection.get_container')
@mock.patch('swiftclient.client.Connection.put_container')
def test_check(self, mock_put, mock_get):
# Setup
self.t['Resources']['SwiftContainer']['Properties']['PurgeOnDelete'] \
= True
stack = utils.parse_stack(self.t)
# Test
container = self._create_container(stack)
runner = scheduler.TaskRunner(container.check)
runner()
self.assertEqual((container.CHECK, container.COMPLETE),
container.state)
@mock.patch('swiftclient.client.Connection.get_container')
@mock.patch('swiftclient.client.Connection.put_container')
def test_check_fail(self, mock_put, mock_get):
# Setup
self.t['Resources']['SwiftContainer']['Properties']['PurgeOnDelete'] \
= True
stack = utils.parse_stack(self.t)
mock_get.side_effect = Exception('boom')
# Test
container = self._create_container(stack)
runner = scheduler.TaskRunner(container.check)
ex = self.assertRaises(exception.ResourceFailure, runner)
# Verify
self.assertIn('boom', six.text_type(ex))
self.assertEqual((container.CHECK, container.FAILED),
container.state)
def test_refid(self):
stack = utils.parse_stack(self.t)
rsrc = stack['SwiftContainer']
rsrc.resource_id = 'xyz'
self.assertEqual('xyz', rsrc.FnGetRefId())
def test_refid_convergence_cache_data(self):
cache_data = {'SwiftContainer': {
'uuid': mock.ANY,
'id': mock.ANY,
'action': 'CREATE',
'status': 'COMPLETE',
'reference_id': 'xyz_convg'
}}
stack = utils.parse_stack(self.t, cache_data=cache_data)
rsrc = stack['SwiftContainer']
self.assertEqual('xyz_convg', rsrc.FnGetRefId())
|
|
"""
The fields module defines various field classes all of which are derived from
BaseField.
Field Methods
~~~~~~~~~~~~~
.. automethod:: BaseField.validate(raw_data, **kwargs)
.. automethod:: BaseField.deserialize(raw_data, **kwargs)
.. automethod:: BaseField.serialize(py_data, **kwargs)
"""
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
import datetime
import logging
import re
from six import string_types
from .utils import CommonEqualityMixin
from .iso8601 import ParseError, parse, parse_time, parse_date
logger = logging.getLogger(__name__)
class ValidationException(Exception):
"""
Serves as custom exception for all field validations.
"""
def __init__(self, msg, value):
super(ValidationException, self).__init__(self, msg, repr(value))
self._msg = msg
self._value = value
def __str__(self):
return '%s: %s, value:%r' % (self.__class__.__name__, self._msg,
self._value)
@property
def msg(self):
return self._msg
class BaseField(CommonEqualityMixin):
"""Base class for all field types.
The ``source`` parameter sets the key that will be retrieved from the
source data. If ``source`` is not specified, the field instance will use
its own name as the key to retrieve the value from the source data.
The ``serial_format`` parameter controls the serialization format, e.g. in
DateTimeField etc.
A default value can be assigned through the ``default`` parameter.
:param bool kwargs['required']: indicates required field
:param str kwargs['default']: default value, used when raw_data is None
:param str kwargs['serial_format']: format string for serialization and \
deserialization
:param str kwargs['source']: field name for serialized version
"""
serial_format = None
_name_space = None
def __init__(self, **kwargs):
self.source = kwargs.get('source')
self.default = kwargs.get('default')
self.serial_format = kwargs.get('serial_format', self.serial_format)
self._name_space = kwargs.get('name_space', self._name_space)
self.isAttribute = False
self.required = kwargs.get('required', False)
def __str__(self):
return self.__class__.__name__
def __repr__(self):
return self.__str__()
def get_source(self, key, name_spaces=None, default_prefix=''):
"""Generates the dictionary key for the serialized representation
based on the instance variable source and a provided key.
:param str key: name of the field in model
:returns: self.source or key
"""
source = self.source or key
prefix = default_prefix
if name_spaces and self.name_space and self.name_space in name_spaces:
prefix = ''.join([name_spaces[self.name_space], ':'])
return ''.join([prefix, source])
def validate(self, raw_data, **kwargs):
"""The validate method validates raw_data against the field .
:param raw_data: raw data for field
:type raw_data: str or other valid formats
:returns: validated_data
:raises ValidationException: if self.required and raw_data is None
"""
return raw_data
def deserialize(self, raw_data, **kwargs):
return self.validate(raw_data, **kwargs)
def serialize(self, py_data, **kwargs):
return self.validate(py_data, **kwargs)
@property
def name_space(self):
return self._name_space
class AttributeField(BaseField):
"""Wrapper to describes a XML attribute. Adds prefix '@' to the result of
get_source if source does not already start with '@'. The '@' prefix
identifies attribute fields."""
field_instance = None
def __init__(self, field_instance, **kwargs):
super(AttributeField, self).__init__(**kwargs)
self.isAttribute = True
self.default = field_instance.default
self.source = field_instance.source
self._name_space = field_instance.name_space
self.messages = field_instance.messages
self.messages['required'] = 'Required attribute field has no data.'
self.field_instance = field_instance
def __setattr__(self, key, value):
if self.field_instance:
setattr(self.field_instance, key, value)
else:
self.__dict__[key] = value
def get_source(self, key, name_spaces=None, default_prefix=''):
source_key = self.field_instance.source or key
source = super(AttributeField, self).get_source(
source_key, name_spaces, default_prefix)
if source[0] == '@':
return source
return ''.join(['@', source])
def validate(self, raw_data, **kwargs):
if raw_data is None:
if self.field_instance.required:
raise ValidationException(self.messages['required'],
self.__str__())
else:
return self.field_instance.validate(raw_data, **kwargs)
def deserialize(self, raw_data, **kwargs):
return self.field_instance.deserialize(raw_data, **kwargs)
def serialize(self, py_data, **kwargs):
return self.field_instance.serialize(py_data, **kwargs)
class RequiredAttribute(AttributeField):
"""Wrapper to describe a required XML attribute."""
def __init__(self, field_instance, **kwargs):
super(RequiredAttribute, self).__init__(field_instance, **kwargs)
self.required = True
class CharField(BaseField):
"""Field to represent a simple Unicode string value.
.. doctest::
>>> from xmodels import CharField
>>> char_field = CharField()
>>> char_field.validate(' valid unicode string!\\n')
'valid unicode string!'
"""
# >>> CharField().validate(42)
# Traceback (most recent call last):
# ...
# ValidationException: ValidationException: Expecting a string, value:42
# >>> CharField(minLength=8).validate('0123')
# Traceback (most recent call last):
# ...
# ValidationException: ValidationException: \
# Expecting string longer than 8 characters, value:'0123'
# >>> CharField(maxLength=8).validate('0123456789')
# Traceback (most recent call last):
# ...
# ValidationException: ValidationException: \
# Expecting string shorter than 8 characters, value:'0123456789'
strip = True
minLength = None
maxLength = None
messages = dict(
invalid='Expecting a string',
tooShort='Expecting string longer than %d characters',
tooLong='Expecting string shorter than %d characters',
)
def __init__(self, **kwargs):
super(CharField, self).__init__(**kwargs)
self.strip = kwargs.get('strip', self.strip)
self.minLength = kwargs.get('minLength', self.minLength)
self.maxLength = kwargs.get('maxLength', self.maxLength)
def validate(self, raw_data, **kwargs):
super(CharField, self).validate(raw_data, **kwargs)
if not isinstance(raw_data, string_types):
raise ValidationException(self.messages['invalid'], raw_data)
stripped = raw_data.strip() if self.strip else raw_data
if self.minLength is not None:
if len(stripped) < self.minLength:
raise ValidationException(self.messages['tooShort']
% self.minLength, stripped)
if self.maxLength is not None:
if len(stripped) > self.maxLength:
raise ValidationException(self.messages['tooLong']
% self.maxLength, stripped)
return stripped
class RegexField(CharField):
"""Field to represent unicode strings matching a regular expression.
It raises ValidationException if there is no match.
:param regex: regular expression to match.
"""
regex = r''
messages = dict(
no_match='The input does not match the regex')
def __init__(self, **kwargs):
super(RegexField, self).__init__(**kwargs)
self.regex = kwargs.get('regex', self.regex)
self.messages.update(CharField.messages)
def validate(self, raw_data, **kwargs):
validated_string = super(RegexField, self).validate(raw_data, **kwargs)
regex = re.compile(self.regex)
if not regex.search(validated_string):
raise ValidationException(self.messages['no_match'], raw_data)
return validated_string
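# Illustrative example (not part of the module): a RegexField only accepts
# strings matching its pattern.
#
#   RegexField(regex=r'^[A-Z]{2}[0-9]{4}$').validate('AB1234')  # -> 'AB1234'
#   RegexField(regex=r'^[A-Z]{2}[0-9]{4}$').validate('nope')    # raises ValidationException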
class Token(CharField):
"""CharField for xsd:token.
Tokens are strings without leading and trailing whitespaces. All other
whitespaces are collapsed.
"""
messages = dict(
whitespace="""Whitespaces should be collapsed in a token."""
)
def __init__(self, **kwargs):
super(Token, self).__init__(**kwargs)
self.messages.update(CharField.messages)
def validate(self, raw_data, **kwargs):
string_value = super(Token, self).validate(raw_data, **kwargs)
if ' '.join(string_value.split()) != string_value.strip():
raise ValidationException(self.messages['whitespace'], raw_data)
return string_value
class Name(RegexField):
"""Field for xsd:name.
Values of this type must start with a letter, underscore (_), or colon (:),
and may contain only letters, digits, underscores (_), colons (:), hyphens
(-), and periods (.). Colons should only be used to separate namespace
prefixes from local names.
.. doctest::
>>> from xmodels import Name
>>> name_field = Name()
>>> name_field.validate('valid_name')
'valid_name'
"""
# >>> name_field.validate('illegal!')
# Traceback (most recent call last):
# ...
# ValidationException: ValidationException: A name needs to begin with a
# letter, colon (:), or underscore (_) and shall only contain letters,
# numbers, and the colon (:), underscore (_), dash (-), and dot (.)
# characters. Only one colon (:) total., value:'illegal!'
regex = r'^[a-zA-Z:_][\w:_\-\.]*$'
messages = dict(
no_match="""A name needs to begin with a letter, colon (:), or
underscore (_) and shall only contain letters, numbers, and the colon (:),
underscore (_), dash (-), and dot (.) characters. Only one colon (:) total.""",
colons="There should only be ONE colon."
)
def validate(self, raw_data, **kwargs):
validated_string = super(Name, self).validate(raw_data, **kwargs)
if validated_string.count(':') > 1:
raise ValidationException(self.messages['colons'], raw_data)
return validated_string
class NCName(RegexField):
"""Field for xsd:ncname.
The type NCName represents an XML non-colonized name, which is simply a
name that does not contain colons. An NCName must start with either a
letter or underscore (_) and may contain only letters, digits, underscores
(_), hyphens (-), and periods (.). This is identical to the Name type,
except that colons are not permitted.
"""
regex = r'^[a-zA-Z_][\w_\-\.]*$'
messages = dict(
no_match="""A name needs to begin with a letter, or underscore (_) and
shall only contain letters, numbers, and the underscore (_), dash (-), and dot
(.) characters."""
)
class Language(RegexField):
"""Field for xsd:language.
The type language represents a natural language identifier, generally used
to indicate the language of a document or a part of a document. Before
creating a new attribute of type language, consider using the xml:lang
attribute that is intended to indicate the natural language of the element
and its content. Values of the language type conform to RFC 3066, Tags
for the Identification of Languages, in version 1.0 and to RFC 4646, Tags
for Identifying Languages, and RFC 4647, Matching of Language Tags, in
version 1.1. The three most common formats are: For ISO-recognized
languages, the format is a two- or three-letter (usually lowercase)
language code that conforms to ISO 639, optionally followed by a hyphen
and a two-letter, usually uppercase, country code that conforms to
ISO 3166. For example, en or en-US. For languages registered by the
Internet Assigned Numbers Authority (IANA), the format is i-langname,
where langname is the registered name. For example, i-navajo.
For unofficial languages, the format is x-langname, where langname is a
name of up to eight characters agreed upon by the two parties sharing the
document. For example, x-Newspeak. Any of these three formats may have
additional parts, each preceded by a hyphen, which identify more countries
or dialects. Schema processors will not verify that values of the language
type conform to the above rules. They will simply deserialize them based on
the pattern specified for this type, which says that it must consist of
one or more parts of up to eight characters each, separated by hyphens.
"""
regex = r'^([a-zA-Z]{1,8})(-[a-zA-Z]{1,8})*$'
messages = dict(
no_match="""A language identifier consists of parts of one to eight
letters separated by a dash (-)."""
)
class NMTOKEN(RegexField):
"""Field for xsd:NMTOKEN.
The type NMTOKEN represents a single string token. NMTOKEN values may
consist of letters, digits, periods (.), hyphens (-), underscores (_), and
colons (:). They may start with any of these characters. NMTOKEN has a
whitespace facet value of collapse, so any leading or trailing whitespace
will be removed. However, no whitespace may appear within the value itself.
"""
regex = r'^[\w:_\-\.]+$'
messages = dict(
no_match='A nmtoken shall only contain letters, numbers,\
and the colon (:), underscore (_), dash (-), and dot (.) characters.')
class RangeField(BaseField):
"""
Base class for IntegerField and FloatField.
    :param int/float kwargs['min']: indicates the minimum allowed value (inclusive).
    :param int/float kwargs['max']: indicates the maximum allowed value (inclusive).
"""
min = None
max = None
messages = dict(
tooSmall='Expecting value greater than %d',
tooLarge='Expecting value less than %d',
)
def __init__(self, **kwargs):
super(RangeField, self).__init__(**kwargs)
self.max = kwargs.get('max', self.max)
self.min = kwargs.get('min', self.min)
def validate(self, raw_data, **kwargs):
super(RangeField, self).validate(raw_data, **kwargs)
if self.min is not None:
if raw_data < self.min:
raise ValidationException(self.messages['tooSmall']
% self.min, raw_data)
if self.max is not None:
if raw_data > self.max:
raise ValidationException(self.messages['tooLarge']
% self.max, raw_data)
return raw_data
class IntegerField(RangeField):
"""Field to represent an integer value."""
messages = dict(
invalid="Could not convert to int:"
)
def __init__(self, **kwargs):
super(IntegerField, self).__init__(**kwargs)
self.messages.update(RangeField.messages)
def validate(self, raw_data, **kwargs):
"""Convert the raw_data to an integer.
"""
try:
converted_data = int(raw_data)
return super(IntegerField, self).validate(converted_data)
except ValueError:
raise ValidationException(self.messages['invalid'], repr(raw_data))
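# Illustrative example (not part of the module): IntegerField converts and
# range-checks its input.
#
#   IntegerField(min=0, max=10).validate('7')   # -> 7
#   IntegerField(min=0).validate('-1')          # raises ValidationException (tooSmall)
#   IntegerField().validate('x')                # raises ValidationException (invalid)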
class NonNegativeInteger(IntegerField):
"""
    Field to represent a non-negative integer value.
"""
min = 0
class PositiveInteger(IntegerField):
"""
Field to represent a positive integer value.
"""
min = 1
class NegativeInteger(IntegerField):
"""
Field to represent a negative integer value.
"""
max = -1
class FloatField(RangeField):
"""Field to represent a floating point value. The serial_format uses the
standard string format notation with the surrounding curly brackets."""
messages = dict(
invalid="Could not convert to float:",
format="Could not convert float to string with format %(format)s.",
)
def __init__(self, **kwargs):
super(FloatField, self).__init__(**kwargs)
self.messages.update(RangeField.messages)
def validate(self, raw_data, **kwargs):
"""Convert the raw_data to a float.
"""
try:
converted_data = float(raw_data)
super(FloatField, self).validate(converted_data, **kwargs)
return raw_data
except ValueError:
raise ValidationException(self.messages['invalid'], repr(raw_data))
def deserialize(self, raw_data, **kwargs):
valid_data = super(FloatField, self).deserialize(raw_data, **kwargs)
return float(valid_data)
def serialize(self, py_data, **kwargs):
super(FloatField, self).serialize(py_data, **kwargs)
if self.serial_format:
try:
return self.serial_format.format(py_data)
except (KeyError, ValueError):
msg = self.messages['format'] % dict(format=self.serial_format)
raise ValidationException(msg, py_data)
return str(py_data)
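# Illustrative example (not part of the module): serial_format uses
# str.format syntax for serialization.
#
#   field = FloatField(serial_format='{:.2f}', min=0.0)
#   field.validate('3.14159')     # -> '3.14159' (the raw data is returned)
#   field.deserialize('3.14159')  # -> 3.14159
#   field.serialize(3.14159)      # -> '3.14'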
class NonNegativeFloat(FloatField):
"""
    Field to represent a non-negative floating point value.
"""
min = 0
class BooleanField(BaseField):
"""Field to represent a boolean. The string ``'True'`` (case insensitive)
will be converted to ``True``, as will any positive integers and the
boolean value ``True``.
.. doctest::
>>> from xmodels import BooleanField
>>> BooleanField().validate('TRUE')
True
>>> BooleanField().validate('not true!')
False
>>> BooleanField().validate(42)
True
>>> BooleanField().validate(-3)
False
>>> BooleanField().validate(True)
True
"""
def validate(self, raw_data, **kwargs):
"""The string ``'True'`` (case insensitive) will be converted
to ``True``, as will any positive integers.
"""
super(BooleanField, self).validate(raw_data, **kwargs)
if isinstance(raw_data, string_types):
valid_data = raw_data.strip().lower() == 'true'
elif isinstance(raw_data, bool):
valid_data = raw_data
else:
valid_data = raw_data > 0
return valid_data
def serialize(self, py_data, **kwargs):
super(BooleanField, self).serialize(py_data, **kwargs)
if py_data:
return 'true'
return 'false'
class EnumField(CharField):
"""
Tests that the value is one of the members of a given list (options). There
can be no empty strings in options. value has to be a string.
If matchLower is True it will also compare value.lower() with the lower
case version of all strings in options.
"""
options = []
matchLower = True
messages = dict(
invalid='Invalid value',
notIn='Value must be one of: %(items)s (not %(value)r)')
def __init__(self, **kwargs):
super(EnumField, self).__init__(**kwargs)
self.options = kwargs.get('options', self.options)
self.messages.update(CharField.messages)
assert isinstance(self.options, list), \
'options need to be a list of strings.'
all_members_strings = True
for item in self.options:
all_members_strings = (all_members_strings and
isinstance(item, string_types))
assert all_members_strings, 'options need to be a list of strings.'
self.lookup = None
self.lookup_lower = None
def validate(self, raw_data, **kwargs):
string_value = super(EnumField, self).validate(raw_data, **kwargs)
if not self.lookup:
self.lookup = set(item for item in self.options)
if not self.lookup_lower:
self.lookup_lower = dict((item.lower(), item)
for item in self.options)
if string_value in self.lookup:
return string_value
lower_case_value = string_value.lower()
if lower_case_value in self.lookup_lower:
correct_value = self.lookup_lower[lower_case_value]
self._raw = correct_value
return correct_value
raise ValidationException(self.messages['notIn'] % dict(
items=self._options_str, value=raw_data), raw_data)
@property
def _options_str(self):
return '; '.join(map(str, self.options))
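# Illustrative example (not part of the module): matching is case-insensitive
# and the canonical spelling from `options` is returned.
#
#   EnumField(options=['Red', 'Green', 'Blue']).validate('green')  # -> 'Green'
#   EnumField(options=['Red', 'Green', 'Blue']).validate('pink')   # raises ValidationException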
class DateTimeField(BaseField):
"""Field to represent a datetime
The ``format`` parameter dictates the format of the input strings, and is
used in the construction of the :class:`datetime.datetime` object.
The ``serial_format`` parameter is a strftime formatted string for
serialization. If ``serial_format`` isn't specified, an ISO formatted
string will be returned by :meth:`~xmodels.DateTimeField.to_serial`.
"""
messages = dict(
parse='%(cls)s Error Parsing %(data)s with format %(format)s'
)
def __init__(self, **kwargs):
super(DateTimeField, self).__init__(**kwargs)
self.converted = None
def validate(self, raw_data, **kwargs):
"""The raw_data is returned unchanged."""
super(DateTimeField, self).validate(raw_data, **kwargs)
try:
if isinstance(raw_data, datetime.datetime):
self.converted = raw_data
elif self.serial_format is None:
# parse as iso8601
self.converted = parse(raw_data)
else:
self.converted = datetime.datetime.strptime(raw_data,
self.serial_format)
return raw_data
except (ParseError, ValueError) as e:
msg = self.messages['parse'] % dict(cls=self.__class__.__name__,
data=raw_data,
format=self.serial_format)
raise ValidationException(msg, raw_data)
def deserialize(self, raw_data, **kwargs):
"""A :class:`datetime.datetime` object is returned."""
super(DateTimeField, self).deserialize(raw_data, **kwargs)
return self.converted
def serialize(self, py_data, **kwargs):
time_obj = self.deserialize(py_data, **kwargs)
if not self.serial_format:
return time_obj.isoformat()
return time_obj.strftime(self.serial_format)
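# Illustrative example (not part of the module): without serial_format the
# field parses and serializes ISO 8601, otherwise it uses strptime/strftime.
#
#   field = DateTimeField(serial_format='%Y-%m-%d %H:%M')
#   field.deserialize('2015-03-14 09:26')  # -> datetime.datetime(2015, 3, 14, 9, 26)
#   field.serialize('2015-03-14 09:26')    # -> '2015-03-14 09:26'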
class DateField(DateTimeField):
"""Field to represent a :mod:`datetime.date`"""
def validate(self, raw_data, **kwargs):
try:
if isinstance(raw_data, datetime.datetime):
valid_data = raw_data.date()
elif isinstance(raw_data, datetime.date):
valid_data = raw_data
elif self.serial_format is None:
# parse as iso8601
valid_data = parse_date(raw_data).date()
else:
valid_data = datetime.datetime.strptime(
raw_data, self.serial_format).date()
self.converted = valid_data
return raw_data
except (ParseError, ValueError) as e:
msg = self.messages['parse'] % dict(cls=self.__class__.__name__,
data=raw_data,
format=self.serial_format)
raise ValidationException(msg, raw_data)
class TimeField(DateTimeField):
"""Field to represent a :mod:`datetime.time`"""
def validate(self, raw_data, **kwargs):
try:
if isinstance(raw_data, datetime.datetime):
valid_data = raw_data.time()
elif isinstance(raw_data, datetime.time):
valid_data = raw_data
elif self.serial_format is None:
# parse as iso8601
valid_data = parse_time(raw_data).time()
else:
valid_data = datetime.datetime.strptime(
raw_data, self.serial_format).time()
self.converted = valid_data
return raw_data
except (ParseError, ValueError) as e:
msg = self.messages['parse'] % dict(cls=self.__class__.__name__,
data=raw_data,
format=self.serial_format)
raise ValidationException(msg, raw_data)
class WrappedObjectField(BaseField):
"""Superclass for any fields that wrap an object"""
def __init__(self, wrapped_class, **kwargs):
self._wrapped_class = wrapped_class
self.accept_none = kwargs.get('accept_none', False)
super(WrappedObjectField, self).__init__(**kwargs)
def __str__(self):
return ''.join([self.__class__.__name__, ': ',
self._wrapped_class.__name__])
def populate(self, raw_data, **kwargs):
if isinstance(raw_data, self._wrapped_class):
obj = raw_data
else:
obj = self._wrapped_class()
if isinstance(raw_data, (dict, OrderedDict)):
obj.populate(raw_data, **kwargs)
elif raw_data is not None:
obj.populate({'#text': raw_data}, **kwargs)
return obj
def validate(self, raw_data, **kwargs):
super(WrappedObjectField, self).validate(raw_data, **kwargs)
obj = self.populate(raw_data, **kwargs)
obj.validate(**kwargs)
return obj
def deserialize(self, raw_data, **kwargs):
obj = super(WrappedObjectField, self).deserialize(raw_data, **kwargs)
return obj.deserialize(**kwargs)
def serialize(self, py_data, **kwargs):
return py_data.serialize(**kwargs)
@property
def name_space(self):
meta = getattr(self._wrapped_class, '_meta', None)
if meta:
return getattr(meta, 'name_space', None)
class ModelField(WrappedObjectField):
"""Field containing a model instance
Use this field when you wish to nest one object inside another.
It takes a single required argument, which is the nested class.
For example, given the following dictionary::
some_data = {
'first_item': 'Some value',
'second_item': {
'nested_item': 'Some nested value',
},
}
You could build the following classes
(note that you have to define the inner nested models first)::
class MyNestedModel(xmodels.Model):
nested_item = xmodels.CharField()
class MyMainModel(xmodels.Model):
first_item = xmodels.CharField()
second_item = xmodels.ModelField(MyNestedModel)
"""
def __init__(self, wrapped_class, **kwargs):
super(ModelField, self).__init__(wrapped_class, **kwargs)
self._model_instance = None
def validate(self, raw_data, **kwargs):
kwargs.update(instance_index=None)
return super(ModelField, self).validate(raw_data, **kwargs)
class ModelCollectionField(WrappedObjectField):
"""Field containing a list of model instances.
    Use this field when your source data dictionary contains a list of
    dictionaries. It takes a single required argument: the nested class
    that each item in the list should be converted to.
For example::
some_data = {
'list': [
{'value': 'First value'},
{'value': 'Second value'},
{'value': 'Third value'},
]
}
class MyNestedModel(xmodels.Model):
value = xmodels.CharField()
class MyMainModel(xmodels.Model):
list = xmodels.ModelCollectionField(MyNestedModel)
"""
def __init__(self, wrapped_class, **kwargs):
super(ModelCollectionField, self).__init__(wrapped_class, **kwargs)
def populate(self, raw_data, **kwargs):
if not isinstance(raw_data, list):
raw_data = [raw_data]
result = []
for index, item in enumerate(raw_data):
path = kwargs.get('path', '<inst>') + '[%d]' % index
kwargs_copy = dict((key, value) for key, value in kwargs.items())
kwargs_copy.update(path=path)
obj = super(ModelCollectionField, self).populate(item,
**kwargs_copy)
result.append(obj)
return result
def validate(self, raw_data, **kwargs):
objects = self.populate(raw_data, **kwargs)
result = []
for index, item in enumerate(objects):
kwargs.update(instance_index=index)
item.validate(**kwargs)
result.append(item)
return result
def deserialize(self, raw_data, **kwargs):
objects = self.validate(raw_data, **kwargs)
return [obj.deserialize(**kwargs) for obj in objects]
def serialize(self, py_data, **kwargs):
objects = self.validate(py_data, **kwargs)
return [obj.serialize(**kwargs) for obj in objects]
class FieldCollectionField(BaseField):
"""Field containing a list of the same type of fields.
The constructor takes an instance of the field.
Here are some examples::
data = {
'legal_name': 'John Smith',
'aliases': ['Larry', 'Mo', 'Curly']
}
class Person(Model):
legal_name = CharField()
aliases = FieldCollectionField(CharField())
p = Person(data)
And now a quick REPL session::
FIXME doctest
Here is a bit more complicated example involving args and kwargs::
data = {
'name': 'San Andreas',
'dates': ['1906-05-11', '1948-11-02', '1970-01-01']
}
class FaultLine(Model):
name = CharField()
        earthquake_dates = FieldCollectionField(DateField(serial_format='%Y-%m-%d'),
                                                source='dates')
    f = FaultLine(data)
    Notice that ``source`` is passed to the
    :class:`~xmodels.FieldCollectionField`, not the
    :class:`~xmodels.DateField`.
    """
def __init__(self, field_instance, **kwargs):
super(FieldCollectionField, self).__init__(**kwargs)
if not isinstance(field_instance, BaseField):
raise TypeError('Field instance of type BaseField expected.')
self._instance = field_instance
def __str__(self):
return '%s(%s)' % (self.__class__.__name__,
self._instance.__class__.__name__)
def validate(self, raw_data, **kwargs):
if not isinstance(raw_data, list):
raw_data = [raw_data]
result = []
for item in raw_data:
result.append(self._instance.validate(item))
return result
def deserialize(self, raw_data, **kwargs):
items = self.validate(raw_data, **kwargs)
result = []
for item in items:
result.append(self._instance.deserialize(item))
return result
def serialize(self, py_data, **kwargs):
if not isinstance(py_data, list):
py_data = [py_data]
result = []
for item in py_data:
result.append(self._instance.serialize(item))
return result
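# Hypothetical usage sketch (not part of the original module), standing in for
# the missing doctest above: every item of the list is run through the single
# wrapped field instance, and scalars are wrapped into a one-element list.
# Uses DateTimeField from this module; ISO parsing via ``parse`` is assumed to
# behave as the code above expects.
def _example_field_collection():
    dates = FieldCollectionField(DateTimeField())
    assert dates.validate(['2015-06-01', '2015-06-02']) == ['2015-06-01',
                                                            '2015-06-02']
    assert dates.validate('2015-06-03') == ['2015-06-03']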
|
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import random
import subprocess as sub
import getopt
def identity(x):
return x
def cygpath(x):
command = ["cygpath", "-wp", x]
p = sub.Popen(command,stdout=sub.PIPE)
output, errors = p.communicate()
lines = output.split("\n")
return lines[0]
if sys.platform == "cygwin":
normclasspath = cygpath
else:
normclasspath = identity
CLIENT_CONF_FILE = ""
JSTORM_DIR = "/".join(os.path.realpath( __file__ ).split("/")[:-2])
JSTORM_CONF_DIR = os.getenv("JSTORM_CONF_DIR", JSTORM_DIR + "/conf" )
LOGBACK_CONF = JSTORM_CONF_DIR + "/jstorm.logback.xml"
CONFIG_OPTS = []
EXCLUDE_JARS = []
INCLUDE_JARS = []
STATUS = 0
def check_java():
check_java_cmd = 'which java'
ret = os.system(check_java_cmd)
if ret != 0:
print("Failed to find java, please add java to PATH")
sys.exit(-1)
def get_config_opts():
global CONFIG_OPTS
return "-Dstorm.options=" + (','.join(CONFIG_OPTS)).replace(' ', "%%%%")
def get_client_childopts():
ret = (" -Dstorm.root.logger=INFO,stdout -Dlogback.configurationFile=" + JSTORM_DIR +
"/conf/client_logback.xml -Dlog4j.configuration=File:" + JSTORM_DIR +
"/conf/client_log4j.properties")
if CLIENT_CONF_FILE != "":
ret += (" -Dstorm.conf.file=" + CLIENT_CONF_FILE)
return ret
def get_server_childopts(log_name):
ret = (" -Dlogfile.name=%s -Dlogback.configurationFile=%s" %(log_name, LOGBACK_CONF))
return ret
if not os.path.exists(JSTORM_DIR + "/RELEASE"):
print "******************************************"
print "The jstorm client can only be run from within a release. You appear to be trying to run the client from a checkout of JStorm's source code."
print "\nYou can download a JStorm release "
print "******************************************"
sys.exit(1)
def get_jars_full(adir):
    files = os.listdir(adir)
    ret = []
    for f in files:
        if not f.endswith(".jar"):
            continue
        excluded = False
        for exclude_jar in EXCLUDE_JARS:
            if f.find(exclude_jar) >= 0:
                excluded = True
                break
        if excluded:
            print "Don't add " + f + " to classpath"
        else:
            ret.append(adir + "/" + f)
    return ret
def get_classpath(extrajars):
ret = []
ret.extend(extrajars)
ret.extend(get_jars_full(JSTORM_DIR))
ret.extend(get_jars_full(JSTORM_DIR + "/lib"))
ret.extend(INCLUDE_JARS)
return normclasspath(":".join(ret))
def confvalue(name, extrapaths):
command = [
"java", "-client", "-Xms256m", "-Xmx256m", get_config_opts(), "-cp", get_classpath(extrapaths), "backtype.storm.command.config_value", name
]
p = sub.Popen(command, stdout=sub.PIPE)
output, errors = p.communicate()
lines = output.split("\n")
for line in lines:
tokens = line.split(" ")
if tokens[0] == "VALUE:":
return " ".join(tokens[1:])
print "Failed to get config " + name
print errors
print output
def print_localconfvalue(name):
"""Syntax: [jstorm localconfvalue conf-name]
Prints out the value for conf-name in the local JStorm configs.
The local JStorm configs are the ones in ~/.jstorm/storm.yaml merged
in with the configs in defaults.yaml.
"""
print name + ": " + confvalue(name, [JSTORM_CONF_DIR])
def print_remoteconfvalue(name):
"""Syntax: [jstorm remoteconfvalue conf-name]
Prints out the value for conf-name in the cluster's JStorm configs.
The cluster's JStorm configs are the ones in $STORM-PATH/conf/storm.yaml
merged in with the configs in defaults.yaml.
This command must be run on a cluster machine.
"""
print name + ": " + confvalue(name, [JSTORM_CONF_DIR])
def exec_storm_class(klass, jvmtype="-server", childopts="", extrajars=[], args=[]):
nativepath = confvalue("java.library.path", extrajars)
args_str = " ".join(map(lambda s: "\"" + s + "\"", args))
command = "java " + jvmtype + " -Djstorm.home=" + JSTORM_DIR + " " + get_config_opts() + " -Djava.library.path=" + nativepath + " " + childopts + " -cp " + get_classpath(extrajars) + " " + klass + " " + args_str
print "Running: " + command
global STATUS
STATUS = os.system(command)
def jar(jarfile, klass, *args):
"""Syntax: [jstorm jar topology-jar-path class ...]
Runs the main method of class with the specified arguments.
The jstorm jars and configs in $JSTORM_CONF_DIR/storm.yaml are put on the classpath.
The process is configured so that StormSubmitter
(https://github.com/alibaba/jstorm/wiki/JStorm-Chinese-Documentation)
will upload the jar at topology-jar-path when the topology is submitted.
"""
childopts = "-Dstorm.jar=" + jarfile + get_client_childopts()
exec_storm_class(
klass,
jvmtype="-client -Xms256m -Xmx256m",
extrajars=[jarfile, JSTORM_CONF_DIR, JSTORM_DIR + "/bin", CLIENT_CONF_FILE],
args=args,
childopts=childopts)
def zktool(*args):
"""Syntax: [jstorm jar topology-jar-path class ...]
Runs the main method of class with the specified arguments.
The jstorm jars and configs in ~/.jstorm are put on the classpath.
The process is configured so that StormSubmitter
(https://github.com/alibaba/jstorm/wiki/JStorm-Chinese-Documentation)
will upload the jar at topology-jar-path when the topology is submitted.
"""
childopts = get_client_childopts()
exec_storm_class(
"com.alibaba.jstorm.zk.ZkTool",
jvmtype="-client -Xms256m -Xmx256m",
extrajars=[ JSTORM_CONF_DIR, CLIENT_CONF_FILE],
args=args,
childopts=childopts)
def kill(*args):
"""Syntax: [jstorm kill topology-name [wait-time-secs]]
Kills the topology with the name topology-name. JStorm will
first deactivate the topology's spouts for the duration of
the topology's message timeout to allow all messages currently
being processed to finish processing. JStorm will then shutdown
the workers and clean up their state. You can override the length
of time JStorm waits between deactivation and shutdown.
"""
childopts = get_client_childopts()
exec_storm_class(
"backtype.storm.command.kill_topology",
args=args,
jvmtype="-client -Xms256m -Xmx256m",
extrajars=[JSTORM_CONF_DIR, JSTORM_DIR + "/bin", CLIENT_CONF_FILE],
childopts=childopts)
def activate(*args):
"""Syntax: [jstorm activate topology-name]
Activates the specified topology's spouts.
"""
childopts = get_client_childopts()
exec_storm_class(
"backtype.storm.command.activate",
args=args,
jvmtype="-client -Xms256m -Xmx256m",
extrajars=[JSTORM_CONF_DIR, JSTORM_DIR + "/bin", CLIENT_CONF_FILE],
childopts=childopts)
def deactivate(*args):
"""Syntax: [jstorm deactivate topology-name]
Deactivates the specified topology's spouts.
"""
childopts = get_client_childopts()
exec_storm_class(
"backtype.storm.command.deactivate",
args=args,
jvmtype="-client -Xms256m -Xmx256m",
extrajars=[JSTORM_CONF_DIR, JSTORM_DIR + "/bin", CLIENT_CONF_FILE],
childopts=childopts)
def rebalance(*args):
"""Syntax: [jstorm rebalance topology-name [-w wait-time-secs]]
Sometimes you may wish to spread out where the workers for a topology
are running. For example, let's say you have a 10 node cluster running
4 workers per node, and then let's say you add another 10 nodes to
the cluster. You may wish to have JStorm spread out the workers for the
running topology so that each node runs 2 workers. One way to do this
is to kill the topology and resubmit it, but JStorm provides a "rebalance"
command that provides an easier way to do this.
Rebalance will first deactivate the topology for the duration of the
message timeout and then redistribute
the workers evenly around the cluster. The topology will then return to
its previous state of activation (so a deactivated topology will still
be deactivated and an activated topology will go back to being activated).
"""
childopts = get_client_childopts()
exec_storm_class(
"backtype.storm.command.rebalance",
args=args,
jvmtype="-client -Xms256m -Xmx256m",
extrajars=[JSTORM_CONF_DIR, JSTORM_DIR + "/bin", CLIENT_CONF_FILE],
childopts=childopts)
def restart(*args):
"""Syntax: [jstorm restart topology-name [conf]]
"""
childopts = get_client_childopts()
exec_storm_class(
"backtype.storm.command.restart",
args=args,
jvmtype="-client -Xms256m -Xmx256m",
extrajars=[JSTORM_CONF_DIR, JSTORM_DIR + "/bin", CLIENT_CONF_FILE],
childopts=childopts)
def update_config(*args):
"""Syntax: [jstorm restart topology-name [conf]]
"""
childopts = get_client_childopts()
exec_storm_class(
"backtype.storm.command.update_config",
args=args,
jvmtype="-client -Xms256m -Xmx256m",
extrajars=[JSTORM_CONF_DIR, JSTORM_DIR + "/bin", CLIENT_CONF_FILE],
childopts=childopts)
def nimbus():
"""Syntax: [jstorm nimbus]
Launches the nimbus daemon. This command should be run under
supervision with a tool like daemontools or monit.
See Setting up a JStorm cluster for more information.
(https://github.com/alibaba/jstorm/wiki/JStorm-Chinese-Documentation)
"""
cppaths = [JSTORM_CONF_DIR]
nimbus_classpath = confvalue("nimbus.classpath", cppaths)
childopts = confvalue("nimbus.childopts", cppaths) + get_server_childopts("nimbus.log")
exec_storm_class(
"com.alibaba.jstorm.daemon.nimbus.NimbusServer",
jvmtype="-server",
extrajars=(cppaths+[nimbus_classpath]),
childopts=childopts)
def supervisor():
"""Syntax: [jstorm supervisor]
Launches the supervisor daemon. This command should be run
under supervision with a tool like daemontools or monit.
See Setting up a JStorm cluster for more information.
(https://github.com/alibaba/jstorm/wiki/JStorm-Chinese-Documentation)
"""
cppaths = [JSTORM_CONF_DIR]
childopts = confvalue("supervisor.childopts", cppaths) + get_server_childopts("supervisor.log")
exec_storm_class(
"com.alibaba.jstorm.daemon.supervisor.Supervisor",
jvmtype="-server",
extrajars=cppaths,
childopts=childopts)
def drpc():
"""Syntax: [jstorm drpc]
Launches a DRPC daemon. This command should be run under supervision
with a tool like daemontools or monit.
See Distributed RPC for more information.
(https://github.com/alibaba/jstorm/wiki/JStorm-Chinese-Documentation)
"""
cppaths = [JSTORM_CONF_DIR]
childopts = confvalue("drpc.childopts", cppaths) + get_server_childopts("drpc.log")
exec_storm_class(
"com.alibaba.jstorm.drpc.Drpc",
jvmtype="-server",
extrajars=cppaths,
childopts=childopts)
def print_classpath():
"""Syntax: [jstorm classpath]
Prints the classpath used by the jstorm client when running commands.
"""
print get_classpath([])
def print_commands():
"""Print all client commands and link to documentation"""
print "jstorm command [--config client_storm.yaml] [--exclude-jars exclude1.jar,exclude2.jar] [-c key1=value1,key2=value2][command parameter]"
print "Commands:\n\t", "\n\t".join(sorted(COMMANDS.keys()))
print "\n\t[--config client_storm.yaml]\t\t\t optional, setting client's storm.yaml"
print "\n\t[--exclude-jars exclude1.jar,exclude2.jar]\t optional, exclude jars, avoid jar conflict"
print "\n\t[-c key1=value1,key2=value2]\t\t\t optional, add key=value pair to configuration"
print "\nHelp:", "\n\thelp", "\n\thelp <command>"
print "\nDocumentation for the jstorm client can be found at https://github.com/alibaba/jstorm/wiki/JStorm-Chinese-Documentation\n"
def print_usage(command=None):
"""Print one help message or list of available commands"""
    if command is not None:
        if command in COMMANDS:
print (COMMANDS[command].__doc__ or
"No documentation provided for <%s>" % command)
else:
print "<%s> is not a valid command" % command
else:
print_commands()
def unknown_command(*args):
print "Unknown command: [jstorm %s]" % ' '.join(sys.argv[1:])
print_usage()
def metrics_Monitor(*args):
"""Syntax: [jstorm metricsMonitor topologyname bool]
Enable or disable the metrics monitor of one topology.
"""
childopts = get_client_childopts()
exec_storm_class(
"backtype.storm.command.metrics_monitor",
args=args,
jvmtype="-client -Xms256m -Xmx256m",
extrajars=[JSTORM_CONF_DIR, JSTORM_DIR + "/bin", CLIENT_CONF_FILE],
childopts=childopts)
def list(*args):
"""Syntax: [jstorm list]
List cluster information
"""
childopts = get_client_childopts()
exec_storm_class(
"backtype.storm.command.list",
args=args,
jvmtype="-client -Xms256m -Xmx256m",
extrajars=[JSTORM_CONF_DIR, JSTORM_DIR + "/bin", CLIENT_CONF_FILE],
childopts=childopts)
COMMANDS = {"jar": jar, "kill": kill, "nimbus": nimbus, "zktool": zktool,
"drpc": drpc, "supervisor": supervisor, "localconfvalue": print_localconfvalue,
"remoteconfvalue": print_remoteconfvalue, "classpath": print_classpath,
"activate": activate, "deactivate": deactivate, "rebalance": rebalance, "help": print_usage,
"metricsMonitor": metrics_Monitor, "list": list, "restart": restart, "update_config": update_config}
def parse_config(config_list):
global CONFIG_OPTS
if len(config_list) > 0:
for config in config_list:
CONFIG_OPTS.append(config)
def parse_exclude_jars(jars):
global EXCLUDE_JARS
EXCLUDE_JARS = jars.split(",")
print " Excludes jars:"
print EXCLUDE_JARS
def parse_include_jars(jars):
global INCLUDE_JARS
INCLUDE_JARS = jars.split(",")
print " Include jars:"
print INCLUDE_JARS
def parse_config_opts(args):
curr = args[:]
curr.reverse()
config_list = []
args_list = []
while len(curr) > 0:
token = curr.pop()
if token == "-c":
config_list.append(curr.pop())
elif token == "--config":
global CLIENT_CONF_FILE
CLIENT_CONF_FILE = curr.pop()
elif token == "--exclude-jars":
parse_exclude_jars(curr.pop())
elif token == "--include-jars":
parse_include_jars(curr.pop())
else:
args_list.append(token)
return config_list, args_list
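def _example_parse_config_opts():
    # Hypothetical sketch (not part of the original script): parse_config_opts
    # pulls "-c key=value" pairs (and --config/--exclude-jars/--include-jars
    # options) out of the argument list and returns the remaining arguments
    # in their original order.
    config_list, args_list = parse_config_opts(
        ["-c", "topology.workers=4", "jar", "topology.jar"])
    assert config_list == ["topology.workers=4"]
    assert args_list == ["jar", "topology.jar"]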
def main():
if len(sys.argv) <= 1:
print_usage()
sys.exit(-1)
global CONFIG_OPTS
config_list, args = parse_config_opts(sys.argv[1:])
parse_config(config_list)
COMMAND = args[0]
ARGS = args[1:]
    if COMMANDS.get(COMMAND) is None:
unknown_command(COMMAND)
sys.exit(-1)
if len(ARGS) != 0 and ARGS[0] == "help":
print_usage(COMMAND)
sys.exit(0)
try:
(COMMANDS.get(COMMAND, "help"))(*ARGS)
except Exception, msg:
print(msg)
print_usage(COMMAND)
sys.exit(-1)
sys.exit(STATUS)
if __name__ == "__main__":
check_java()
main()
|
|
# -*- coding: utf-8 -*-
# Copyright 2015 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
felix.test.test_fiptables
~~~~~~~~~~~~~~~~~~~~~~~~~
Tests of iptables handling function.
"""
from collections import defaultdict
import copy
import logging
import re
from mock import patch, call, Mock, ANY
from calico.felix import fiptables
from calico.felix.fiptables import IptablesUpdater
from calico.felix.futils import FailedSystemCall
from calico.felix.test.base import BaseTestCase
_log = logging.getLogger(__name__)
EXTRACT_UNREF_TESTS = [
("""Chain INPUT (policy DROP)
target prot opt source destination
felix-INPUT all -- anywhere anywhere
ACCEPT tcp -- anywhere anywhere tcp dpt:domain
Chain FORWARD (policy DROP)
target prot opt source destination
felix-FORWARD all -- anywhere anywhere
ufw-track-forward all -- anywhere anywhere
Chain DOCKER (1 references)
target prot opt source destination
Chain felix-FORWARD (1 references)
target prot opt source destination
felix-FROM-ENDPOINT all -- anywhere anywhere
felix-TO-ENDPOINT all -- anywhere anywhere
Chain-with-bad-name all -- anywhere anywhere
ACCEPT all -- anywhere anywhere
Chain felix-temp (0 references)
target prot opt source destination
felix-FROM-ENDPOINT all -- anywhere anywhere
ACCEPT all -- anywhere anywhere
""",
set(["felix-temp"])),
]
MISSING_CHAIN_DROP = '--append %s --jump DROP -m comment --comment "WARNING Missing chain DROP:"'
class TestIptablesUpdater(BaseTestCase):
def setUp(self):
super(TestIptablesUpdater, self).setUp()
self.stub = IptablesStub("filter")
self.m_config = Mock()
self.m_config.REFRESH_INTERVAL = 0 # disable refresh thread
self.ipt = IptablesUpdater("filter", self.m_config, 4)
self.ipt._execute_iptables = self.stub.apply_iptables_restore
self.check_output_patch = patch("gevent.subprocess.check_output",
autospec=True)
self.m_check_output = self.check_output_patch.start()
self.m_check_output.side_effect = self.fake_check_output
def fake_check_output(self, cmd, *args, **kwargs):
_log.info("Stubbing out call to %s", cmd)
if cmd == ["iptables-save", "--table", "filter"]:
return self.stub.generate_iptables_save()
elif cmd == ['iptables', '--wait', '--list', '--table', 'filter']:
return self.stub.generate_iptables_list()
else:
raise AssertionError("Unexpected call %r" % cmd)
def tearDown(self):
self.check_output_patch.stop()
super(TestIptablesUpdater, self).tearDown()
def test_rewrite_chains_stub(self):
"""
Tests that referencing a chain causes it to get stubbed out.
"""
self.ipt.rewrite_chains(
{"foo": ["--append foo --jump bar"]},
{"foo": set(["bar"])},
async=True,
)
self.step_actor(self.ipt)
self.assertEqual(self.stub.chains_contents,
{"foo": ["--append foo --jump bar"],
'bar': [MISSING_CHAIN_DROP % "bar"]})
def test_rewrite_chains_cover(self):
"""
Hits remaining code paths in rewrite chains.
"""
cb = Mock()
self.ipt.rewrite_chains(
{"foo": ["--append foo --jump bar"]},
{"foo": set(["bar"])},
suppress_upd_log=True,
async=True,
callback=cb,
)
self.step_actor(self.ipt)
cb.assert_called_once_with(None)
def test_delete_required_chain_stub(self):
"""
Tests that deleting a required chain stubs it out instead.
"""
# Exit the graceful restart period, during which we do not stub out
# chains.
self.ipt.cleanup(async=True)
# Install a couple of chains. foo depends on bar.
self.ipt.rewrite_chains(
{"foo": ["--append foo --jump bar"],
"bar": ["--append bar --jump ACCEPT"]},
{"foo": set(["bar"]),
"bar": set()},
async=True,
)
self.step_actor(self.ipt)
# Both chains should be programmed as normal.
self.assertEqual(self.stub.chains_contents,
{"foo": ["--append foo --jump bar"],
'bar': ["--append bar --jump ACCEPT"] })
# Deleting bar should stub it out instead.
self.ipt.delete_chains(["bar"], async=True)
self.step_actor(self.ipt)
self.assertEqual(self.stub.chains_contents,
{"foo": ["--append foo --jump bar"],
'bar': [MISSING_CHAIN_DROP % "bar"] })
def test_cleanup_with_dependencies(self):
# Set up the dataplane with some chains that the IptablesUpdater
# doesn't know about and some that it will know about.
self.stub.apply_iptables_restore("""
*filter
:INPUT DROP [10:505]
:FORWARD DROP [0:0]
:OUTPUT ACCEPT [40:1600]
# These non-felix chains should be ignored
:ignore-me -
:ignore-me-too -
# These are left-over felix chains. Some depend on each other. They
# can only be cleaned up in the correct order.
:felix-foo - [0:0]
:felix-bar -
:felix-foo -
:felix-baz -
:felix-biff -
--append felix-foo --src 10.0.0.1/32 --jump felix-bar
# baz depends on biff; cleanup needs to detect that.
--append felix-baz --src 10.0.0.2/32 --jump felix-biff
--append felix-biff --src 10.0.0.3/32 --jump DROP
--append ignore-me --jump ignore-me-too
--append ignore-me-too --jump DROP
""".splitlines())
# IptablesUpdater hears about some chains before the cleanup. These
# partially overlap with the ones that are already there.
self.ipt.rewrite_chains(
{"felix-foo": ["--append felix-foo --jump felix-bar",
"--append felix-foo --jump felix-baz",
"--append felix-foo --jump felix-boff"],
"felix-bar": ["--append felix-bar --jump ACCEPT"]},
# felix-foo depends on:
# * a new chain that's also being programmed
# * a pre-existing chain that is present at start of day
# * a new chain that isn't present at all.
{"felix-foo": set(["felix-bar", "felix-baz", "felix-boff"]),
"felix-bar": set()},
async=True,
)
self.step_actor(self.ipt)
# Dataplane should now have all the new chains in place, including
# a stub for felix-boff. However, the old chains should not have been
# cleaned up.
self.stub.assert_chain_contents({
"INPUT": [],
"FORWARD": [],
"OUTPUT": [],
"ignore-me": ["--append ignore-me --jump ignore-me-too"],
"ignore-me-too": ["--append ignore-me-too --jump DROP"],
"felix-foo": ["--append felix-foo --jump felix-bar",
"--append felix-foo --jump felix-baz",
"--append felix-foo --jump felix-boff"],
"felix-bar": ["--append felix-bar --jump ACCEPT"],
"felix-baz": ["--append felix-baz --src 10.0.0.2/32 "
"--jump felix-biff"],
"felix-boff": [MISSING_CHAIN_DROP % "felix-boff"],
"felix-biff": ["--append felix-biff --src 10.0.0.3/32 --jump DROP"],
})
# Issue the cleanup.
self.ipt.cleanup(async=True)
self.step_actor(self.ipt)
# Should now have stubbed-out chains for all the ones that are not
# programmed.
self.stub.assert_chain_contents({
# Non felix chains ignored:
"INPUT": [],
"FORWARD": [],
"OUTPUT": [],
"ignore-me": ["--append ignore-me --jump ignore-me-too"],
"ignore-me-too": ["--append ignore-me-too --jump DROP"],
# Explicitly-programmed chains programmed.
"felix-foo": ["--append felix-foo --jump felix-bar",
"--append felix-foo --jump felix-baz",
"--append felix-foo --jump felix-boff"],
"felix-bar": ["--append felix-bar --jump ACCEPT"],
# All required but unknown chains stubbed.
"felix-baz": [MISSING_CHAIN_DROP % "felix-baz"],
"felix-boff": [MISSING_CHAIN_DROP % "felix-boff"],
# felix-biff deleted, even though it was referenced by felix-baz
# before.
})
def test_cleanup_bad_read_back(self):
# IptablesUpdater hears about some chains before the cleanup.
self.ipt.rewrite_chains(
{"felix-foo": ["--append felix-foo --jump felix-boff"]},
{"felix-foo": set(["felix-boff"])},
async=True,
)
self.step_actor(self.ipt)
self.stub.assert_chain_contents({
"felix-foo": ["--append felix-foo --jump felix-boff"],
"felix-boff": [MISSING_CHAIN_DROP % "felix-boff"],
})
# Some other process then breaks our chains.
self.stub.chains_contents = {}
self.stub.iptables_save_output = [
None, # Start of cleanup.
# End of cleanup. Out of sync:
"*filter\n"
":INPUT DROP [68:4885]\n"
":FORWARD DROP [0:0]\n"
":OUTPUT ACCEPT [20:888]\n"
":DOCKER - [0:0]\n"
"-A INPUT -i lxcbr0 -p tcp -m tcp --dport 53 -j ACCEPT\n"
"-A FORWARD -o lxcbr0 -j ACCEPT\n"
"COMMIT\n"
]
_log.info("Forcing iptables-save to always return %s",
self.stub.iptables_save_output)
# Issue the cleanup.
with patch.object(fiptables._log, "error") as m_error:
self.ipt.cleanup(async=True)
self.step_actor(self.ipt)
m_error.assert_called_once_with(
ANY,
set([]),
set([]),
set(["felix-foo", "felix-boff"])
)
self.stub.assert_chain_contents({
"felix-foo": ["--append felix-foo --jump felix-boff"],
"felix-boff": [MISSING_CHAIN_DROP % "felix-boff"],
})
def test_ensure_rule_inserted(self):
fragment = "FOO --jump DROP"
with patch.object(self.ipt, "_execute_iptables") as m_exec:
m_exec.side_effect = iter([FailedSystemCall("Message", [], 1, "",
"line 2 failed"),
None,
None])
self.ipt.ensure_rule_inserted(fragment, async=True)
self.step_actor(self.ipt)
self.assertEqual(
m_exec.mock_calls,
[
call(["*filter",
"--delete FOO --jump DROP",
"--insert FOO --jump DROP",
"COMMIT"],
fail_log_level=logging.DEBUG),
call(["*filter",
"--insert FOO --jump DROP",
"COMMIT"]),
])
self.assertTrue(fragment in self.ipt._inserted_rule_fragments)
def test_insert_remove_tracking(self):
fragment = "FOO --jump DROP"
with patch.object(self.ipt, "_execute_iptables") as m_exec:
m_exec.side_effect = [
# Insert.
None,
# Remove: requires an exception to terminate loop.
None,
FailedSystemCall("Message", [], 1, "", "line 2 failed"),
# Insert.
None,
]
self.ipt.ensure_rule_inserted(fragment, async=True)
self.step_actor(self.ipt)
self.assertTrue(fragment in self.ipt._inserted_rule_fragments)
self.assertTrue(fragment not in self.ipt._removed_rule_fragments)
self.ipt.ensure_rule_removed(fragment, async=True)
self.step_actor(self.ipt)
self.assertTrue(fragment not in self.ipt._inserted_rule_fragments)
self.assertTrue(fragment in self.ipt._removed_rule_fragments)
self.ipt.ensure_rule_inserted(fragment, async=True)
self.step_actor(self.ipt)
self.assertTrue(fragment in self.ipt._inserted_rule_fragments)
self.assertTrue(fragment not in self.ipt._removed_rule_fragments)
def test_ensure_rule_removed(self):
fragment = "FOO --jump DROP"
with patch.object(self.ipt, "_execute_iptables") as m_exec:
m_exec.side_effect = iter([None,
FailedSystemCall("Message", [], 1, "",
"line 2 failed")])
self.ipt.ensure_rule_removed(fragment, async=True)
self.step_actor(self.ipt)
exp_call = call([
'*filter',
'--delete FOO --jump DROP',
'COMMIT',
], fail_log_level=logging.DEBUG)
self.assertEqual(m_exec.mock_calls, [exp_call] * 2)
def test_ensure_rule_removed_not_present(self):
with patch.object(self.ipt, "_execute_iptables") as m_exec:
m_exec.side_effect = iter([FailedSystemCall("Message", [], 1, "",
"line 2 failed")])
self.ipt.ensure_rule_removed("FOO --jump DROP", async=True)
self.step_actor(self.ipt)
exp_call = call([
'*filter',
'--delete FOO --jump DROP',
'COMMIT',
], fail_log_level=logging.DEBUG)
self.assertEqual(m_exec.mock_calls, [exp_call])
def test_ensure_rule_removed_missing_dep(self):
with patch.object(self.ipt, "_execute_iptables") as m_exec:
m_exec.side_effect = iter([
FailedSystemCall("Message", [], 1, "",
"at line: 2\n"
"ipset doesn't exist")])
self.ipt.ensure_rule_removed("FOO --jump DROP", async=True)
self.step_actor(self.ipt)
exp_call = call([
'*filter',
'--delete FOO --jump DROP',
'COMMIT',
], fail_log_level=logging.DEBUG)
self.assertEqual(m_exec.mock_calls, [exp_call])
def test_ensure_rule_removed_error(self):
with patch.object(self.ipt, "_execute_iptables") as m_exec:
m_exec.side_effect = iter([FailedSystemCall("Message", [], 1, "",
"the foo is barred")])
f = self.ipt.ensure_rule_removed("FOO --jump DROP", async=True)
self.step_actor(self.ipt)
self.assertRaises(FailedSystemCall, f.get)
exp_call = call([
'*filter',
'--delete FOO --jump DROP',
'COMMIT',
], fail_log_level=logging.DEBUG)
self.assertEqual(m_exec.mock_calls, [exp_call])
def test_refresh_iptables(self):
self.ipt.ensure_rule_inserted("INPUT -j ACCEPT", async=True)
self.ipt.ensure_rule_inserted("INPUT -j DROP", async=True)
self.ipt.ensure_rule_removed("INPUT -j DROP", async=True)
self.step_actor(self.ipt)
self.ipt.refresh_iptables(async=True)
with patch.object(self.ipt, "_insert_rule") as m_insert_rule:
with patch.object(self.ipt, "_remove_rule") as m_remove_rule:
self.step_actor(self.ipt)
m_insert_rule.assert_called_once_with("INPUT -j ACCEPT",
log_level=logging.DEBUG)
m_remove_rule.assert_called_once_with("INPUT -j DROP",
log_level=logging.DEBUG)
class TestIptablesStub(BaseTestCase):
"""
Tests of our dummy iptables "stub". It's sufficiently complex
that giving it a few tests of its own adds a lot of confidence to
the tests that really rely on it.
"""
def setUp(self):
super(TestIptablesStub, self).setUp()
self.stub = IptablesStub("filter")
def test_gen_ipt_save(self):
self.stub.chains_contents = {
"foo": ["--append foo"]
}
self.assertEqual(
self.stub.generate_iptables_save(),
"*filter\n"
":foo - [0:0]\n"
"--append foo\n"
"COMMIT"
)
def test_gen_ipt_list(self):
self.stub.apply_iptables_restore("""
*filter
:foo - [0:0]
:bar -
--append foo --src 10.0.0.8/32 --jump bar
--append bar --jump DROP
""".splitlines())
self.assertEqual(
self.stub.generate_iptables_list(),
"Chain bar (1 references)\n"
"target prot opt source destination\n"
"DROP dummy -- anywhere anywhere\n"
"\n"
"Chain foo (0 references)\n"
"target prot opt source destination\n"
"bar dummy -- anywhere anywhere\n"
)
class TestUtilityFunctions(BaseTestCase):
def test_extract_unreffed_chains(self):
for inp, exp in EXTRACT_UNREF_TESTS:
output = fiptables._extract_our_unreffed_chains(inp)
self.assertEqual(exp, output, "Expected\n\n%s\n\nTo parse as: %s\n"
"but got: %s" % (inp, exp, output))
class IptablesStub(object):
"""
Fake version of the dataplane, accepts iptables-restore input and
stores it off. Can generate dummy versions of the corresponding
iptables-save and iptables --list output.
"""
def __init__(self, table):
self.table = table
self.chains_contents = defaultdict(list)
self.chain_dependencies = defaultdict(set)
self.new_contents = None
self.new_dependencies = None
self.declared_chains = None
self.iptables_save_output = []
def generate_iptables_save(self):
if self.iptables_save_output:
output = self.iptables_save_output.pop(0)
if output:
_log.debug("Forcing iptables-save output")
return output
lines = ["*" + self.table]
for chain_name in sorted(self.chains_contents.keys()):
lines.append(":%s - [0:0]" % chain_name)
for _, chain_content in sorted(self.chains_contents.items()):
lines.extend(chain_content)
lines.append("COMMIT")
return "\n".join(lines)
def generate_iptables_list(self):
_log.debug("Generating iptables --list for chains %s\n%s",
self.chains_contents, self.chain_dependencies)
chunks = []
for chain, entries in sorted(self.chains_contents.items()):
num_refs = 0
for deps in self.chain_dependencies.values():
if chain in deps:
num_refs += 1
chain_lines = [
"Chain %s (%s references)" % (chain, num_refs),
"target prot opt source destination"]
for rule in entries:
m = re.search(r'(?:--jump|-j|--goto|-g)\s+(\S+)', rule)
assert m, "Failed to generate listing for %r" % rule
action = m.group(1)
chain_lines.append(action + " dummy -- anywhere anywhere")
chunks.append("\n".join(chain_lines))
return "\n\n".join(chunks) + "\n"
def apply_iptables_restore(self, lines, **kwargs):
_log.debug("iptables-restore input:\n%s", "\n".join(lines))
table_name = None
self.new_contents = copy.deepcopy(self.chains_contents)
self.declared_chains = set()
self.new_dependencies = copy.deepcopy(self.chain_dependencies)
for line in lines:
line = line.strip()
if line.startswith("#") or not line:
continue
elif line.startswith("*"):
table_name = line[1:]
_log.debug("Processing table %s", table_name)
assert table_name == self.table
elif line.startswith(":"):
assert table_name, "Table should occur before chains."
splits = line[1:].split(" ")
_log.debug("Forward declaration %s, flushing chain", splits)
if len(splits) == 3:
chain_name, policy, counts = splits
if not re.match(r'\[\d+:\d+\]', counts):
raise AssertionError("Bad counts: %r" % line)
elif len(splits) == 2:
chain_name, policy = splits
else:
raise AssertionError(
"Invalid chain forward declaration line %r" % line)
if policy not in ("-", "DROP", "ACCEPT"):
raise AssertionError("Unexpected policy %r" % line)
self.declared_chains.add(chain_name)
self.new_contents[chain_name] = []
self.new_dependencies[chain_name] = set()
elif line.strip() == "COMMIT":
self._handle_commit()
else:
# Should be a rule fragment of some sort
assert table_name, "Table should occur before rules."
self._handle_rule(line)
# Implicit commit at end.
self._handle_commit()
def _handle_rule(self, rule):
splits = rule.split(" ")
ipt_op = splits[0]
chain = splits[1]
_log.debug("Rule op: %s, chain name: %s", ipt_op, chain)
if ipt_op in ("--append", "-A", "--insert", "-I"):
self.assert_chain_declared(chain, ipt_op)
if ipt_op in ("--append", "-A"):
self.new_contents[chain].append(rule)
else:
self.new_contents[chain].insert(0, rule)
m = re.search(r'(?:--jump|-j|--goto|-g)\s+(\S+)', rule)
if m:
action = m.group(1)
_log.debug("Action %s", action)
if action not in ("MARK", "ACCEPT", "DROP", "RETURN"):
# Assume a dependent chain.
self.new_dependencies[chain].add(action)
elif ipt_op in ("--delete-chain", "-X"):
self.assert_chain_declared(chain, ipt_op)
del self.new_contents[chain]
del self.new_dependencies[chain]
elif ipt_op in ("--flush", "-F"):
self.assert_chain_declared(chain, ipt_op)
self.new_contents[chain] = []
self.new_dependencies[chain] = set()
elif ipt_op in ("--delete", "-D"):
self.assert_chain_declared(chain, ipt_op)
for rule in self.new_contents.get(chain, []):
rule_fragment = " ".join(splits[1:])
if rule.endswith(rule_fragment):
self.new_contents[chain].remove(rule)
break
else:
raise FailedSystemCall("Delete for non-existent rule", [], 1,
"", "line 2 failed")
else:
raise AssertionError("Unknown operation %s" % ipt_op)
def assert_chain_declared(self, chain, ipt_op):
kernel_chains = set(["INPUT", "FORWARD", "OUTPUT"])
if chain not in self.declared_chains and chain not in kernel_chains:
raise AssertionError("%s to non-existent chain %s" %
(ipt_op, chain))
def _handle_commit(self):
for chain, deps in self.chain_dependencies.iteritems():
for dep in deps:
if dep not in self.new_contents:
raise AssertionError("Chain %s depends on %s but that "
"chain is not present" % (chain, dep))
self.chains_contents = self.new_contents
self.chain_dependencies = self.new_dependencies
def assert_chain_contents(self, expected):
differences = zip(sorted(self.chains_contents.items()),
sorted(expected.items()))
differences = ["%s != %s" % (p1, p2) for
(p1, p2) in differences
if p1 != p2]
if self.chains_contents != expected:
raise AssertionError("Differences:\n%s" % "\n".join(differences))
class TestTransaction(BaseTestCase):
def setUp(self):
super(TestTransaction, self).setUp()
self.txn = fiptables._Transaction(
{
"felix-a": [], "felix-b": [], "felix-c": []
},
defaultdict(set, {"felix-a": set(["felix-b", "felix-stub"])}),
defaultdict(set, {"felix-b": set(["felix-a"]),
"felix-stub": set(["felix-a"])}),
)
def test_rewrite_existing_chain_remove_stub_dependency(self):
"""
Test that a no-longer-required stub is deleted.
"""
self.txn.store_rewrite_chain("felix-a", ["foo"], set(["felix-b"]))
self.assertEqual(self.txn.affected_chains,
set(["felix-a", "felix-stub"]))
self.assertEqual(self.txn.chains_to_stub_out, set([]))
self.assertEqual(self.txn.chains_to_delete, set(["felix-stub"]))
self.assertEqual(self.txn.referenced_chains, set(["felix-b"]))
self.assertEqual(
self.txn.prog_chains,
{
"felix-a": ["foo"],
"felix-b": [],
"felix-c": []
})
self.assertEqual(self.txn.required_chns,
{"felix-a": set(["felix-b"])})
self.assertEqual(self.txn.requiring_chns,
{"felix-b": set(["felix-a"])})
def test_rewrite_existing_chain_remove_normal_dependency(self):
"""
Test that removing a dependency on an explicitly programmed chain
correctly updates the indices.
"""
self.txn.store_rewrite_chain("felix-a", ["foo"], set(["felix-stub"]))
self.assertEqual(self.txn.affected_chains, set(["felix-a"]))
self.assertEqual(self.txn.chains_to_stub_out, set([]))
self.assertEqual(self.txn.chains_to_delete, set([]))
self.assertEqual(self.txn.referenced_chains, set(["felix-stub"]))
self.assertEqual(
self.txn.prog_chains,
{
"felix-a": ["foo"],
"felix-b": [],
"felix-c": [],
})
self.assertEqual(self.txn.required_chns,
{"felix-a": set(["felix-stub"])})
self.assertEqual(self.txn.requiring_chns,
{"felix-stub": set(["felix-a"])})
def test_unrequired_chain_delete(self):
"""
Test that deleting an orphan chain triggers deletion and
updates the indices.
"""
self.txn.store_delete("felix-c")
self.assertEqual(self.txn.affected_chains, set(["felix-c"]))
self.assertEqual(self.txn.chains_to_stub_out, set([]))
self.assertEqual(self.txn.chains_to_delete, set(["felix-c"]))
self.assertEqual(self.txn.referenced_chains,
set(["felix-b", "felix-stub"]))
self.assertEqual(
self.txn.prog_chains,
{
"felix-a": [],
"felix-b": [],
})
self.assertEqual(self.txn.required_chns,
{"felix-a": set(["felix-b", "felix-stub"])})
self.assertEqual(self.txn.requiring_chns,
{"felix-b": set(["felix-a"]),
"felix-stub": set(["felix-a"])})
def test_required_deleted_chain_gets_stubbed(self):
"""
Test that deleting a chain that is still required results in it
being stubbed out.
"""
self.txn.store_delete("felix-b")
self.assertEqual(self.txn.affected_chains, set(["felix-b"]))
self.assertEqual(self.txn.chains_to_stub_out, set(["felix-b"]))
self.assertEqual(self.txn.chains_to_delete, set())
self.assertEqual(self.txn.referenced_chains,
set(["felix-b", "felix-stub"]))
self.assertEqual(
self.txn.prog_chains,
{
"felix-a": [],
"felix-c": [],
})
self.assertEqual(self.txn.required_chns,
{"felix-a": set(["felix-b", "felix-stub"])})
self.assertEqual(self.txn.requiring_chns,
{"felix-b": set(["felix-a"]),
"felix-stub": set(["felix-a"])})
def test_cache_invalidation(self):
self.assert_cache_dropped()
self.assert_properties_cached()
self.txn.store_delete("felix-a")
self.assert_cache_dropped()
def test_cache_invalidation_2(self):
self.assert_cache_dropped()
self.assert_properties_cached()
self.txn.store_rewrite_chain("felix-a", [], {})
self.assert_cache_dropped()
def assert_properties_cached(self):
self.assertEqual(self.txn.affected_chains, set())
self.assertEqual(self.txn.chains_to_stub_out, set())
self.assertEqual(self.txn.chains_to_delete, set())
self.assertEqual(self.txn._affected_chains, set())
self.assertEqual(self.txn._chains_to_stub, set())
self.assertEqual(self.txn._chains_to_delete, set())
def assert_cache_dropped(self):
self.assertEqual(self.txn._affected_chains, None)
self.assertEqual(self.txn._chains_to_stub, None)
self.assertEqual(self.txn._chains_to_delete, None)
|
|
import os
import time
import random
import threading
import socket
from TCAction import TCActionBase
from NativeLog import NativeLog
from NativeLog import ThroughputResult
from Utility import RSSICalibrator
from Utility import MakeFolder
LOG_FOLDER = os.path.join("Performance", "Throughput")
AP_PROP_KEY = ("ssid", "password", "apc")
class SendThread(threading.Thread):
def __init__(self, sock, send_len, target_addr):
threading.Thread.__init__(self)
self.setDaemon(True)
self.sock = sock
self.send_len = send_len
self.target_addr = target_addr
self.exit_event = threading.Event()
pass
def exit(self):
self.exit_event.set()
def run(self):
data = "A" * self.send_len
if self.sock is None:
return
while True:
if self.exit_event.isSet() is True:
break
try:
self.sock.sendto(data, self.target_addr)
except StandardError:
break
pass
class RecvThread(threading.Thread):
def __init__(self, sock):
threading.Thread.__init__(self)
self.setDaemon(True)
self.sock = sock
self.exit_event = threading.Event()
self.calc_event = threading.Event()
self.bytes_recv = 0
def start_calc(self):
self.calc_event.set()
def stop_calc(self):
self.calc_event.clear()
self.exit_event.set()
def run(self):
if self.sock is None:
return
while True:
if self.exit_event.isSet() is True:
break
try:
data, addr = self.sock.recvfrom(65535)
except StandardError:
break
if self.calc_event.isSet() is True:
self.bytes_recv += len(data)
def get_bytes_recv(self):
return self.bytes_recv
pass
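def _example_throughput_mbps(bytes_received, measure_period):
    # Hypothetical helper (not part of the original script): the Mbps values
    # logged by the test case below are computed exactly like this - bits
    # transferred divided by the measurement window, scaled to megabits.
    return float(bytes_received * 8) / (measure_period * 1000000)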
class TestCase(TCActionBase.CommonTCActionBase):
def __init__(self, test_case, test_env, timeout=30, log_path=TCActionBase.LOG_PATH):
TCActionBase.CommonTCActionBase.__init__(self, test_case, test_env, timeout=timeout, log_path=log_path)
self.performance_folder_path = log_path
self.att_test_list = range(60)
# load param from excel
cmd_set = test_case["cmd set"]
for i in range(1, len(cmd_set)):
if cmd_set[i][0] != "dummy":
cmd_string = "self." + cmd_set[i][0]
exec cmd_string
self.result_cntx = TCActionBase.ResultCheckContext(self, test_env, self.tc_name)
pass
def execute(self):
TCActionBase.TCActionBase.execute(self)
self.result_cntx.start()
try:
# configurable params
ap_list = self.get_parameter("shield_box_ap_list")
pc_ip = self.get_parameter("pc_ip")
send_len = self.send_len
att_test_list = self.att_test_list
tx_enable = self.tx_enable
rx_enable = self.rx_enable
measure_period = self.measure_period
# configurable params
except StandardError, e:
            NativeLog.add_trace_critical("Error configuration for UDPThroughput script, error is %s" % e)
raise StandardError("Error configuration")
udp_port = random.randint(40000, 50000)
# init throughput result data
test_item = ""
if tx_enable is True:
test_item += "Tx"
if rx_enable is True:
test_item += "Rx"
if test_item == "":
raise StandardError("no throughput test item")
folder_path = os.path.join(self.performance_folder_path, LOG_FOLDER)
file_name = os.path.join(folder_path,
"UDPThroughput_%s_%s" % (test_item, time.strftime("%d%H%M%S", time.localtime())))
result = ThroughputResult.ThroughputResult(file_name)
# restart before executing throughput
checker_stings = ["R SSC1 C !!!ready!!!"]
test_action_string = ["SSC SSC1 reboot"]
fail_string = "Fail, Fail to reboot"
if self.load_and_exe_one_step(checker_stings, test_action_string, fail_string) is False:
self.result_cntx.set_result("Fail")
return
# disable recv print during throughput test
checker_stings = ["R SSC1 C +RECVPRINT"]
test_action_string = ["SSC SSC1 soc -R -o 0"]
fail_string = "Fail, Fail to disable recv print"
if self.load_and_exe_one_step(checker_stings, test_action_string, fail_string) is False:
self.result_cntx.set_result("Fail")
return
ret = True
for ap_prop in ap_list:
ssid = ap_prop[0]
password = ap_prop[1]
apc = ap_prop[2]
if ap_prop[1] == "":
# set a default string for open ap
password = "1"
# switch off all outlet, switch on AP outlet
outlet_config_dict = dict.fromkeys(range(1, 9), "OFF")
outlet_config_dict[apc] = "ON"
apc_cmd = "APC <APC1>"
for outlet in outlet_config_dict:
apc_cmd += " %s %s" % (outlet_config_dict[outlet], outlet)
checker_stings = ["P PC_COM L OK"]
fail_string = "Fail, Fail to switch apc"
if self.load_and_exe_one_step(checker_stings, [apc_cmd], fail_string) is False:
ret = False
break
# wait AP ready
time.sleep(20)
# create server
udp_sock = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
udp_sock.bind((pc_ip, udp_port))
udp_sock.settimeout(1)
if tx_enable is True:
result.add_test_item(ssid + "_tx")
if rx_enable is True:
result.add_test_item(ssid + "_rx")
# create RSSI Calibrator
calibrator = RSSICalibrator.Calibrator()
for att_value in att_test_list:
# step 0 set att value
checker_stings = ["R PC_COM L OK"]
test_action_string = ["ATT <att_port> %s" % att_value]
fail_string = "Fail, Fail to set att value"
if self.load_and_exe_one_step(checker_stings, test_action_string, fail_string) is False:
ret = False
break
# continue
# step 1 get AP RSSI
checker_stings = ["R SSC1 A <rssi>:\+SCAN:%s,[:\d\w]+,\d+,\d+,([-\d]+)\r" % ssid]
test_action_string = ["SSC SSC1 sta -S -s %s" % ssid]
fail_string = "Fail, Fail to scan"
rssi = scan_count = 0
for i in range(3):
if self.load_and_exe_one_step(checker_stings, test_action_string, fail_string) is False:
continue
rssi += int(self.test_env.get_variable_by_name("rssi")[1])
scan_count += 1
rssi = calibrator.calibrate_rssi(float(rssi)/scan_count if scan_count > 0 else 0, att_value)
# step 2 connect to AP
checker_stings = ["R SSC1 C +JAP:CONNECTED"]
test_action_string = ["SSC SSC1 sta -C -s %s -p %s" % (ssid, password)]
fail_string = "Fail, Fail to JAP"
if self.load_and_exe_one_step(checker_stings, test_action_string, fail_string,
check_freq=1, check_time=30) is False:
if rssi < -89:
continue
else:
ret = False
break
# continue
checker_stings = ["R SSC1 A <target_ip>:STAIP:(\d+\.\d+\.\d+\.\d+)"]
test_action_string = ["SSC SSC1 ip -Q"]
fail_string = "Fail, Fail to get ip"
if self.load_and_exe_one_step(checker_stings, test_action_string, fail_string,
check_freq=1, check_time=30) is False:
if rssi < -89:
continue
else:
ret = False
break
# continue
target_ip = self.get_parameter("target_ip")
# step 3 close all connections
checker_stings = ["R SSC1 C +CLOSEALL"]
test_action_string = ["SSC SSC1 soc -T"]
fail_string = "Fail, Fail to close socket"
if self.load_and_exe_one_step(checker_stings, test_action_string, fail_string) is False:
ret = False
break
# continue
# step 4 create UDP socket
checker_stings = ["R SSC1 A <client_sock>:\+BIND:(\d+),OK"]
test_action_string = ["SSC SSC1 soc -B -t UDP -i %s -p %s" % (target_ip, udp_port)]
fail_string = "Fail, Fail to bind"
if self.load_and_exe_one_step(checker_stings, test_action_string, fail_string) is False:
ret = False
break
# continue
# step 5 do throughput test
send_thread = SendThread(udp_sock if rx_enable is True else None,
send_len, (target_ip, udp_port))
send_thread.start()
recv_thread = RecvThread(udp_sock if tx_enable is True else None)
recv_thread.start()
if tx_enable is True:
# do send from target
test_action_string = ["SSC SSC1 soc -S -s <client_sock> -l %s -n 10000000 -i %s -p %s"
% (send_len, pc_ip, udp_port)]
fail_string = "Fail, Fail to send"
if self.load_and_exe_one_step([], test_action_string, fail_string) is False:
pass
# start throughput calculate
recv_thread.start_calc()
# sleep for measure period
time.sleep(measure_period)
# stop throughput calculate
recv_thread.stop_calc()
send_thread.exit()
send_thread.join()
recv_thread.join()
# output throughput result
# in Mbps
if rx_enable is True:
# get received data len from PC
self.load_and_exe_one_step(["R SSC1 A <recv_len>:RECVLEN:(\d+)"],
["SSC SSC1 soc -Q -s <client_sock> -o 1"],
"Fail, Fail to get recv data len")
try:
rx_data_len = int(self.get_parameter("recv_len"))
except StandardError:
rx_data_len = 0
result.log_throughput(ssid + "_rx", rssi, att_value,
float(rx_data_len * 8) / (measure_period * 1000000))
if recv_thread.get_bytes_recv() > 0:
result.log_throughput(ssid + "_tx", rssi, att_value,
float(recv_thread.get_bytes_recv() * 8) / (measure_period * 1000000))
result.output_to_file()
pass
udp_sock.close()
            if ret:
                NativeLog.add_trace_critical("Test SUC for %s" % ssid)
            else:
                NativeLog.add_trace_critical("Test FAIL for %s!!!" % ssid)
if ret:
self.result_cntx.set_result("Succeed")
else:
self.result_cntx.set_result("Fail")
# finally, execute done
def result_check(self, port_name, data):
TCActionBase.CommonTCActionBase.result_check(self, port_name, data)
self.result_cntx.append_data(port_name, data)
def main():
pass
if __name__ == '__main__':
main()
|
|
from yuuhpizzakebab import app, admin_required, login_required
from .models import Order
from yuuhpizzakebab.pizza.models import Pizza
from yuuhpizzakebab.kebab.models import Kebab
from yuuhpizzakebab.drink.models import Drink
from yuuhpizzakebab.user.database_functions import get_user_by_id
from flask import render_template, session, redirect, url_for, request, flash
import datetime
def get_pizzas_from_session():
"""Gets a list of pizzas saved in the user's session."""
pizzas = []
if session.get('selected_pizzas'):
for pizza_id in session.get('selected_pizzas'):
p = Pizza.get_by_id(pizza_id)
pizzas.append(p)
return pizzas
def get_kebabs_from_session():
"""Gets a list of kebabs saved in the user's session."""
kebabs = []
if session.get('selected_kebabs'):
for kebab_id in session['selected_kebabs']:
k = Kebab.get_by_id(kebab_id)
kebabs.append(k)
return kebabs
def get_drinks_from_session():
"""Gets a list of drinks saved in the user's session."""
drinks = []
if session.get('selected_drinks'):
for drink_id in session['selected_drinks']:
d = Drink.get_by_id(drink_id)
drinks.append(d)
return drinks
def get_total_price_of_items(pizzas, kebabs, drinks):
"""Calculates and returns the total price of items provided.
arguments:
pizzas - list of pizzas
kebabs - list of kebabs
drinks - list of drinks
"""
total = 0.0
for p in pizzas:
total += float(p.price_without_dollar_sign())
for k in kebabs:
total += float(k.price_without_dollar_sign())
for d in drinks:
total += float(d.price_without_dollar_sign())
return total
def get_delivery_address():
"""Returns the delivery address saved in the user's session."""
return session.get('delivery_address')
def clear_session():
"""Clears the user's session of any order related data."""
session.pop('selected_pizzas', None)
session.pop('selected_kebabs', None)
session.pop('selected_drinks', None)
session.pop('delivery_address', None)
@app.route('/new_order', methods=['GET'])
@login_required
def new_order():
"""Shows the active order."""
pizzas = get_pizzas_from_session()
kebabs = get_kebabs_from_session()
drinks = get_drinks_from_session()
total_price = get_total_price_of_items(pizzas, kebabs, drinks)
delivery_address = get_delivery_address()
return render_template('order/create_order.html',
pizzas=pizzas,
kebabs=kebabs,
drinks=drinks,
total_price=total_price,
delivery_address=delivery_address)
@app.route('/select/<string:item_type>', methods=['GET'])
@login_required
def select_item(item_type):
"""Redirects the user to select an item of specified type.
arguments:
item_type - type of the item (pizza, kebab or drink)
"""
if item_type not in ['pizza', 'kebab', 'drink']:
flash('Unknown item type', 'alert-warning')
return redirect(url_for('new_order'))
return redirect(url_for('list_{}s'.format(item_type), selecting=True))
@app.route('/select/pizza/<int:pizza_id>', methods=['GET'])
@login_required
def select_pizza(pizza_id):
"""Adds a selected pizza to the user's session.
arguments:
pizza_id - id of the pizza
"""
if not session.get('selected_pizzas'):
session['selected_pizzas'] = []
new_list = session['selected_pizzas']
new_list.append(pizza_id)
session['selected_pizzas'] = new_list
return redirect(url_for('new_order'))
@app.route('/select/kebab/<int:kebab_id>', methods=['GET'])
@login_required
def select_kebab(kebab_id):
"""Adds a selected kebab to the user's session.
arguments:
kebab_id - id of the kebab
"""
if not session.get('selected_kebabs'):
session['selected_kebabs'] = []
new_list = session['selected_kebabs']
new_list.append(kebab_id)
session['selected_kebabs'] = new_list
return redirect(url_for('new_order'))
@app.route('/select/drink/<int:drink_id>', methods=['GET'])
@login_required
def select_drink(drink_id):
"""Adds a selected drink to the user's session.
arguments:
drink_id - id of the drink
"""
if not session.get('selected_drinks'):
session['selected_drinks'] = []
new_list = session['selected_drinks']
new_list.append(drink_id)
session['selected_drinks'] = new_list
return redirect(url_for('new_order'))
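def _append_to_session_list(key, item_id):
    """Hypothetical helper (not part of the original module): the pattern
    shared by select_pizza/select_kebab/select_drink above - read the list out
    of the session, append the id and assign the list back so Flask persists
    the change.
    """
    new_list = session.get(key, [])
    new_list.append(item_id)
    session[key] = new_list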
@app.route('/place_order', methods=['GET'])
@login_required
def place_order():
"""Places an order for the user with the selected goods and the delivery address."""
pizzas = get_pizzas_from_session()
kebabs = get_kebabs_from_session()
drinks = get_drinks_from_session()
total_price = get_total_price_of_items(pizzas, kebabs, drinks)
user_id = session['user_id']
user = get_user_by_id(user_id)
ordered_at = datetime.datetime.utcnow()
delivery_address = get_delivery_address()
# Delivery is always in an hour for now.
delivery_at = datetime.datetime.utcnow() + datetime.timedelta(hours=1)
lunch_offer_active = False
o = Order(None, user, ordered_at, delivery_address, delivery_at, False,
False, lunch_offer_active)
o.save()
for p in pizzas:
o.add_pizza(p)
for k in kebabs:
o.add_kebab(k)
for d in drinks:
o.add_drink(d)
clear_session()
flash('Order placed', 'alert-success')
return redirect(url_for('list_orders'))
@app.route('/clear_order', methods=['GET'])
@login_required
def clear_order():
"""Clears all order related information from the session."""
clear_session()
return redirect(url_for('new_order'))
@app.route('/orders', methods=['GET'])
@admin_required
def list_orders():
"""Shows a list of orders.
Requires administrator privileges.
"""
return render_template('order/orders.html', orders=Order.get_all())
@app.route('/orders/active', methods=['GET'])
@admin_required
def list_active_orders():
"""Shows a list of active (not rejected or delivered) orders.
Requires administrator privileges.
"""
return render_template('order/orders.html', orders=Order.get_all_active())
@app.route('/order/<int:order_id>', methods=['GET'])
@admin_required
def order_details(order_id):
"""Shows details of an order.
arguments:
order_id - id of the order
Requires administrator privileges.
"""
order = Order.get_by_id(order_id)
return render_template('order/order_details.html', order=order)
@app.route('/order/add_discount/<int:order_id>', methods=['GET'])
@admin_required
def add_discount(order_id):
"""Activates a discount for an order.
arguments:
order_id - id of the order
Requires administrator privileges.
"""
flash('Not implemented yet', 'alert-info')
return redirect(url_for('list_orders'))
@app.route('/order/reject/<int:order_id>', methods=['GET'])
@admin_required
def reject_order(order_id):
"""Rejects an order.
arguments:
order_id - id of the order
Requires administrator privileges.
"""
o = Order.get_by_id(order_id)
o.mark_as_rejected()
return redirect(url_for('list_orders'))
@app.route('/order/deliver/<int:order_id>', methods=['GET', 'POST'])
@admin_required
def mark_order_as_delivered(order_id):
"""Marks an order as delivered.
arguments:
order_id - id of the order
    Receives booleans indicating whether the customer was found and whether
    there were any problems with the delivery.
Requires administrator privileges.
"""
if request.method == 'POST':
o = Order.get_by_id(order_id)
customer_found = request.form['customer_found']
had_problems = request.form['had_problems']
o.mark_as_delivered(customer_found, had_problems)
return redirect(url_for('list_orders'))
return render_template('order/deliver_order.html')
@app.route('/order/edit/<int:order_id>', methods=['GET', 'POST'])
@admin_required
def edit_order(order_id):
"""Edits an order.
arguments:
order_id - id of the order
Requires administrator privileges.
"""
    order = Order.get_by_id(order_id)
    return render_template('order/edit_order.html', order=order)
@app.route('/order/delete/<int:order_id>', methods=['GET'])
@admin_required
def delete_order(order_id):
"""Deletes an order.
arguments:
order_id - id of the order
Requires administrator privileges.
"""
Order.delete_by_id(order_id)
return redirect(url_for('list_orders'))
@app.route('/order/remove/<string:item_type>/<int:item_id>', methods=['GET'])
@login_required
def remove_item_from_order(item_type, item_id):
"""Removes an item from the order.
arguments:
item_type - type of the item as a string (pizza, kebab, drink)
item_id - id of the item
"""
if item_type not in ['pizza', 'kebab', 'drink']:
flash('Unknown item type', 'alert-warning')
return redirect(url_for('new_order'))
session_key = 'selected_{}s'.format(item_type)
    new_list = session.get(session_key) or []
    if item_id in new_list:
        new_list.remove(item_id)
    session[session_key] = new_list
return redirect(url_for('new_order'))
@app.route('/order/set_delivery_address', methods=['POST'])
@login_required
def set_delivery_address():
"""Saves the delivery address to the session.
Receives the delivery address in POST.
"""
delivery_address = request.form['delivery_address']
session['delivery_address'] = delivery_address
return redirect(url_for('new_order'))
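# For reference, the order-related session layout used by the views in this
# module (the keys come from the code above; the concrete values are purely
# illustrative):
#   session = {'selected_pizzas': [1, 4], 'selected_kebabs': [],
#              'selected_drinks': [2], 'delivery_address': 'Example St 1'}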
|
|
#!/usr/bin/env python3
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import contextlib
import copy
import glob
import io
import itertools
import os
import re
import shutil
import subprocess
import sys
import tempfile
import unittest
import zipfile
import archive
import describe
import diff
import file_format
import models
import test_util
_SCRIPT_DIR = os.path.dirname(__file__)
_TEST_DATA_DIR = os.path.join(_SCRIPT_DIR, 'testdata')
_TEST_SDK_DIR = os.path.join(_TEST_DATA_DIR, 'mock_sdk')
_TEST_SOURCE_DIR = os.path.join(_TEST_DATA_DIR, 'mock_source_directory')
_TEST_OUTPUT_DIR = os.path.join(_TEST_SOURCE_DIR, 'out', 'Release')
_TEST_TOOL_PREFIX = os.path.join(
os.path.abspath(_TEST_DATA_DIR), 'mock_toolchain', '')
_TEST_APK_ROOT_DIR = os.path.join(_TEST_DATA_DIR, 'mock_apk')
_TEST_MAP_PATH = os.path.join(_TEST_DATA_DIR, 'test.map')
_TEST_PAK_INFO_PATH = os.path.join(
_TEST_OUTPUT_DIR, 'size-info/test.apk.pak.info')
_TEST_ELF_FILE_BEGIN = os.path.join(_TEST_OUTPUT_DIR, 'elf.begin')
_TEST_APK_LOCALE_PAK_PATH = os.path.join(_TEST_APK_ROOT_DIR, 'assets/en-US.pak')
_TEST_APK_PAK_PATH = os.path.join(_TEST_APK_ROOT_DIR, 'assets/resources.pak')
# The following files are dynamically created.
_TEST_ELF_PATH = os.path.join(_TEST_OUTPUT_DIR, 'elf')
_TEST_APK_PATH = os.path.join(_TEST_OUTPUT_DIR, 'test.apk')
_TEST_MINIMAL_APKS_PATH = os.path.join(_TEST_OUTPUT_DIR, 'Bundle.minimal.apks')
# Generated file paths relative to apk
_TEST_APK_SO_PATH = 'test.so'
_TEST_APK_SMALL_SO_PATH = 'smalltest.so'
_TEST_APK_DEX_PATH = 'test.dex'
_TEST_APK_OTHER_FILE_PATH = 'assets/icudtl.dat'
_TEST_APK_RES_FILE_PATH = 'res/drawable-v13/test.xml'
def _CompareWithGolden(name=None):
def real_decorator(func):
basename = name
if not basename:
basename = func.__name__.replace('test_', '')
golden_path = os.path.join(_TEST_DATA_DIR, basename + '.golden')
def inner(self):
actual_lines = func(self)
actual_lines = (re.sub(r'(elf_mtime=).*', r'\1{redacted}', l)
for l in actual_lines)
actual_lines = (re.sub(r'(Loaded from ).*', r'\1{redacted}', l)
for l in actual_lines)
test_util.Golden.CheckOrUpdate(golden_path, actual_lines)
return inner
return real_decorator
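# Each test decorated with @_CompareWithGolden returns an iterable of lines,
# which is compared against (or, in update mode, written to)
# testdata/<name>.golden. Volatile values such as elf_mtime and
# "Loaded from ..." paths are redacted first so the goldens stay stable across
# machines. To regenerate all goldens, run this file with --update
# (see main() at the bottom); the exact invocation depends on this script's
# filename, which is not shown here.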
@contextlib.contextmanager
def _AddMocksToPath():
prev_path = os.environ['PATH']
os.environ['PATH'] = _TEST_TOOL_PREFIX[:-1] + os.path.pathsep + prev_path
os.environ['APK_ANALYZER'] = os.path.join(_TEST_SDK_DIR, 'tools', 'bin',
'apkanalyzer')
try:
yield
finally:
os.environ['PATH'] = prev_path
del os.environ['APK_ANALYZER']
def _RunApp(name, args, debug_measures=False):
argv = [os.path.join(_SCRIPT_DIR, 'main.py'), name]
argv.extend(args)
with _AddMocksToPath():
env = None
if debug_measures:
env = os.environ.copy()
env['SUPERSIZE_DISABLE_ASYNC'] = '1'
env['SUPERSIZE_MEASURE_GZIP'] = '1'
return subprocess.check_output(argv, env=env).decode('utf-8').splitlines()
class IntegrationTest(unittest.TestCase):
  maxDiff = None  # Don't truncate diffs in errors.
cached_size_info = {}
@staticmethod
def _CreateBlankData(power_of_two):
data = '\0'
for _ in range(power_of_two):
data = data + data
return data
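  # Note: starting from a single '\0' byte and doubling power_of_two times
  # yields exactly 2**power_of_two bytes, e.g. _CreateBlankData(27) gives
  # 2**27 bytes (128 MiB), matching the size comments in setUpClass() below.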
@staticmethod
def _SafeRemoveFiles(file_names):
for file_name in file_names:
if os.path.exists(file_name):
os.remove(file_name)
@classmethod
def setUpClass(cls):
shutil.copy(_TEST_ELF_FILE_BEGIN, _TEST_ELF_PATH)
    # Exactly 128MB of data (2^27); any extra bytes are accounted for as overhead.
with open(_TEST_ELF_PATH, 'a') as elf_file:
elf_file.write(IntegrationTest._CreateBlankData(27))
with zipfile.ZipFile(_TEST_APK_PATH, 'w') as apk_file:
apk_file.write(_TEST_ELF_PATH, _TEST_APK_SO_PATH)
# Exactly 4MB of data (2^22).
apk_file.writestr(
_TEST_APK_SMALL_SO_PATH, IntegrationTest._CreateBlankData(22))
# Exactly 1MB of data (2^20).
apk_file.writestr(
_TEST_APK_OTHER_FILE_PATH, IntegrationTest._CreateBlankData(20))
# Exactly 1KB of data (2^10).
apk_file.writestr(
_TEST_APK_RES_FILE_PATH, IntegrationTest._CreateBlankData(10))
locale_pak_rel_path = os.path.relpath(
_TEST_APK_LOCALE_PAK_PATH, _TEST_APK_ROOT_DIR)
apk_file.write(_TEST_APK_LOCALE_PAK_PATH, locale_pak_rel_path)
pak_rel_path = os.path.relpath(_TEST_APK_PAK_PATH, _TEST_APK_ROOT_DIR)
apk_file.write(_TEST_APK_PAK_PATH, pak_rel_path)
# Exactly 8MB of data (2^23).
apk_file.writestr(
_TEST_APK_DEX_PATH, IntegrationTest._CreateBlankData(23))
with zipfile.ZipFile(_TEST_MINIMAL_APKS_PATH, 'w') as apk_file:
apk_file.write(_TEST_APK_PATH, 'splits/base-master.apk')
apk_file.writestr('splits/base-en.apk', 'x' * 10)
apk_file.writestr('splits/vr-master.apk', 'x' * 20)
apk_file.writestr('splits/vr-en.apk', 'x' * 40)
apk_file.writestr('toc.pb', 'x' * 80)
@classmethod
def tearDownClass(cls):
IntegrationTest._SafeRemoveFiles([
_TEST_ELF_PATH,
_TEST_APK_PATH,
_TEST_MINIMAL_APKS_PATH,
])
def _CreateTestArgs(self):
return argparse.Namespace(
**{
'is_bundle': False,
'java_only': False,
'native_only': False,
'no_java': False,
'no_native': False,
'relocations': False,
'source_directory': _TEST_SOURCE_DIR,
})
def _CloneSizeInfo(self, use_output_directory=True, use_elf=True,
use_apk=False, use_minimal_apks=False, use_pak=False):
assert not use_elf or use_output_directory
assert not (use_apk and use_pak)
cache_key = (
use_output_directory, use_elf, use_apk, use_minimal_apks, use_pak)
if cache_key not in IntegrationTest.cached_size_info:
elf_path = _TEST_ELF_PATH if use_elf else None
output_directory = _TEST_OUTPUT_DIR if use_output_directory else None
knobs = archive.SectionSizeKnobs()
opts = archive.ContainerArchiveOptions(self._CreateTestArgs())
# Override for testing. Lower the bar for compacting symbols, to allow
# smaller test cases to be created.
knobs.max_same_name_alias_count = 3
apk_path = None
minimal_apks_path = None
apk_so_path = None
size_info_prefix = None
extracted_minimal_apk_path = None
if use_apk:
apk_path = _TEST_APK_PATH
elif use_minimal_apks:
minimal_apks_path = _TEST_MINIMAL_APKS_PATH
extracted_minimal_apk_path = _TEST_APK_PATH
if use_apk or use_minimal_apks:
apk_so_path = _TEST_APK_SO_PATH
if output_directory:
if use_apk:
orig_path = _TEST_APK_PATH
else:
orig_path = _TEST_MINIMAL_APKS_PATH.replace('.minimal.apks', '.aab')
size_info_prefix = os.path.join(
output_directory, 'size-info', os.path.basename(orig_path))
pak_files = None
pak_info_file = None
if use_pak:
pak_files = [_TEST_APK_LOCALE_PAK_PATH, _TEST_APK_PAK_PATH]
pak_info_file = _TEST_PAK_INFO_PATH
linker_name = 'gold'
with _AddMocksToPath():
metadata = archive.CreateMetadata(_TEST_MAP_PATH, elf_path, apk_path,
minimal_apks_path, _TEST_TOOL_PREFIX,
output_directory, linker_name)
section_sizes, raw_symbols = archive.CreateSectionSizesAndSymbols(
knobs=knobs,
opts=opts,
map_path=_TEST_MAP_PATH,
tool_prefix=_TEST_TOOL_PREFIX,
elf_path=elf_path,
output_directory=output_directory,
apk_path=apk_path or extracted_minimal_apk_path,
apk_so_path=apk_so_path,
metadata=metadata,
pak_files=pak_files,
pak_info_file=pak_info_file,
linker_name=linker_name,
size_info_prefix=size_info_prefix)
IntegrationTest.cached_size_info[cache_key] = archive.CreateSizeInfo(
[section_sizes], [raw_symbols], [metadata])
return copy.deepcopy(IntegrationTest.cached_size_info[cache_key])
def _DoArchive(self,
archive_path,
use_output_directory=True,
use_elf=True,
use_apk=False,
use_minimal_apks=False,
use_pak=False,
debug_measures=False,
include_padding=False):
args = [
archive_path,
'--map-file', _TEST_MAP_PATH,
'--source-directory', _TEST_SOURCE_DIR,
]
if use_output_directory:
# Let autodetection find output_directory when --elf-file is used.
if not use_elf:
args += ['--output-directory', _TEST_OUTPUT_DIR]
else:
args += ['--no-source-paths']
if use_apk:
args += ['-f', _TEST_APK_PATH]
elif use_minimal_apks:
args += ['-f', _TEST_MINIMAL_APKS_PATH]
if use_elf:
if use_apk or use_minimal_apks:
args += ['--elf-file', _TEST_ELF_PATH]
else:
args += ['-f', _TEST_ELF_PATH]
if use_pak:
args += ['--pak-file', _TEST_APK_LOCALE_PAK_PATH,
'--pak-file', _TEST_APK_PAK_PATH,
'--pak-info-file', _TEST_PAK_INFO_PATH]
if include_padding:
args += ['--include-padding']
_RunApp('archive', args, debug_measures=debug_measures)
def _DoArchiveTest(self,
use_output_directory=True,
use_elf=True,
use_apk=False,
use_minimal_apks=False,
use_pak=False,
debug_measures=False,
include_padding=False):
with tempfile.NamedTemporaryFile(suffix='.size') as temp_file:
self._DoArchive(
temp_file.name,
use_output_directory=use_output_directory,
use_elf=use_elf,
use_apk=use_apk,
use_minimal_apks=use_minimal_apks,
use_pak=use_pak,
debug_measures=debug_measures,
include_padding=include_padding)
size_info = archive.LoadAndPostProcessSizeInfo(temp_file.name)
# Check that saving & loading is the same as directly parsing.
expected_size_info = self._CloneSizeInfo(
use_output_directory=use_output_directory, use_elf=use_elf,
use_apk=use_apk, use_minimal_apks=use_minimal_apks, use_pak=use_pak)
self.assertEqual(expected_size_info.metadata, size_info.metadata)
# Don't cluster.
expected_size_info.symbols = expected_size_info.raw_symbols
size_info.symbols = size_info.raw_symbols
expected = list(describe.GenerateLines(expected_size_info, verbose=True))
actual = list(describe.GenerateLines(size_info, verbose=True))
self.assertEqual(expected, actual)
sym_strs = (repr(sym) for sym in size_info.symbols)
stats = describe.DescribeSizeInfoCoverage(size_info)
if size_info.metadata:
metadata = describe.DescribeMetadata(size_info.metadata)
else:
metadata = []
return itertools.chain(metadata, stats, sym_strs)
@_CompareWithGolden()
def test_Archive(self):
return self._DoArchiveTest(use_output_directory=False, use_elf=False)
@_CompareWithGolden()
def test_Archive_OutputDirectory(self):
return self._DoArchiveTest(use_elf=False)
@_CompareWithGolden()
def test_Archive_Elf(self):
return self._DoArchiveTest()
@_CompareWithGolden()
def test_Archive_Apk(self):
return self._DoArchiveTest(use_apk=True)
@_CompareWithGolden()
def test_Archive_MinimalApks(self):
return self._DoArchiveTest(use_minimal_apks=True)
@_CompareWithGolden()
def test_Archive_Pak_Files(self):
return self._DoArchiveTest(use_pak=True)
@_CompareWithGolden(name='Archive_Elf')
def test_Archive_Elf_DebugMeasures(self):
return self._DoArchiveTest(debug_measures=True)
@_CompareWithGolden(name='Archive')
def test_ArchiveSparse(self):
return self._DoArchiveTest(
use_output_directory=False, use_elf=False, include_padding=True)
@_CompareWithGolden()
def test_Console(self):
with tempfile.NamedTemporaryFile(suffix='.size') as size_file, \
tempfile.NamedTemporaryFile(suffix='.txt') as output_file:
file_format.SaveSizeInfo(self._CloneSizeInfo(), size_file.name)
query = [
'ShowExamples()',
'ExpandRegex("_foo_")',
'canned_queries.CategorizeGenerated()',
'canned_queries.CategorizeByChromeComponent()',
'canned_queries.LargeFiles()',
'canned_queries.TemplatesByName()',
'canned_queries.StaticInitializers()',
'canned_queries.PakByPath()',
'Print(ReadStringLiterals(elf_path={}))'.format(repr(_TEST_ELF_PATH)),
'Print(size_info, to_file=%r)' % output_file.name,
]
ret = _RunApp('console', [size_file.name, '--query', '; '.join(query)])
with open(output_file.name) as f:
ret.extend(l.rstrip() for l in f)
return ret
@_CompareWithGolden()
def test_Csv(self):
with tempfile.NamedTemporaryFile(suffix='.size') as size_file, \
tempfile.NamedTemporaryFile(suffix='.txt') as output_file:
file_format.SaveSizeInfo(self._CloneSizeInfo(), size_file.name)
query = [
'Csv(size_info, to_file=%r)' % output_file.name,
]
ret = _RunApp('console', [size_file.name, '--query', '; '.join(query)])
with open(output_file.name) as f:
ret.extend(l.rstrip() for l in f)
return ret
@_CompareWithGolden()
def test_Diff_NullDiff(self):
with tempfile.NamedTemporaryFile(suffix='.size') as temp_file:
file_format.SaveSizeInfo(self._CloneSizeInfo(), temp_file.name)
return _RunApp('diff', [temp_file.name, temp_file.name])
# Runs archive 3 times, and asserts the contents are the same each time.
def test_Idempotent(self):
prev_contents = None
for _ in range(3):
with tempfile.NamedTemporaryFile(suffix='.size') as temp_file:
self._DoArchive(temp_file.name)
contents = temp_file.read()
self.assertTrue(prev_contents is None or contents == prev_contents)
prev_contents = contents
@_CompareWithGolden()
def test_Diff_Basic(self):
size_info1 = self._CloneSizeInfo(use_elf=False, use_pak=True)
size_info2 = self._CloneSizeInfo(use_elf=False, use_pak=True)
size_info1.metadata = {"foo": 1, "bar": [1,2,3], "baz": "yes"}
size_info2.metadata = {"foo": 1, "bar": [1,3], "baz": "yes"}
size_info1.raw_symbols -= size_info1.raw_symbols[:2]
size_info2.raw_symbols -= size_info2.raw_symbols[-3:]
changed_sym = size_info1.raw_symbols.WhereNameMatches('Patcher::Name_')[0]
changed_sym.size -= 10
padding_sym = size_info2.raw_symbols.WhereNameMatches('symbol gap 0')[0]
padding_sym.padding += 20
padding_sym.size += 20
pak_sym = size_info2.raw_symbols.WhereInSection(
models.SECTION_PAK_NONTRANSLATED)[0]
pak_sym.full_name = 'foo: ' + pak_sym.full_name.split()[-1]
# Serialize & de-serialize so that name normalization runs again for the pak
# symbol.
bytesio = io.BytesIO()
file_format.SaveSizeInfo(size_info2, 'path', file_obj=bytesio)
bytesio.seek(0)
size_info2 = archive.LoadAndPostProcessSizeInfo('path', file_obj=bytesio)
d = diff.Diff(size_info1, size_info2)
d.raw_symbols = d.raw_symbols.Sorted()
self.assertEqual(d.raw_symbols.CountsByDiffStatus()[1:], (2, 2, 3))
changed_sym = d.raw_symbols.WhereNameMatches('Patcher::Name_')[0]
padding_sym = d.raw_symbols.WhereNameMatches('symbol gap 0')[0]
bss_sym = d.raw_symbols.WhereInSection(models.SECTION_BSS)[0]
# Padding-only deltas should sort after all non-padding changes.
padding_idx = d.raw_symbols.index(padding_sym)
changed_idx = d.raw_symbols.index(changed_sym)
bss_idx = d.raw_symbols.index(bss_sym)
self.assertLess(changed_idx, padding_idx)
# And before bss.
self.assertLess(padding_idx, bss_idx)
return describe.GenerateLines(d, verbose=True)
@_CompareWithGolden()
def test_FullDescription(self):
size_info = self._CloneSizeInfo()
# Show both clustered and non-clustered so that they can be compared.
size_info.symbols = size_info.raw_symbols
return itertools.chain(
describe.GenerateLines(size_info, verbose=True),
describe.GenerateLines(size_info.symbols._Clustered(), recursive=True,
verbose=True),
)
@_CompareWithGolden()
def test_SymbolGroupMethods(self):
all_syms = self._CloneSizeInfo().symbols
global_syms = all_syms.WhereNameMatches('GLOBAL')
# Tests Filter(), Inverted(), and __sub__().
non_global_syms = global_syms.Inverted()
self.assertEqual(non_global_syms, (all_syms - global_syms))
# Tests Sorted() and __add__().
self.assertEqual(all_syms.Sorted(),
(global_syms + non_global_syms).Sorted())
# Tests GroupedByName() and __len__().
return itertools.chain(
['GroupedByName()'],
describe.GenerateLines(all_syms.GroupedByName()),
['GroupedByName(depth=1)'],
describe.GenerateLines(all_syms.GroupedByName(depth=1)),
['GroupedByName(depth=-1)'],
describe.GenerateLines(all_syms.GroupedByName(depth=-1)),
['GroupedByName(depth=1, min_count=2)'],
describe.GenerateLines(all_syms.GroupedByName(depth=1, min_count=2)),
)
def main():
argv = sys.argv
if len(argv) > 1 and argv[1] == '--update':
argv.pop(0)
test_util.Golden.EnableUpdate()
for f in glob.glob(os.path.join(_TEST_DATA_DIR, '*.golden')):
os.unlink(f)
unittest.main(argv=argv, verbosity=2)
if __name__ == '__main__':
main()
|
|
from __future__ import with_statement
try:
import MySQLdb
from MySQLdb.cursors import DictCursor
except ImportError:
import pymysql as MySQLdb
from pymysql.cursors import DictCursor
from flask import (
Flask, request, redirect, session, url_for, abort,
render_template, _app_ctx_stack, Response,
after_this_request,
)
import memcache
from flask_memcache_session import Session
from werkzeug.contrib.fixers import ProxyFix
# git clone https://github.com/dart-lang/py-gfm.git
# cd py-gfm
# python setup.py install
from markdown import markdown
import json, os, hashlib, tempfile, subprocess
config = {}
app = Flask(__name__, static_url_path='')
app.cache = memcache.Client(['unix:/tmp/memcached.sock'], debug=0)
app.session_interface = Session()
app.session_cookie_name = "isucon_session"
app.wsgi_app = ProxyFix(app.wsgi_app)
# log
import logging
logging.basicConfig(filename='log.txt')
#logging.basicConfig(filename='log.txt', level=logging.DEBUG)
def load_config():
global config
print("Loading configuration")
env = os.environ.get('ISUCON_ENV') or 'local'
with open('../config/' + env + '.json') as fp:
config = json.load(fp)
def connect_db():
global config
host = config['database']['host']
port = config['database']['port']
username = config['database']['username']
password = config['database']['password']
dbname = config['database']['dbname']
db = MySQLdb.connect(host=host, port=port, db=dbname, user=username, passwd=password, cursorclass=DictCursor, charset="utf8")
return db
def get_user():
user_id = session.get('user_id')
user_username = session.get('user_username')
user = {'id':user_id, 'username':user_username}
if not user_id:
user = None
#if user_id:
# cur = get_db().cursor()
# cur.execute("SELECT * FROM users WHERE id=%s", user_id)
# user = cur.fetchone()
# cur.close()
if user:
@after_this_request
def add_header(response):
response.headers['Cache-Control'] = 'private'
return response
return user
def anti_csrf():
if request.form['sid'] != session['token']:
abort(400)
def require_user(user):
    if not user:
        # abort() with no status code is invalid; abort with the redirect
        # response instead so unauthenticated users land on the top page.
        abort(redirect(url_for("top_page")))
def gen_markdown(memo_id, md):
mid = "memo_content_" + str(memo_id)
html = app.cache.get(mid)
    if html is None:
html = markdown(md)
app.cache.set(mid, html)
return html
def get_db():
top = _app_ctx_stack.top
if not hasattr(top, 'db'):
top.db = connect_db()
return top.db
@app.teardown_appcontext
def close_db_connection(exception):
top = _app_ctx_stack.top
if hasattr(top, 'db'):
top.db.close()
@app.route("/")
def top_page():
user = get_user()
cur = get_db().cursor()
cur.execute('SELECT count(1) AS c FROM public_memos')
total = cur.fetchone()['c']
cur.execute("SELECT memo_inf.id, memo_inf.content, memo_inf.created_at, usr.username FROM (SELECT id, user, title as content,created_at , is_private FROM memos where is_private = 0 ORDER BY created_at DESC, id DESC LIMIT 100) as memo_inf inner join users usr on memo_inf.user = usr.id")
memos = cur.fetchall()
cur.close()
return render_template(
'index.html',
total=total,
memos=memos,
page=0,
user=user
)
@app.route("/recent/<int:page>")
def recent(page):
user = get_user()
cur = get_db().cursor()
cur.execute('SELECT count(1) AS c FROM public_memos')
total = cur.fetchone()['c']
cur.execute("SELECT memo_inf.id, memo_inf.user, memo_inf.title as content, memo_inf.created_at, usr.username FROM memos as memo_inf inner join users usr on memo_inf.user = usr.id inner join (SELECT memo FROM public_memos WHERE id BETWEEN " + str(page * 100 + 1) + " and " + str(page * 100 + 100) + ") as memo_order on memo_inf.id = memo_order.memo")
#cur.execute("SELECT memo_inf.id, memo_inf.user, memo_inf.content, memo_inf.created_at, usr.username FROM (SELECT id,user, title as content,created_at , is_private FROM memos where is_private = 0 and id >= " + str(page * 100) + " ORDER BY created_at DESC, id DESC LIMIT 100 ) as memo_inf inner join users usr on memo_inf.user = usr.id")
memos = cur.fetchall()
if len(memos) == 0:
abort(404)
cur.close()
return render_template(
'index.html',
total=total,
memos=memos,
page=page,
user=user
)
@app.route("/mypage")
def mypage():
user = get_user()
require_user(user)
cur = get_db().cursor()
cur.execute("SELECT id, title as content, is_private, created_at, updated_at FROM memos WHERE user=%s ORDER BY created_at DESC", user["id"])
memos = cur.fetchall()
cur.close()
return render_template(
'mypage.html',
user=user,
memos=memos,
)
@app.route("/signin", methods=['GET','HEAD'])
def signin():
user = get_user()
return render_template('signin.html', user=user)
@app.route("/signin", methods=['POST'])
def signin_post():
db = get_db()
cur = db.cursor()
username = request.form['username']
password = request.form['password']
cur.execute('SELECT id, username, password, salt FROM users WHERE username=%s', username)
user = cur.fetchone()
if user and user["password"] == hashlib.sha256(bytes(user["salt"] + password, 'UTF-8')).hexdigest():
session["user_id"] = user["id"]
session["user_username"] = user["username"]
session["token"] = hashlib.sha256(os.urandom(40)).hexdigest()
set_mem(user["id"]) # for memcached
#cur.execute("UPDATE users SET last_access=now() WHERE id=%s", user["id"])
cur.close()
db.commit()
return redirect(url_for("mypage"))
else:
return render_template('signin.html', user=None)
@app.route("/signout", methods=['POST'])
def signout():
anti_csrf()
session.clear()
@after_this_request
def remove_cookie(response):
response.set_cookie(app.session_cookie_name, "", expires=0)
return response
return redirect(url_for("top_page"))
@app.route("/memo/<int:memo_id>")
def memo(memo_id):
user = get_user()
cur = get_db().cursor()
#cur.execute('SELECT id, user, content, is_private, created_at, updated_at FROM memos WHERE id=%s', memo_id)
cur.execute("SELECT memo.id, memo.user, memo.content, memo.is_private, memo. created_at, memo.updated_at, usr.username FROM (SELECT id, user, content, is_private, created_at, updated_at FROM memos WHERE id=" + str(memo_id) + ") memo inner join users usr on memo.user = usr.id")
memo = cur.fetchone()
if not memo:
abort(404)
if memo["is_private"] == 1:
if not user or user["id"] != memo["user"]:
abort(404)
#cur.execute('SELECT username FROM users WHERE id=%s', memo["user"])
#memo["username"] = cur.fetchone()["username"]
memo["content_html"] = gen_markdown(memo_id, memo["content"])
#memo["content_html"] = markdown(memo["content"])
if user and user["id"] == memo["user"]:
cond = ""
mem_index = "list_memo_pri_" + str(memo["user"]) # e.g. list_memo_pri_80
logging.debug("get private")
else:
cond = "AND is_private=0"
mem_index = "list_memo_" + str(memo["user"]) # e.g. list_memo_80
logging.debug("get public")
memos = []
older = None
newer = None
# save memcached
    if app.cache.get(mem_index) is None:
        # 1st access: build the cache entry in the same comma-separated format
        # that set_mem() stores, so the parsing below works either way.
        list_memo = []  # memcached
        logging.debug("mem_index does not exist")
        cur.execute("SELECT id FROM memos WHERE user=%s " + cond + " ORDER BY created_at", memo["user"])
        memos = cur.fetchall()
        for i in range(len(memos)):
            list_memo.append(memos[i]["id"])  # memcached
        #cur.close()
        str_res = ','.join(map(str, list_memo))
        app.cache.set(mem_index, str_res)
    else:
        # 2nd access onwards
        logging.debug("mem_index exists")
        str_res = app.cache.get(mem_index)
    if str_res != "":
        res = list(map(int, str_res.split(',')))  # String to list
        now = res.index(memo["id"])
        if now > 0:
            older = {'id': res[now - 1]}
        if now < len(res) - 1:
            newer = {'id': res[now + 1]}
#cur.execute("SELECT id FROM memos WHERE user=%s " + cond + " ORDER BY created_at", memo["user"])
#memos = cur.fetchall()
#for i in range(len(memos)):
# if memos[i]["id"] == memo["id"]:
# if i > 0:
# older = memos[i - 1]
# if i < len(memos) - 1:
# newer = memos[i + 1]
cur.close()
return render_template(
"memo.html",
user=user,
memo=memo,
older=older,
newer=newer,
)
@app.route("/memo", methods=['POST'])
def memo_post():
user = get_user()
require_user(user)
anti_csrf()
db = get_db()
cur = db.cursor()
cur.execute(
"INSERT INTO memos (user, content, is_private, created_at, title) VALUES (%s, %s, %s, now(), %s)",
( user["id"],
request.form["content"],
int(request.form.get("is_private") or 0),
request.form["content"].split('\n')[0]
)
)
memo_id = db.insert_id()
logging.debug(memo_id)
if request.form.get("is_private") != 1:
cur.execute(
"INSERT INTO public_memos (memo) VALUES (%s)",
(memo_id)
)
cur.close()
db.commit()
    # private list (all memos of the user)
    mem_key_pri = "list_memo_pri_" + str(user["id"])  # e.g. list_memo_pri_80
    if app.cache.get(mem_key_pri):
        app.cache.append(mem_key_pri, ',' + str(memo_id))
    else:
        app.cache.add(mem_key_pri, str(memo_id))
    # public list: only add public memos, mirroring set_mem()
    if int(request.form.get("is_private") or 0) != 1:
        mem_key_pub = "list_memo_" + str(user["id"])  # e.g. list_memo_80
        if app.cache.get(mem_key_pub):
            app.cache.append(mem_key_pub, ',' + str(memo_id))
        else:
            app.cache.add(mem_key_pub, str(memo_id))
return redirect(url_for('memo', memo_id=memo_id))
def set_mem(user_id):
cur = get_db().cursor()
#private
cur.execute("SELECT id FROM memos WHERE user=%s ORDER BY created_at", user_id)
memo_pri = cur.fetchall()
list_memo_pri = []
for i in range(len(memo_pri)):
list_memo_pri.append(memo_pri[i]["id"]) #memcached
# list to String
str_memo_pri=','.join(map(str, list_memo_pri))
app.cache.set("list_memo_pri_" + str(user_id), str_memo_pri)
# sample list to String
#str_memo_pri=','.join(map(str, list_memo_pri))
# String to list
#test_memo_pri= list(map(int, str_memo_pri.split(',')))
# sample end
#public
cur.execute("SELECT id FROM memos WHERE user=%s AND is_private=0 ORDER BY created_at", user_id)
memo_pub = cur.fetchall()
list_memo_pub = []
for i in range(len(memo_pub)):
list_memo_pub.append(memo_pub[i]["id"]) #memcached
str_memo_pub=','.join(map(str, list_memo_pub))
app.cache.set("list_memo_" + str(user_id), str_memo_pub)
cur.close()
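# set_mem() stores each user's memo ids as a comma-separated string, e.g.
# "3,7,12", under "list_memo_pri_<user_id>" (all memos) and
# "list_memo_<user_id>" (public memos only); memo() and memo_post() parse and
# append to the same format.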
if __name__ == "__main__":
load_config()
port = int(os.environ.get("PORT", '5000'))
app.run(debug=1, host='0.0.0.0', port=port)
else:
load_config()
|
|
__author__ = 'saeedamen' # Saeed Amen
#
# Copyright 2016-2020 Cuemacro - https://www.cuemacro.com / @cuemacro
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and limitations under the License.
#
"""
Here we show how to use VolStats to calculate various volatility metrics (like realized volatility, volatility risk
premium and the implied volatility addons)
Note, you will need to have a Bloomberg terminal (with blpapi Python library) to download the FX market data in order
to run most of these examples (FX spot, FX forwards, FX implied_vol volatility quotes and deposits)
"""
import pandas as pd
# For plotting
from chartpy import Chart, Style
# For loading market data
from findatapy.market import Market, MarketDataGenerator, MarketDataRequest
from findatapy.util.loggermanager import LoggerManager
# For doing the various volatility calculations
from finmarketpy.curve.volatility.volstats import VolStats
logger = LoggerManager().getLogger(__name__)
chart = Chart(engine='plotly')
market = Market(market_data_generator=MarketDataGenerator())
# Choose run_example = 0 for everything
# run_example = 1 - calculating the difference between realized and implied volatility over Brexit for GBPUSD
# run_example = 2 - calculating realized volatility using different minute frequencies over Brexit for GBPUSD
# run_example = 3 - calculating the implied volatility addon for GBPUSD around the Brexit vote
# run_example = 4 - compare recent implied vs realized volatility for EURUSD
run_example = 0
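# Rough intuition for the metrics used below (standard definitions rather than
# anything taken from the VolStats source): realized volatility is annualised
# from intraday returns sampled at a given minute frequency, the volatility
# risk premium compares implied volatility with the realized volatility
# subsequently delivered over the same tenor, and the implied volatility addon
# captures the extra implied vol quoted ahead of scheduled events (visible as
# a spike just before the Brexit vote in example 3).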
###### Looking at realized and implied volatility over GBPUSD in the overnight (ON) tenor
if run_example == 1 or run_example == 0:
    # Download all the market data for GBPUSD needed for pricing options (vol surface)
md_request = MarketDataRequest(start_date='01 Jun 2016', finish_date='02 Jul 2016',
data_source='bloomberg', cut='10AM', category='fx-vol-market',
tickers=['GBPUSD'],
cache_algo='cache_algo_return')
market_df = market.fetch_market(md_request)
# Download FX tick data for GBPUSD over Brexit vote and then convert into 1 minute data (open/high/low/close)
# which are necessary for calculating realised volatility
md_request = MarketDataRequest(start_date='01 Jun 2016', finish_date='02 Jul 2016',
data_source='dukascopy', freq='tick', category='fx', fields=['bid', 'ask'],
tickers=['GBPUSD'],
cache_algo='cache_algo_return')
from findatapy.timeseries import Calculations
calc = Calculations()
tick_data = market.fetch_market(md_request)
intraday_spot_df = calc.resample_tick_data_ohlc(tick_data, 'GBPUSD', freq='1min')
vol_stats = VolStats(market_df=market_df, intraday_spot_df=intraday_spot_df)
realized_vol = vol_stats.calculate_realized_vol('GBPUSD', tenor_label="ON", freq='intraday', freq_min_mult=1,
hour_of_day=10, minute_of_day=0, field='close', timezone_hour_minute='America/New_York') * 100
implied_vol = pd.DataFrame(market_df['GBPUSDVON.close'])
vrp = vol_stats.calculate_vol_risk_premium('GBPUSD', tenor_label='ON', implied_vol=implied_vol, realized_vol=realized_vol)
style = Style()
style.title = 'GBPUSD ON volatility over Brexit'
style.scale_factor = 3
style.source = 'Bloomberg'
# Plot all the volatility metrics
chart.plot(vrp, style=style)
# Plot the implied volatility bumped forward a day against the realized volatility calculated over that day
chart.plot(vrp[['GBPUSDUON.close', 'GBPUSDHON.close']], style=style)
###### Calculating realized volatility over Brexit vote in GBPUSD in ON/overnight tenor
# Showing the difference between time frequencies
if run_example == 2 or run_example == 0:
# Download FX tick data for GBPUSD over Brexit vote and then convert into 1 minute data (open/high/low/close)
# which are necessary for calculating realised volatility
md_request = MarketDataRequest(start_date='01 Jun 2016', finish_date='02 Jul 2016',
data_source='dukascopy', freq='tick', category='fx', fields=['bid', 'ask'],
tickers=['GBPUSD'],
cache_algo='cache_algo_return')
from findatapy.timeseries import Calculations
calc = Calculations()
intraday_spot_df = calc.resample_tick_data_ohlc(market.fetch_market(md_request), 'GBPUSD', freq='1min')['GBPUSD.close']
vol_stats = VolStats()
# Resample spot data at different minute intervals, and then calculate realized vols
minute_frequencies = [1, 2, 5, 10, 15, 30, 60]
realized_vol = []
    for min_freq in minute_frequencies:
        # 'min_freq' is used instead of 'min' to avoid shadowing the builtin
        min_df = pd.DataFrame(intraday_spot_df.resample(str(min_freq) + 'min').last().dropna())
        rv = vol_stats.calculate_realized_vol('GBPUSD', spot_df=min_df,
            tenor_label="ON", freq='intraday', freq_min_mult=min_freq,
            hour_of_day=10, minute_of_day=0, field='close', timezone_hour_minute='America/New_York') * 100
        rv.columns = [str(min_freq) + 'min']
        realized_vol.append(rv)
realized_vol = calc.join(realized_vol, how='outer')
style = Style()
style.title = 'GBPUSD ON realized volatility over Brexit with different minute sampling frequencies'
style.scale_factor = 3
style.source = 'Bloomberg'
style.color = 'Blues'
# Plot the volatilities with different sampling frequencies
chart.plot(realized_vol, style=style)
###### Look at the addon in the ON GBPUSD implied vol around Brexit, note the first month will be empty given the nature
# of the model
if run_example == 3 or run_example == 0:
    # Download all the market data for GBPUSD needed for pricing options (vol surface)
    # Note: 10AM prints for vol are no longer published by Bloomberg, so later values are a weighted average of the
    # Tokyo (TOK) and London (LDN) closes
md_request = MarketDataRequest(start_date='01 May 2016', finish_date='02 Jul 2016',
data_source='bloomberg', cut='10AM', category='fx-vol-market',
tickers=['GBPUSD'],
cache_algo='cache_algo_return')
market_df = market.fetch_market(md_request)
from findatapy.timeseries import Calculations
calc = Calculations()
vol_stats = VolStats(market_df=market_df)
implied_addon = vol_stats.calculate_implied_vol_addon('GBPUSD', tenor_label='ON').dropna()
style = Style()
style.title = 'GBPUSD ON implied volatility addon over Brexit'
style.scale_factor = 3
style.source = 'Bloomberg'
# Plot the implied volatility addon, note the large addon just before Brexit vote!
chart.plot(implied_addon, style=style)
###### Look at the statistics for recent period for EURUSD comparing implied vs realized
if run_example == 4 or run_example == 0:
import datetime
from datetime import timedelta
# Download past few months of data (BBG usually keeps a few months of intraday data)
# for FX vol and FX spot. We are downloading intraday vol data, because we want to get a snapshot
# at 1000 ET, which is the time at which FX options expire, so our dataset will cover every event
today = datetime.datetime.utcnow().date()
month_before = today - timedelta(days=60)
# month_before = '01 Nov 2020'; today = '01 Dec 2020'
asset = 'EURUSD'
    # Download all the market data needed for pricing options (vol surface)
md_request = MarketDataRequest(start_date=month_before, finish_date=today,
data_source='bloomberg', freq='intraday', fields='open',
tickers=[asset + 'VON'], vendor_tickers=[asset + 'VON BGN Curncy'],
cache_algo='cache_algo_return')
from findatapy.timeseries import Calculations, Filter
calc = Calculations()
filter = Filter()
freq_min_mult = 5
# Resample into 1 minute data and fill down all points
implied_vol_df = market.fetch_market(md_request)[asset +'VON.open'].resample('1min').first().fillna(method='ffill')
    # Filter data by 1000 New York time, convert back to UTC and remove any points outside of trading hours
    # Then strip the time of day from the timestamp
implied_vol_df = filter.filter_time_series_by_time_of_day_timezone(10, 0, implied_vol_df, timezone_of_snap='America/New_York')
implied_vol_df = filter.remove_out_FX_out_of_hours(implied_vol_df)
implied_vol_df.index = pd.to_datetime(implied_vol_df.index.date)
implied_vol_df = pd.DataFrame(implied_vol_df)
implied_vol_df.columns = [asset + 'VON.close']
# Download FX intraday spot data, which will be used to calculate realized volatility
md_request.tickers = asset; md_request.vendor_tickers = asset + ' BGN Curncy'
intraday_spot_df = market.fetch_market(md_request).resample(str(freq_min_mult) + 'min').first()
intraday_spot_df = filter.remove_out_FX_out_of_hours(intraday_spot_df).dropna()
intraday_spot_df.columns = [asset + '.close']
vol_stats = VolStats()
# Calculate realized vol with the intraday data, with daily cutoffs
realized_vol = vol_stats.calculate_realized_vol(
asset, tenor_label='ON', spot_df=intraday_spot_df, hour_of_day=10, minute_of_day=0,
freq='intraday', timezone_hour_minute='America/New_York', freq_min_mult=freq_min_mult) * 100.0
implied_vol_addon = vol_stats.calculate_implied_vol_addon(asset, implied_vol=implied_vol_df, tenor_label='ON',
adj_ON_friday=True).dropna()
vrp = vol_stats.calculate_vol_risk_premium(asset, tenor_label='ON', implied_vol=implied_vol_df, realized_vol=realized_vol,
adj_ON_friday=True)
style = Style()
style.title = asset + ' ON implied volatility vs realized'
style.scale_factor = 3
style.source = 'Bloomberg'
to_plot = vrp[[asset + 'UON.close', asset +'HON.close']].dropna()
chart.plot(to_plot, style=style)
    style.title = asset + ' ON implied volatility addon'
chart.plot(implied_vol_addon, style=style)
|
|
from django.db import migrations
class MigrationOptimizer(object):
"""
Powers the optimization process, where you provide a list of Operations
and you are returned a list of equal or shorter length - operations
are merged into one if possible.
For example, a CreateModel and an AddField can be optimized into a
new CreateModel, and CreateModel and DeleteModel can be optimized into
nothing.
"""
def optimize(self, operations, app_label=None):
"""
Main optimization entry point. Pass in a list of Operation instances,
get out a new list of Operation instances.
Unfortunately, due to the scope of the optimization (two combinable
operations might be separated by several hundred others), this can't be
done as a peephole optimization with checks/output implemented on
the Operations themselves; instead, the optimizer looks at each
individual operation and scans forwards in the list to see if there
are any matches, stopping at boundaries - operations which can't
be optimized over (RunSQL, operations on the same field/model, etc.)
The inner loop is run until the starting list is the same as the result
list, and then the result is returned. This means that operation
optimization must be stable and always return an equal or shorter list.
The app_label argument is optional, but if you pass it you'll get more
efficient optimization.
"""
# Internal tracking variable for test assertions about # of loops
self._iterations = 0
while True:
result = self.optimize_inner(operations, app_label)
self._iterations += 1
if result == operations:
return result
operations = result
def optimize_inner(self, operations, app_label=None):
"""
Inner optimization loop.
"""
new_operations = []
for i, operation in enumerate(operations):
# Compare it to each operation after it
for j, other in enumerate(operations[i + 1:]):
result = self.reduce(operation, other)
if result is not None:
# Optimize! Add result, then remaining others, then return
new_operations.extend(result)
new_operations.extend(operations[i + 1:i + 1 + j])
new_operations.extend(operations[i + j + 2:])
return new_operations
if not self.can_optimize_through(operation, other, app_label):
new_operations.append(operation)
break
else:
new_operations.append(operation)
return new_operations
#### REDUCTION ####
def reduce(self, operation, other):
"""
Either returns a list of zero, one or two operations,
or None, meaning this pair cannot be optimized.
"""
submethods = [
(
migrations.CreateModel,
migrations.DeleteModel,
self.reduce_model_create_delete,
),
(
migrations.AlterModelTable,
migrations.DeleteModel,
self.reduce_model_alter_delete,
),
(
migrations.AlterUniqueTogether,
migrations.DeleteModel,
self.reduce_model_alter_delete,
),
(
migrations.AlterIndexTogether,
migrations.DeleteModel,
self.reduce_model_alter_delete,
),
(
migrations.CreateModel,
migrations.RenameModel,
self.reduce_model_create_rename,
),
(
migrations.RenameModel,
migrations.RenameModel,
self.reduce_model_rename_self,
),
(
migrations.CreateModel,
migrations.AddField,
self.reduce_create_model_add_field,
),
(
migrations.CreateModel,
migrations.AlterField,
self.reduce_create_model_alter_field,
),
(
migrations.CreateModel,
migrations.RemoveField,
self.reduce_create_model_remove_field,
),
(
migrations.AddField,
migrations.AlterField,
self.reduce_add_field_alter_field,
),
(
migrations.AddField,
migrations.RemoveField,
self.reduce_add_field_delete_field,
),
(
migrations.AlterField,
migrations.RemoveField,
self.reduce_alter_field_delete_field,
),
(
migrations.AddField,
migrations.RenameField,
self.reduce_add_field_rename_field,
),
(
migrations.AlterField,
migrations.RenameField,
self.reduce_alter_field_rename_field,
),
(
migrations.CreateModel,
migrations.RenameField,
self.reduce_create_model_rename_field,
),
(
migrations.RenameField,
migrations.RenameField,
self.reduce_rename_field_self,
),
]
for ia, ib, om in submethods:
if isinstance(operation, ia) and isinstance(other, ib):
return om(operation, other)
return None
def reduce_model_create_delete(self, operation, other):
"""
Folds a CreateModel and a DeleteModel into nothing.
"""
if operation.name.lower() == other.name.lower():
return []
def reduce_model_alter_delete(self, operation, other):
"""
Folds an AlterModelSomething and a DeleteModel into just delete.
"""
if operation.name.lower() == other.name.lower():
return [other]
def reduce_model_create_rename(self, operation, other):
"""
Folds a model rename into its create
"""
if operation.name.lower() == other.old_name.lower():
return [
migrations.CreateModel(
other.new_name,
fields=operation.fields,
options=operation.options,
bases=operation.bases,
)
]
def reduce_model_rename_self(self, operation, other):
"""
Folds a model rename into another one
"""
if operation.new_name.lower() == other.old_name.lower():
return [
migrations.RenameModel(
operation.old_name,
other.new_name,
)
]
def reduce_create_model_add_field(self, operation, other):
if operation.name.lower() == other.model_name.lower():
return [
migrations.CreateModel(
operation.name,
fields=operation.fields + [(other.name, other.field)],
options=operation.options,
bases=operation.bases,
)
]
def reduce_create_model_alter_field(self, operation, other):
if operation.name.lower() == other.model_name.lower():
return [
migrations.CreateModel(
operation.name,
fields=[
(n, other.field if n == other.name else v)
for n, v in operation.fields
],
options=operation.options,
bases=operation.bases,
)
]
def reduce_create_model_rename_field(self, operation, other):
if operation.name.lower() == other.model_name.lower():
return [
migrations.CreateModel(
operation.name,
fields=[
(other.new_name if n == other.old_name else n, v)
for n, v in operation.fields
],
options=operation.options,
bases=operation.bases,
)
]
def reduce_create_model_remove_field(self, operation, other):
if operation.name.lower() == other.model_name.lower():
return [
migrations.CreateModel(
operation.name,
fields=[
(n, v)
for n, v in operation.fields
if n.lower() != other.name.lower()
],
options=operation.options,
bases=operation.bases,
)
]
def reduce_add_field_alter_field(self, operation, other):
if operation.model_name.lower() == other.model_name.lower() and operation.name.lower() == other.name.lower():
return [
migrations.AddField(
model_name=operation.model_name,
name=operation.name,
field=other.field,
)
]
def reduce_add_field_delete_field(self, operation, other):
if operation.model_name.lower() == other.model_name.lower() and operation.name.lower() == other.name.lower():
return []
def reduce_alter_field_delete_field(self, operation, other):
if operation.model_name.lower() == other.model_name.lower() and operation.name.lower() == other.name.lower():
return [other]
def reduce_add_field_rename_field(self, operation, other):
if operation.model_name.lower() == other.model_name.lower() and operation.name.lower() == other.old_name.lower():
return [
migrations.AddField(
model_name=operation.model_name,
name=other.new_name,
field=operation.field,
)
]
def reduce_alter_field_rename_field(self, operation, other):
if operation.model_name.lower() == other.model_name.lower() and operation.name.lower() == other.old_name.lower():
return [
other,
migrations.AlterField(
model_name=operation.model_name,
name=other.new_name,
field=operation.field,
),
]
def reduce_rename_field_self(self, operation, other):
if operation.model_name.lower() == other.model_name.lower() and operation.new_name.lower() == other.old_name.lower():
return [
migrations.RenameField(
operation.model_name,
operation.old_name,
other.new_name,
),
]
#### THROUGH CHECKS ####
def can_optimize_through(self, operation, other, app_label=None):
"""
Returns True if it's possible to optimize 'operation' with something
the other side of 'other'. This is possible if, for example, they
affect different models.
"""
MODEL_LEVEL_OPERATIONS = (
migrations.CreateModel,
migrations.AlterModelTable,
migrations.AlterUniqueTogether,
migrations.AlterIndexTogether,
)
FIELD_LEVEL_OPERATIONS = (
migrations.AddField,
migrations.AlterField,
)
# If it's a model level operation, let it through if there's
# nothing that looks like a reference to us in 'other'.
if isinstance(operation, MODEL_LEVEL_OPERATIONS):
if not other.references_model(operation.name, app_label):
return True
# If it's field level, only let it through things that don't reference
# the field (which includes not referencing the model)
if isinstance(operation, FIELD_LEVEL_OPERATIONS):
if not other.references_field(operation.model_name, operation.name, app_label):
return True
return False
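# A minimal usage sketch (not part of Django itself): optimize() folds a
# CreateModel followed by an AddField on the same model into a single
# CreateModel. The model and field names below are illustrative assumptions.
if __name__ == "__main__":
    from django.db import models

    optimizer = MigrationOptimizer()
    result = optimizer.optimize(
        [
            migrations.CreateModel(
                "Author",
                fields=[("id", models.AutoField(primary_key=True))],
            ),
            migrations.AddField(
                model_name="author",
                name="age",
                field=models.IntegerField(default=0),
            ),
        ],
        app_label="example",
    )
    print([op.__class__.__name__ for op in result])  # -> ['CreateModel']
    print(len(result[0].fields))                     # -> 2 (id + age)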
|