# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Openstack logging handler.
This module adds to logging functionality by adding the option to specify
a context object when calling the various log methods. If the context object
is not specified, default formatting is used. Additionally, an instance uuid
may be passed as part of the log message, which is intended to make it easier
for admins to find messages related to a specific instance.
It also allows setting of formatting information through conf.
"""
import inspect
import itertools
import logging
import logging.config
import logging.handlers
import os
import sys
import traceback
from oslo.config import cfg
from six import moves
from quaker.openstack.common.gettextutils import _ # noqa
from quaker.openstack.common import importutils
from quaker.openstack.common import jsonutils
from quaker.openstack.common import local
_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
common_cli_opts = [
cfg.BoolOpt('debug',
short='d',
default=False,
help='Print debugging output (set logging level to '
'DEBUG instead of default WARNING level).'),
cfg.BoolOpt('verbose',
short='v',
default=False,
help='Print more verbose output (set logging level to '
'INFO instead of default WARNING level).'),
]
logging_cli_opts = [
cfg.StrOpt('log-config',
metavar='PATH',
help='If this option is specified, the logging configuration '
'file specified is used and overrides any other logging '
'options specified. Please see the Python logging module '
'documentation for details on logging configuration '
'files.'),
cfg.StrOpt('log-format',
default=None,
metavar='FORMAT',
help='DEPRECATED. '
'A logging.Formatter log message format string which may '
'use any of the available logging.LogRecord attributes. '
'This option is deprecated. Please use '
'logging_context_format_string and '
'logging_default_format_string instead.'),
cfg.StrOpt('log-date-format',
default=_DEFAULT_LOG_DATE_FORMAT,
metavar='DATE_FORMAT',
help='Format string for %%(asctime)s in log records. '
'Default: %(default)s'),
cfg.StrOpt('log-file',
metavar='PATH',
deprecated_name='logfile',
help='(Optional) Name of log file to output to. '
'If no default is set, logging will go to stdout.'),
cfg.StrOpt('log-dir',
deprecated_name='logdir',
help='(Optional) The base directory used for relative '
'--log-file paths'),
cfg.BoolOpt('use-syslog',
default=False,
help='Use syslog for logging.'),
cfg.StrOpt('syslog-log-facility',
default='LOG_USER',
help='syslog facility to receive log lines')
]
generic_log_opts = [
cfg.BoolOpt('use_stderr',
default=True,
help='Log output to standard error')
]
log_opts = [
cfg.StrOpt('logging_context_format_string',
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [%(request_id)s %(user)s %(tenant)s] '
'%(instance)s%(message)s',
help='format string to use for log messages with context'),
cfg.StrOpt('logging_default_format_string',
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [-] %(instance)s%(message)s',
help='format string to use for log messages without context'),
cfg.StrOpt('logging_debug_format_suffix',
default='%(funcName)s %(pathname)s:%(lineno)d',
help='data to append to log format when level is DEBUG'),
cfg.StrOpt('logging_exception_prefix',
default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s '
'%(instance)s',
help='prefix each line of exception output with this format'),
cfg.ListOpt('default_log_levels',
default=[
'amqplib=WARN',
'sqlalchemy=WARN',
'boto=WARN',
'suds=INFO',
'keystone=INFO',
'eventlet.wsgi.server=WARN'
],
help='list of logger=LEVEL pairs'),
cfg.BoolOpt('publish_errors',
default=False,
help='publish error events'),
cfg.BoolOpt('fatal_deprecations',
default=False,
help='make deprecations fatal'),
# NOTE(mikal): there are two options here because sometimes we are handed
# a full instance (and could include more information), and other times we
# are just handed a UUID for the instance.
cfg.StrOpt('instance_format',
default='[instance: %(uuid)s] ',
help='If an instance is passed with the log message, format '
'it like this'),
cfg.StrOpt('instance_uuid_format',
default='[instance: %(uuid)s] ',
help='If an instance UUID is passed with the log message, '
'format it like this'),
]
CONF = cfg.CONF
CONF.register_cli_opts(common_cli_opts)
CONF.register_cli_opts(logging_cli_opts)
CONF.register_opts(generic_log_opts)
CONF.register_opts(log_opts)
# our new audit level
# NOTE(jkoelker) Since we synthesized an audit level, make the logging
# module aware of it so it acts like other levels.
logging.AUDIT = logging.INFO + 1
logging.addLevelName(logging.AUDIT, 'AUDIT')
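# Once registered, the synthesized level behaves like any built-in level
# through the adapters defined below, e.g. (sketch):
#
#     LOG = getLogger(__name__)
#     LOG.audit("user %s performed a privileged action", user_id)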
try:
NullHandler = logging.NullHandler
except AttributeError: # NOTE(jkoelker) NullHandler added in Python 2.7
class NullHandler(logging.Handler):
def handle(self, record):
pass
def emit(self, record):
pass
def createLock(self):
self.lock = None
def _dictify_context(context):
if context is None:
return None
if not isinstance(context, dict) and getattr(context, 'to_dict', None):
context = context.to_dict()
return context
def _get_binary_name():
return os.path.basename(inspect.stack()[-1][1])
def _get_log_file_path(binary=None):
logfile = CONF.log_file
logdir = CONF.log_dir
if logfile and not logdir:
return logfile
if logfile and logdir:
return os.path.join(logdir, logfile)
if logdir:
binary = binary or _get_binary_name()
return '%s.log' % (os.path.join(logdir, binary),)
class BaseLoggerAdapter(logging.LoggerAdapter):
def audit(self, msg, *args, **kwargs):
self.log(logging.AUDIT, msg, *args, **kwargs)
class LazyAdapter(BaseLoggerAdapter):
def __init__(self, name='unknown', version='unknown'):
self._logger = None
self.extra = {}
self.name = name
self.version = version
@property
def logger(self):
if not self._logger:
self._logger = getLogger(self.name, self.version)
return self._logger
class ContextAdapter(BaseLoggerAdapter):
warn = logging.LoggerAdapter.warning
def __init__(self, logger, project_name, version_string):
self.logger = logger
self.project = project_name
self.version = version_string
@property
def handlers(self):
return self.logger.handlers
def deprecated(self, msg, *args, **kwargs):
stdmsg = _("Deprecated: %s") % msg
if CONF.fatal_deprecations:
self.critical(stdmsg, *args, **kwargs)
raise DeprecatedConfig(msg=stdmsg)
else:
self.warn(stdmsg, *args, **kwargs)
def process(self, msg, kwargs):
# NOTE(mrodden): catch any Message/other object and
# coerce to unicode before they can get
# to the python logging and possibly
# cause string encoding trouble
if not isinstance(msg, basestring):
msg = unicode(msg)
if 'extra' not in kwargs:
kwargs['extra'] = {}
extra = kwargs['extra']
context = kwargs.pop('context', None)
if not context:
context = getattr(local.store, 'context', None)
if context:
extra.update(_dictify_context(context))
instance = kwargs.pop('instance', None)
instance_uuid = (extra.get('instance_uuid', None) or
kwargs.pop('instance_uuid', None))
instance_extra = ''
if instance:
instance_extra = CONF.instance_format % instance
elif instance_uuid:
instance_extra = (CONF.instance_uuid_format
% {'uuid': instance_uuid})
extra.update({'instance': instance_extra})
extra.update({"project": self.project})
extra.update({"version": self.version})
extra['extra'] = extra.copy()
return msg, kwargs
class JSONFormatter(logging.Formatter):
def __init__(self, fmt=None, datefmt=None):
# NOTE(jkoelker) we ignore the fmt argument, but it's still there
# since logging.config.fileConfig passes it.
self.datefmt = datefmt
def formatException(self, ei, strip_newlines=True):
lines = traceback.format_exception(*ei)
if strip_newlines:
lines = [itertools.ifilter(
lambda x: x,
line.rstrip().splitlines()) for line in lines]
lines = list(itertools.chain(*lines))
return lines
def format(self, record):
message = {'message': record.getMessage(),
'asctime': self.formatTime(record, self.datefmt),
'name': record.name,
'msg': record.msg,
'args': record.args,
'levelname': record.levelname,
'levelno': record.levelno,
'pathname': record.pathname,
'filename': record.filename,
'module': record.module,
'lineno': record.lineno,
'funcname': record.funcName,
'created': record.created,
'msecs': record.msecs,
'relative_created': record.relativeCreated,
'thread': record.thread,
'thread_name': record.threadName,
'process_name': record.processName,
'process': record.process,
'traceback': None}
if hasattr(record, 'extra'):
message['extra'] = record.extra
if record.exc_info:
message['traceback'] = self.formatException(record.exc_info)
return jsonutils.dumps(message)
def _create_logging_excepthook(product_name):
def logging_excepthook(type, value, tb):
extra = {}
if CONF.verbose:
extra['exc_info'] = (type, value, tb)
getLogger(product_name).critical(str(value), **extra)
return logging_excepthook
class LogConfigError(Exception):
message = _('Error loading logging config %(log_config)s: %(err_msg)s')
def __init__(self, log_config, err_msg):
self.log_config = log_config
self.err_msg = err_msg
def __str__(self):
return self.message % dict(log_config=self.log_config,
err_msg=self.err_msg)
def _load_log_config(log_config):
try:
logging.config.fileConfig(log_config)
except moves.configparser.Error as exc:
raise LogConfigError(log_config, str(exc))
def setup(product_name):
"""Setup logging."""
if CONF.log_config:
_load_log_config(CONF.log_config)
else:
_setup_logging_from_conf()
sys.excepthook = _create_logging_excepthook(product_name)
def set_defaults(logging_context_format_string):
cfg.set_defaults(log_opts,
logging_context_format_string=
logging_context_format_string)
def _find_facility_from_conf():
facility_names = logging.handlers.SysLogHandler.facility_names
facility = getattr(logging.handlers.SysLogHandler,
CONF.syslog_log_facility,
None)
if facility is None and CONF.syslog_log_facility in facility_names:
facility = facility_names.get(CONF.syslog_log_facility)
if facility is None:
valid_facilities = facility_names.keys()
consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON',
'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS',
'LOG_AUTH', 'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP',
'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3',
'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7']
valid_facilities.extend(consts)
raise TypeError(_('syslog facility must be one of: %s') %
', '.join("'%s'" % fac
for fac in valid_facilities))
return facility
def _setup_logging_from_conf():
log_root = getLogger(None).logger
for handler in log_root.handlers:
log_root.removeHandler(handler)
if CONF.use_syslog:
facility = _find_facility_from_conf()
syslog = logging.handlers.SysLogHandler(address='/dev/log',
facility=facility)
log_root.addHandler(syslog)
logpath = _get_log_file_path()
if logpath:
filelog = logging.handlers.WatchedFileHandler(logpath)
log_root.addHandler(filelog)
if CONF.use_stderr:
streamlog = ColorHandler()
log_root.addHandler(streamlog)
elif not CONF.log_file:
# pass sys.stdout as a positional argument
# python2.6 calls the argument strm, in 2.7 it's stream
streamlog = logging.StreamHandler(sys.stdout)
log_root.addHandler(streamlog)
if CONF.publish_errors:
handler = importutils.import_object(
"quaker.openstack.common.log_handler.PublishErrorsHandler",
logging.ERROR)
log_root.addHandler(handler)
datefmt = CONF.log_date_format
for handler in log_root.handlers:
# NOTE(alaski): CONF.log_format overrides everything currently. This
# should be deprecated in favor of context aware formatting.
if CONF.log_format:
handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
datefmt=datefmt))
log_root.info('Deprecated: log_format is now deprecated and will '
'be removed in the next release')
else:
handler.setFormatter(ContextFormatter(datefmt=datefmt))
if CONF.debug:
log_root.setLevel(logging.DEBUG)
elif CONF.verbose:
log_root.setLevel(logging.INFO)
else:
log_root.setLevel(logging.WARNING)
for pair in CONF.default_log_levels:
mod, _sep, level_name = pair.partition('=')
level = logging.getLevelName(level_name)
logger = logging.getLogger(mod)
logger.setLevel(level)
_loggers = {}
def getLogger(name='unknown', version='unknown'):
if name not in _loggers:
_loggers[name] = ContextAdapter(logging.getLogger(name),
name,
version)
return _loggers[name]
def getLazyLogger(name='unknown', version='unknown'):
"""Returns lazy logger.
Creates a pass-through logger that does not create the real logger
until it is really needed and delegates all calls to the real logger
once it is created.
"""
return LazyAdapter(name, version)
class WritableLogger(object):
"""A thin wrapper that responds to `write` and logs."""
def __init__(self, logger, level=logging.INFO):
self.logger = logger
self.level = level
def write(self, msg):
self.logger.log(self.level, msg)
class ContextFormatter(logging.Formatter):
"""A context.RequestContext aware formatter configured through flags.
The flags used to set format strings are: logging_context_format_string
and logging_default_format_string. You can also specify
logging_debug_format_suffix to append extra formatting if the log level is
debug.
For information about what variables are available for the formatter see:
http://docs.python.org/library/logging.html#formatter
"""
def format(self, record):
"""Uses contextstring if request_id is set, otherwise default."""
# NOTE(sdague): default the fancier formatting params
# to an empty string so we don't throw an exception if
# they get used
for key in ('instance', 'color'):
if key not in record.__dict__:
record.__dict__[key] = ''
if record.__dict__.get('request_id', None):
self._fmt = CONF.logging_context_format_string
else:
self._fmt = CONF.logging_default_format_string
if (record.levelno == logging.DEBUG and
CONF.logging_debug_format_suffix):
self._fmt += " " + CONF.logging_debug_format_suffix
# Cache this on the record, Logger will respect our formatted copy
if record.exc_info:
record.exc_text = self.formatException(record.exc_info, record)
return logging.Formatter.format(self, record)
def formatException(self, exc_info, record=None):
"""Format exception output with CONF.logging_exception_prefix."""
if not record:
return logging.Formatter.formatException(self, exc_info)
stringbuffer = moves.StringIO()
traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
None, stringbuffer)
lines = stringbuffer.getvalue().split('\n')
stringbuffer.close()
if CONF.logging_exception_prefix.find('%(asctime)') != -1:
record.asctime = self.formatTime(record, self.datefmt)
formatted_lines = []
for line in lines:
pl = CONF.logging_exception_prefix % record.__dict__
fl = '%s%s' % (pl, line)
formatted_lines.append(fl)
return '\n'.join(formatted_lines)
class ColorHandler(logging.StreamHandler):
LEVEL_COLORS = {
logging.DEBUG: '\033[00;32m', # GREEN
logging.INFO: '\033[00;36m', # CYAN
logging.AUDIT: '\033[01;36m', # BOLD CYAN
logging.WARN: '\033[01;33m', # BOLD YELLOW
logging.ERROR: '\033[01;31m', # BOLD RED
logging.CRITICAL: '\033[01;31m', # BOLD RED
}
def format(self, record):
record.color = self.LEVEL_COLORS[record.levelno]
return logging.StreamHandler.format(self, record)
class DeprecatedConfig(Exception):
message = _("Fatal call to deprecated config: %(msg)s")
def __init__(self, msg):
super(Exception, self).__init__(self.message % dict(msg=msg))
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''PyVows' main entry point. Contains code for command-line I/O,
running tests, and the almighty `if __name__ == '__main__': main()`.
'''
# pyVows testing engine
# https://github.com/heynemann/pyvows
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 Bernardo Heynemann [email protected]
from __future__ import division, print_function
import argparse
import inspect
import os
from os.path import isfile, split
import sys
import tempfile
try:
from coverage import coverage
COVERAGE_AVAILABLE = True
except ImportError:
COVERAGE_AVAILABLE = False
from pyvows.color import yellow, Style, Fore
from pyvows.reporting import VowsDefaultReporter
from pyvows.reporting.xunit import XUnitReporter
from pyvows import version
#-------------------------------------------------------------------------------------------------
class Messages(object): # pragma: no cover
'''A simple container for command-line interface strings.'''
summary = 'Run PyVows tests.'
path = 'Directory to look for vows recursively. If a file is passed, ' + \
'the file will be the target for vows. (default: %(default)r).'
pattern = 'Pattern of vows files. (default: %(default)r)'
verbosity = 'Verbosity. May be specified many times to increase verbosity (default: -vv)'
cover = 'Show the code coverage of tests. (default: %(default)s)'
cover_package = 'Verify coverage of %(metavar)s. May be specified many times. (default: all packages)'
cover_omit = 'Exclude %(metavar)s from coverage. May be specified many times. (default: no files)'
cover_threshold = 'Coverage below %(metavar)s is considered a failure. (default: %(default)s)'
cover_report = 'Store coverage report as %(metavar)s. (default: %(default)r)'
xunit_output = 'Enable XUnit output. (default: %(default)s)'
xunit_file = 'Store XUnit output as %(metavar)s. (default: %(default)r)'
exclude = 'Exclude tests and contexts that match regex-pattern %(metavar)s [Mutually exclusive with --include]'
include = 'Include only tests and contexts that match regex-pattern %(metavar)s [Mutually exclusive with --exclude]'
profile = 'Prints the 10 slowest topics. (default: %(default)s)'
profile_threshold = 'Tests taking longer than %(metavar)s seconds are considered slow. (default: %(default)s)'
no_color = 'Turn off colorized output. (default: %(default)s)'
progress = 'Show progress ticks during testing. (default: %(default)s)'
template = 'Print a PyVows test file template. (Disables testing)'
capture_output = 'Capture stdout and stderr during test execution (default: %(default)s)'
class Parser(argparse.ArgumentParser):
def __init__(self, description=Messages.summary, **kwargs):
super(Parser, self).__init__(
description=description,
**kwargs)
#Easy underlining, if we ever need it in the future
#uline = lambda text: '\033[4m{0}\033[24m'.format(text)
metavar = lambda metavar: '{0}{metavar}{0}'.format(Style.RESET_ALL, metavar=metavar.upper())
self.add_argument('-p', '--pattern', default='*_vows.py', help=Messages.pattern, metavar=metavar('pattern'))
### Filtering
self.add_argument('-e', '--exclude', action='append', default=[], help=Messages.exclude, metavar=metavar('exclude'))
self.add_argument('-i', '--include', action='append', default=[], help=Messages.include, metavar=metavar('include'))
### Coverage
cover_group = self.add_argument_group('Test Coverage')
cover_group.add_argument('-c', '--cover', action='store_true', default=False, help=Messages.cover)
cover_group.add_argument(
'-l', '--cover-package', action='append', default=[],
help=Messages.cover_package, metavar=metavar('package')
)
cover_group.add_argument(
'-o', '--cover-omit', action='append', default=[],
help=Messages.cover_omit, metavar=metavar('file')
)
cover_group.add_argument(
'-t', '--cover-threshold', type=float, default=80.0,
help=Messages.cover_threshold, metavar=metavar('number')
)
cover_group.add_argument(
'-r', '--cover-report', action='store', default=None,
help=Messages.cover_report, metavar=metavar('file')
)
### XUnit
xunit_group = self.add_argument_group('XUnit')
xunit_group.add_argument('-x', '--xunit-output', action='store_true', default=False, help=Messages.xunit_output)
xunit_group.add_argument(
'-f', '--xunit-file', action='store', default='pyvows.xml',
help=Messages.xunit_file, metavar=metavar('file')
)
### Profiling
profile_group = self.add_argument_group('Profiling')
profile_group.add_argument('--profile', action='store_true', dest='profile', default=False, help=Messages.profile)
profile_group.add_argument(
'--profile-threshold', type=float, default=0.1,
help=Messages.profile_threshold, metavar=metavar('num')
)
### Aux/Unconventional
aux_group = self.add_argument_group('Utility')
aux_group.add_argument('--template', action='store_true', dest='template', default=False, help=Messages.template)
### Misc
self.add_argument('--no-color', action='store_true', default=False, help=Messages.no_color)
self.add_argument('--progress', action='store_true', dest='progress', default=False, help=Messages.progress)
self.add_argument('--version', action='version', version='%(prog)s {0}'.format(version.to_str()))
self.add_argument('--capture-output', action='store_true', default=False, help=Messages.capture_output)
self.add_argument('-v', action='append_const', dest='verbosity', const=1, help=Messages.verbosity)
self.add_argument('path', nargs='?', default=os.curdir, help=Messages.path)
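# An illustrative command line built from the options above (the package and
# path names are hypothetical):
#
#     pyvows -c -l mypackage --cover-threshold 90 tests/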
def run(path, pattern, verbosity, show_progress, exclusion_patterns=None, inclusion_patterns=None, capture_output=False):
# FIXME: Add Docstring
# This calls Vows.run(), which then calls VowsRunner.run()
# needs to be imported here, else the no-color option won't work
from pyvows.core import Vows
if exclusion_patterns:
Vows.exclude(exclusion_patterns)
if inclusion_patterns:
Vows.include(inclusion_patterns)
Vows.collect(path, pattern)
on_success = show_progress and VowsDefaultReporter.on_vow_success or None
on_error = show_progress and VowsDefaultReporter.on_vow_error or None
result = Vows.run(on_success, on_error, capture_output)
return result
def main():
'''PyVows' runtime implementation.
'''
# needs to be imported here, else the no-color option won't work
from pyvows.reporting import VowsDefaultReporter
arguments = Parser().parse_args()
if arguments.template:
from pyvows.utils import template
template()
sys.exit() # Exit after printing template, since it's
# supposed to be redirected from STDOUT by the user
path, pattern = arguments.path, arguments.pattern
if path and isfile(path):
path, pattern = split(path)
if not path:
path = os.curdir
if arguments.no_color:
for color_name, value in inspect.getmembers(Fore):
if not color_name.startswith('_'):
setattr(Fore, color_name, '')
if arguments.cover and COVERAGE_AVAILABLE:
cov = coverage(source=arguments.cover_package,
omit=arguments.cover_omit)
cov.erase()
cov.start()
verbosity = len(arguments.verbosity) if arguments.verbosity else 2
result = run(
path,
pattern,
verbosity,
arguments.progress,
exclusion_patterns=arguments.exclude,
inclusion_patterns=arguments.include,
capture_output=arguments.capture_output
)
reporter = VowsDefaultReporter(result, verbosity)
# Print test results first
reporter.pretty_print()
# Print profile if necessary
if arguments.profile:
reporter.print_profile(arguments.profile_threshold)
# Print coverage if necessary
if result.successful and arguments.cover:
# if coverage was requested, but unavailable, warn the user
if not COVERAGE_AVAILABLE:
print()
print(yellow('WARNING: Cover disabled because coverage could not be found.'))
print(yellow('Make sure it is installed and accessible.'))
print()
# otherwise, we're good
else:
cov.stop()
xml = ''
try:
with tempfile.NamedTemporaryFile() as tmp:
cov.xml_report(outfile=tmp.name)
tmp.seek(0)
xml = tmp.read()
except Exception:
err = sys.exc_info()[1]
print("Could not run coverage. Error: %s" % err)
if xml:
if arguments.cover_report:
with open(arguments.cover_report, 'wb') as report:
report.write(xml)
arguments.cover_threshold /= 100.0
reporter.print_coverage(xml, arguments.cover_threshold)
# Write XUnit if necessary
if arguments.xunit_output:
xunit = XUnitReporter(result)
xunit.write_report(arguments.xunit_file)
sys.exit(result.errored_tests)
if __name__ == '__main__':
main()
#===========================================================================
#
# Copyright (c) 2014, California Institute of Technology.
# U.S. Government Sponsorship under NASA Contract NAS7-03001 is
# acknowledged. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#===========================================================================
""": A class for managing styles."""
__version__ = "$Revision: #1 $"
#===========================================================================
import os
import os.path
from . import types as S
from .MplStyle import MplStyle
import matplotlib as MPL
#===========================================================================
__all__ = [ 'MplStyleManager' ]
# Some global variables
MPLSTYLE_CLASS = MplStyle
MPLSTYLE_EXTENSION = "mplstyle"
MPLSTYLE_PREFIX = "MPL"
MPLSTYLE_CUSTOM_FUNC = "applyStyle"
MPLSTYLE_HEADER = """
#======================================================================
#
# matplotlib style file
#
# This file is automatically generated by mplStyle.MplStyleManager
# additional python code that is not directly setting properties on the
# style in this file will be lost the next time this file is written.
#
#======================================================================
"""
#===========================================================================
class MplStyleManager( S.StyleManager ):
""": An object used to manage one or more Style classes.
"""
#-----------------------------------------------------------------------
def __init__( self ):
""": Create a new MplStyleManager object.
"""
S.StyleManager.__init__( self, MPLSTYLE_CLASS,
MPLSTYLE_EXTENSION,
MPLSTYLE_PREFIX )
#-----------------------------------------------------------------------
def _loadFromFile( self, fname ):
""": Load the specified style file.
= INPUT VARIABLES
- fname The path of the file to load.
= RETURN VALUE
- Returns the new style that results from loading from the specified file.
"""
# Allow style files to use some variables.
createData = lambda : {
'MplStyle' : MplStyle,
}
data = createData()
try:
execfile( fname, data )
except Exception, e:
msg = "MplStyleManager had an error loading the file '%s'" % fname
raise S.util.mergeExceptions( e, msg )
if 'style' in data:
style = data['style']
else:
msg = "MplStyleManager is unable to load the style file '%s' " \
"because there was no value named 'style' of type 'MplStyle' " \
"found." % (fname,)
raise Exception( msg )
if not isinstance( style, MplStyle ):
msg = "MplStyleManager is unable to load the style file '%s' " \
"because the value named 'style' was expected to be of type " \
"'MplStyle', but was instead of type '%s'" % \
(fname, style.__class__.__name__)
raise Exception( msg )
# Load the custom file
custom = os.path.dirname( fname )
customBase, customExt = os.path.splitext( fname )
custom = os.path.join( custom, ( "%s_custom%s" % (customBase,
customExt) ) )
if os.path.exists( custom ):
customData = createData()
execfile( custom, customData )
if MPLSTYLE_CUSTOM_FUNC in customData:
style.custom = customData[MPLSTYLE_CUSTOM_FUNC]
else:
msg = "MplStyleManager encountered an error while loading the " \
"style '%s'. A custom script was found, but the expected " \
"entry point '%s' was not found in the file.\nCustom File: " \
"'%s'" % (style.name, MPLSTYLE_CUSTOM_FUNC, custom)
raise Exception( msg )
return style
#-----------------------------------------------------------------------
def _writeSubStyle( self, fout, style, prefix ):
""": Write the style to the file
= INPUT VARIABLES
- fout The output file we are writing to
- style The sub-style to write.
- prefix The prefix to add to the beginning of each line.
"""
propertyNames = style.propertyNames()
for name in propertyNames:
value = getattr( style, name )
if value is None:
continue
if isinstance( value, str ) or isinstance( value, unicode ):
value = "'%s'" % value
if isinstance( value, S.SubStyle ):
self._writeSubStyle( fout, value, "%s.%s" % (prefix, name) )
else:
fout.write( "%s.%s = %s\n" % (prefix, name, value) )
#-----------------------------------------------------------------------
def _saveToFile( self, style, fname ):
""": Save the style to persistent file.
This will write the given style to the named file overwriting the file if
it already exists.
= INPUT VARIABLES
- style The style to save to a file.
- fname The name of the file to save the style to.
"""
with open( fname, 'w' ) as fout:
fout.write( MPLSTYLE_HEADER )
fout.write( "style = MplStyle( '%s' )\n" % (style.name,) )
self._writeSubStyle( fout, style, 'style' )
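# For reference, a file produced by _saveToFile looks roughly like the
# following; the property names are illustrative and depend on the style
# being saved:
#
#     style = MplStyle( 'presentation' )
#     style.text.size = 14
#     style.figure.bgColor = 'white'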
#-----------------------------------------------------------------------
def _deleteStyleFile( self, fname ):
""": Delete the persistent files for a style.
= INPUT VARIABLES
- fname The name of the style file to delete.
"""
# Remove the file
os.remove( fname )
# Check for a custom script file and remove it.
custom = os.path.dirname( fname )
customBase, customExt = os.path.splitext( fname )
custom = os.path.join( custom, ( "%s_custom%s" % (customBase,
customExt) ) )
if os.path.exists( custom ):
os.remove( custom )
#-----------------------------------------------------------------------
def _create( self, name, properties, parent, custom, **kwargs ):
""": Create a new style with the given name.
= INPUT VARIABLES
- name The name to give to the newly created style.
- properties Initial property values to set in the newly created style.
- parent The name of an existing style to use as the parent of the
newly created style.
- custom A callable object or function that will be passed the object
that needs styling.
- kwargs Any extra keyword arguments are passed into the style
constructor.
"""
return MplStyle( name, properties, parent, custom )
#-----------------------------------------------------------------------
"""The tests for the manual Alarm Control Panel component."""
from datetime import timedelta
import unittest
from unittest.mock import patch
from homeassistant.setup import setup_component
from homeassistant.const import (
STATE_ALARM_DISARMED, STATE_ALARM_ARMED_HOME, STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_NIGHT, STATE_ALARM_PENDING, STATE_ALARM_TRIGGERED)
from homeassistant.components import alarm_control_panel
import homeassistant.util.dt as dt_util
from tests.common import fire_time_changed, get_test_home_assistant
CODE = 'HELLO_CODE'
class TestAlarmControlPanelManual(unittest.TestCase):
"""Test the manual alarm module."""
def setUp(self): # pylint: disable=invalid-name
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
def tearDown(self): # pylint: disable=invalid-name
"""Stop down everything that was started."""
self.hass.stop()
def test_arm_home_no_pending(self):
"""Test arm home method."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual',
'name': 'test',
'code': CODE,
'pending_time': 0,
'disarm_after_trigger': False
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_arm_home(self.hass, CODE)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_ARMED_HOME,
self.hass.states.get(entity_id).state)
def test_arm_home_with_pending(self):
"""Test arm home method."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual',
'name': 'test',
'code': CODE,
'pending_time': 1,
'disarm_after_trigger': False
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_arm_home(self.hass, CODE, entity_id)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_PENDING,
self.hass.states.get(entity_id).state)
future = dt_util.utcnow() + timedelta(seconds=1)
with patch(('homeassistant.components.alarm_control_panel.manual.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_ARMED_HOME,
self.hass.states.get(entity_id).state)
def test_arm_home_with_invalid_code(self):
"""Attempt to arm home without a valid code."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual',
'name': 'test',
'code': CODE,
'pending_time': 1,
'disarm_after_trigger': False
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_arm_home(self.hass, CODE + '2')
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
def test_arm_away_no_pending(self):
"""Test arm home method."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual',
'name': 'test',
'code': CODE,
'pending_time': 0,
'disarm_after_trigger': False
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_arm_away(self.hass, CODE, entity_id)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_ARMED_AWAY,
self.hass.states.get(entity_id).state)
def test_arm_away_with_pending(self):
"""Test arm home method."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual',
'name': 'test',
'code': CODE,
'pending_time': 1,
'disarm_after_trigger': False
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_arm_away(self.hass, CODE)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_PENDING,
self.hass.states.get(entity_id).state)
future = dt_util.utcnow() + timedelta(seconds=1)
with patch(('homeassistant.components.alarm_control_panel.manual.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_ARMED_AWAY,
self.hass.states.get(entity_id).state)
def test_arm_away_with_invalid_code(self):
"""Attempt to arm away without a valid code."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual',
'name': 'test',
'code': CODE,
'pending_time': 1,
'disarm_after_trigger': False
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_arm_away(self.hass, CODE + '2')
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
def test_arm_night_no_pending(self):
"""Test arm night method."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual',
'name': 'test',
'code': CODE,
'pending_time': 0,
'disarm_after_trigger': False
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_arm_night(self.hass, CODE)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_ARMED_NIGHT,
self.hass.states.get(entity_id).state)
def test_arm_night_with_pending(self):
"""Test arm night method."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual',
'name': 'test',
'code': CODE,
'pending_time': 1,
'disarm_after_trigger': False
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_arm_night(self.hass, CODE, entity_id)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_PENDING,
self.hass.states.get(entity_id).state)
future = dt_util.utcnow() + timedelta(seconds=1)
with patch(('homeassistant.components.alarm_control_panel.manual.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_ARMED_NIGHT,
self.hass.states.get(entity_id).state)
def test_arm_night_with_invalid_code(self):
"""Attempt to night home without a valid code."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual',
'name': 'test',
'code': CODE,
'pending_time': 1,
'disarm_after_trigger': False
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_arm_night(self.hass, CODE + '2')
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
def test_trigger_no_pending(self):
"""Test triggering when no pending submitted method."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual',
'name': 'test',
'trigger_time': 1,
'disarm_after_trigger': False
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_trigger(self.hass, entity_id=entity_id)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_PENDING,
self.hass.states.get(entity_id).state)
future = dt_util.utcnow() + timedelta(seconds=60)
with patch(('homeassistant.components.alarm_control_panel.manual.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_TRIGGERED,
self.hass.states.get(entity_id).state)
def test_trigger_with_pending(self):
"""Test arm home method."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual',
'name': 'test',
'pending_time': 2,
'trigger_time': 3,
'disarm_after_trigger': False
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_trigger(self.hass)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_PENDING,
self.hass.states.get(entity_id).state)
future = dt_util.utcnow() + timedelta(seconds=2)
with patch(('homeassistant.components.alarm_control_panel.manual.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_TRIGGERED,
self.hass.states.get(entity_id).state)
future = dt_util.utcnow() + timedelta(seconds=5)
with patch(('homeassistant.components.alarm_control_panel.manual.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
def test_trigger_with_disarm_after_trigger(self):
"""Test disarm after trigger."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual',
'name': 'test',
'trigger_time': 5,
'pending_time': 0,
'disarm_after_trigger': True
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_trigger(self.hass, entity_id=entity_id)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_TRIGGERED,
self.hass.states.get(entity_id).state)
future = dt_util.utcnow() + timedelta(seconds=5)
with patch(('homeassistant.components.alarm_control_panel.manual.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
def test_disarm_while_pending_trigger(self):
"""Test disarming while pending state."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual',
'name': 'test',
'trigger_time': 5,
'disarm_after_trigger': False
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_trigger(self.hass)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_PENDING,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_disarm(self.hass, entity_id=entity_id)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
future = dt_util.utcnow() + timedelta(seconds=5)
with patch(('homeassistant.components.alarm_control_panel.manual.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
def test_disarm_during_trigger_with_invalid_code(self):
"""Test disarming while code is invalid."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual',
'name': 'test',
'pending_time': 5,
'code': CODE + '2',
'disarm_after_trigger': False
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_trigger(self.hass)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_PENDING,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_disarm(self.hass, entity_id=entity_id)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_PENDING,
self.hass.states.get(entity_id).state)
future = dt_util.utcnow() + timedelta(seconds=5)
with patch(('homeassistant.components.alarm_control_panel.manual.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_TRIGGERED,
self.hass.states.get(entity_id).state)
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================
"""Library of TPU helper functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.tpu.python.ops import tpu_ops
from tensorflow.contrib.tpu.python.tpu import tpu_function
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
_SUMMARY_OPS = ("ScalarSummary",)
_PLACEHOLDER_OPS = ("Placeholder",)
def initialize_system(embedding_config=None, job=None):
"""Initializes a distributed TPU system for use with TensorFlow.
Args:
embedding_config: If not None, an EmbeddingLayerConfiguration proto
describing the desired configuration of the hardware embedding lookup
tables. If embedding_config is None, no hardware embeddings can be used.
job: The job (the XXX in TensorFlow device specification /job:XXX)
that contains the TPU devices that will be initialized. If job=None
it is assumed there is only one job in the TensorFlow flock, and an
error will be returned if this assumption does not hold.
Returns:
Op which, when executed, will initialize the system.
"""
if job is None:
device_name = "/device:TPU_SYSTEM:0"
else:
device_name = "/job:%s/device:TPU_SYSTEM:0" % job
config_string = ("" if embedding_config is None else
embedding_config.SerializeToString())
with ops.device(device_name):
init_distributed_tpu = tpu_ops.configure_distributed_tpu(
embedding_config=config_string)
return init_distributed_tpu
def shutdown_system(job=None):
"""Shuts down a running a distributed TPU system."""
if job is None:
device_name = "/device:TPU_SYSTEM:0"
else:
device_name = "/job:%s/device:TPU_SYSTEM:0" % job
with ops.device(device_name):
shutdown_distributed_tpu = tpu_ops.shutdown_distributed_tpu()
return shutdown_distributed_tpu
def core(num):
"""Returns the device name for a core in a replicated TPU computation.
Args:
num: the virtual core number within each replica to which operators should
be assigned.
Returns:
A device name, suitable for passing to tf.device().
"""
return "device:TPU_REPLICATED_CORE:{}".format(num)
class TPUReplicateContext(control_flow_ops.ControlFlowContext):
"""A ControlFlowContext for nodes inside a TPU computation.
The primary role of TPUReplicateContext is to mark operators inside a
tpu.replicate() computation with the attribute "_tpu_replicate=XYZ", where XYZ
is a unique name.
We use a ControlFlowContext to perform the annotation since it
integrates with TensorFlow constructs like ResourceVariables. For example,
if a ResourceVariable is constructed inside a tpu.replicate() block, the
ResourceVariable implementation can use "with ops.control_dependencies(None)"
to build the variable's definition outside the replicated computation.
"""
def __init__(self, name):
control_flow_ops.ControlFlowContext.__init__(self)
self._name = name
def AddOp(self, op):
self._AddOpInternal(op)
def _AddOpInternal(self, op):
# pylint: disable=protected-access
if op.type in _PLACEHOLDER_OPS:
raise ValueError("Placeholder %s is not supported." % op.name)
if op.type in _SUMMARY_OPS:
logging.warning(
"Summary operations are not currently supported (%s)" % op.name)
if any(x.dtype._is_ref_dtype for x in op.inputs):
raise NotImplementedError(
"Non-resource Variables are not supported inside TPU computations "
"(operator name: %s)" % op.name)
# pylint: enable=protected-access
if "_tpu_replicate" in op.node_def.attr:
raise ValueError("TPU computations cannot be nested")
op.node_def.attr["_tpu_replicate"].s = self._name
op.graph.prevent_feeding(op)
op.graph.prevent_fetching(op)
def AddValue(self, val):
result = val
if self._outer_context:
result = self._outer_context.AddValue(val)
return result
def AddInnerOp(self, op):
self._AddOpInternal(op)
if self._outer_context:
self._outer_context.AddInnerOp(op)
@property
def grad_state(self):
# Define the gradient loop state associated with the TPUReplicateContext to
# be None as the TPUReplicateContext does not get nested nor does the
# grad_state outside the TPUReplicateContext affect the graph inside so the
# grad_state should be as if this is the top-level gradient state.
return None
def replicate(computation,
inputs=None,
infeed_queue=None,
global_tpu_id=None,
name=None):
"""Builds a graph operator that runs a replicated TPU computation.
Args:
computation: a Python function that builds the computation to replicate.
inputs: a list of lists of input tensors or None (equivalent to
[[]]), indexed by [replica_num][input_num]. All replicas must
have the same number of inputs.
infeed_queue: if not None, the InfeedQueue from which to append a tuple
of arguments as inputs to computation.
global_tpu_id: if not None, a Numpy 2D array indicating the global
id of each TPU device in the system. The outer dimension of the
array is host task id, and the inner dimension is device ordinal,
so e.g., global_tpu_id[x][y] indicates the global id of device
/task:x/device:TPU_NODE:y.
name: name of the operator.
Returns:
A list of lists of output tensors, indexed by [replica_num][output_num].
Raises:
ValueError: if all replicas do not have equal numbers of input tensors.
ValueError: if the number of inputs per replica does not match
the number of formal parameters to `computation`.
"""
if name is None:
name = "TPUReplicate"
inputs = [[]] if inputs is None else inputs
if global_tpu_id is not None:
# Turn the Numpy array into a flattened list.
global_tpu_id = global_tpu_id.flatten().tolist()
if ((not isinstance(inputs, list)) or
any(not isinstance(inp, (list, tuple)) for inp in inputs)):
raise TypeError("tpu.replicate() inputs must be a list of lists/tuples")
num_replicas = len(inputs)
# No replicas? Nothing to do.
if num_replicas == 0:
return []
# Converts inputs to Tensors.
inputs = [[ops.convert_to_tensor(x) for x in inp] for inp in inputs]
# Verifies that all replicas have matching numbers and types of inputs
input_types = [x.dtype for x in inputs[0]]
input_arity = len(input_types)
for i in range(num_replicas):
if len(inputs[i]) != input_arity:
raise ValueError("Replicas must have the same number of inputs. "
"Replica 0 had {} inputs, replica {} had {} "
"inputs.".format(input_arity, i, len(inputs[i])))
types = [x.dtype for x in inputs[i]]
if types != input_types:
raise ValueError(
"Replicas must have matching input types. Replica 0 had "
"input types {}, replica {} had input types {}".format(
input_types, i, types))
arg_error = tpu_function.check_function_argument_count(
computation, input_arity, infeed_queue)
if arg_error is not None:
if infeed_queue is None:
raise TypeError(
"Supplied computation cannot be called with the specified inputs. "
"You specified %d inputs: %s, but the computation needs %s" % (
input_arity, str([i.name for i in inputs[0]]), arg_error))
else:
raise TypeError(
"Supplied computation cannot be called with the specified inputs. "
"You specified %d inputs: %s and %d additional inputs from infeed,"
" but the computation needs %s" % (input_arity, str(
[i.name
for i in inputs[0]]), infeed_queue.number_of_tuple_elements,
arg_error))
graph = ops.get_default_graph()
with ops.name_scope(name, "replicate"):
# Fan-in: Builds a TPUReplicatedInput node for each input.
computation_inputs = []
for i in range(0, input_arity):
replicas = [inputs[replica][i] for replica in xrange(num_replicas)]
computation_inputs.append(
tpu_ops.tpu_replicated_input(replicas, name="input{}".format(i)))
context = TPUReplicateContext(name=graph.unique_name("cluster"))
try:
context.Enter()
metadata = tpu_ops.tpu_replicate_metadata(
num_replicas=num_replicas, global_tpu_id=global_tpu_id)
with tpu_function.tpu_shard_context(
num_replicas), ops.control_dependencies([metadata]):
# The EncapsulateTPUComputations rewrite needs to identify the
# replicated arguments inside each computation. Adds identity operators
# tagged with an attribute _tpu_replicated_input to identify the
# replicated inputs.
# pylint: disable=protected-access
with graph._attr_scope({"_tpu_replicated_input":
attr_value_pb2.AttrValue(b=True)}):
computation_inputs = [
array_ops.identity(x, name="replicated_input_{}".format(i))
for i, x in enumerate(computation_inputs)]
# pylint: enable=protected-access
# If there is an infeed queue, adds the dequeued values to the
# computation's inputs.
if infeed_queue is not None:
infeed_queue.set_number_of_shards(num_replicas)
for t in infeed_queue.generate_dequeue_op():
computation_inputs.append(t)
# Only resource variables work inside a TPU computation, so turn on
# resource variables for the computation.
# TODO(phawkins): consider removing this code. It will
# be less confusing to clients if they knowingly choose to use resource
# variables.
vscope = variable_scope.get_variable_scope()
saved_use_resource = vscope.use_resource
vscope.set_use_resource(True)
outputs = computation(*computation_inputs)
vscope.set_use_resource(saved_use_resource)
# If the computation only returned one value, makes it a tuple.
if not isinstance(outputs, (list, tuple)):
outputs = (outputs,)
try:
with ops.device(core(0)):
outputs = [
o if isinstance(o, ops.Operation) else ops.convert_to_tensor(o)
for o in outputs
]
except Exception as e:
raise ValueError(
"TPU function return values must all either be Operations or "
"convertible to Tensors. Got '%s'" % str(e))
# Separates the returned Operations and Tensors.
output_operations = [o for o in outputs if isinstance(o, ops.Operation)]
output_tensors = [o for o in outputs
if not isinstance(o, ops.Operation)]
if outputs != output_tensors + output_operations:
raise ValueError(
"TPU functions must return zero-or more Tensor values followed by "
"zero or more Operations.")
output_arity = len(output_tensors)
# Wraps outputs in Identity ops. Otherwise a replicated input copied
# straight to an output would bypass the replicate(). This would be bad
# because the TPUReplicatedInput/TPUReplicatedOutput operator would not
# be rewritten away, leading to a runtime error.
# TODO(phawkins): extend the rewrite to elide these nodes instead.
new_output_tensors = []
for t in output_tensors:
with ops.device(t.device if t.device else core(0)):
new_output_tensors.append(array_ops.identity(t))
output_tensors = new_output_tensors
finally:
context.Exit()
# Fan-out: Builds a TPUReplicatedOutput node for each output.
outputs = [tpu_ops.tpu_replicated_output(output_tensors[i], num_replicas,
name="output{}".format(i))
for i in xrange(output_arity)]
with ops.control_dependencies(output_operations):
if output_arity == 0:
# Returns a list of NoOps dependent on the replication Op, indexed by
# [replica_num].
return [
control_flow_ops.no_op(name="%s_shard_%d" % (name, i))
for i in range(num_replicas)
]
else:
# Wraps the outputs in identity operators so the names of any possible
# `fetch` nodes are preserved by the replication rewrite.
return [
[array_ops.identity(outputs[out][replica],
name="output_%d_shard_%d" % (out, replica))
for out in xrange(output_arity)]
for replica in xrange(num_replicas)
]
def shard(computation,
inputs=None,
num_shards=1,
input_shard_axes=None,
outputs_from_all_shards=True,
output_shard_axes=None,
infeed_queue=None,
global_tpu_id=None,
name=None):
"""Shards `computation` for parallel execution.
`inputs` must be a list of Tensors or None (equivalent to an empty
list), each of which has a corresponding split axis (from
`input_shard_axes`). Each input is split into `num_shards` pieces
along the corresponding axis, and computation is applied to each
shard in parallel.
Tensors are broadcast to all shards if they are lexically captured by
`computation`. e.g.,
x = tf.constant(7)
def computation():
return x + 3
... = shard(computation, ...)
TODO(phawkins): consider adding support for broadcasting Tensors passed
as inputs.
If `outputs_from_all_shards` is true, the outputs from all shards of
`computation` are concatenated back together along their `output_shard_axes`.
Otherwise, each output is taken from an arbitrary shard.
Inputs and outputs of the computation must be at least rank-1 Tensors.
Args:
computation: a Python function that builds a computation to apply to each
shard of the input.
inputs: a list of input tensors or None (equivalent to an empty
list). Each input tensor has a corresponding shard axes, given
by `input_shard_axes`, which must have size divisible by
`num_shards`.
num_shards: the number of shards.
input_shard_axes: a list of dimensions along which to shard `inputs`, or
`None`. `None` means "shard all inputs along dimension 0". If not `None`,
there must be one dimension per input.
outputs_from_all_shards: boolean or list of boolean. For each output, if
`True`, outputs from all shards are concatenated along the corresponding
`output_shard_axes` entry. Otherwise, each output is taken
from an arbitrary shard. If the argument is a boolean, the argument's
value is used for each output.
output_shard_axes: a list of dimensions along which to concatenate the
outputs of `computation`, or `None`. `None` means "concatenate all outputs
along dimension 0". If not `None`, there must be one dimension per output.
Ignored if `outputs_from_all_shards` is False.
infeed_queue: if not None, the InfeedQueue to use to augment the inputs of
`computation`.
global_tpu_id: if not None, a Numpy 2D array indicating the global
id of each TPU device in the system. The outer dimension of the
array is host task id, and the inner dimension is device ordinal,
so e.g., global_tpu_id[x][y] indicates the global id of device
/task:x/device:TPU_NODE:y.
name: name of the operator.
Returns:
A list of output tensors.
Raises:
ValueError: if num_shards <= 0
ValueError: if len(input_shard_axes) != len(inputs)
ValueError: if len(output_shard_axes) != len(outputs from `computation`)
"""
if num_shards <= 0:
raise ValueError("num_shards must be a positive integer.")
# Converts inputs to Tensors.
inputs = [] if inputs is None else [ops.convert_to_tensor(x) for x in inputs]
if input_shard_axes is None:
input_shard_axes = [0] * len(inputs)
if len(inputs) != len(input_shard_axes):
raise ValueError("Length of input_shard_axes must be equal to the number "
"of inputs.")
if inputs:
# Splits the `inputs` along the corresponding `input_shard_axes`, giving
# lists with layout [input][shard]
split_inputs = [
array_ops.split(x, num_shards, axis=axis)
for (axis, x) in zip(input_shard_axes, inputs)]
# Transposes the input lists to have layout [shard][input]
transposed_inputs = [list(i) for i in zip(*split_inputs)]
else:
transposed_inputs = [[]] * num_shards
outputs = replicate(
computation,
transposed_inputs,
infeed_queue=infeed_queue,
global_tpu_id=global_tpu_id,
name=name)
# There must be at least one shard since num_shards > 0.
# TODO(b/36647078) remove disable when pylint bug is fixed.
# pylint: disable=indexing-exception
if isinstance(outputs[0], ops.Operation):
# pylint: enable=indexing-exception
# There were no outputs from the computation and replicate returned a list
# of NoOps with control dependencies on the computation. Return the first
# one so it can be used as a control dependency or fetch node.
# TODO(b/36647078) remove disable when pylint bug is fixed.
# pylint: disable=indexing-exception
return [outputs[0]]
# pylint: enable=indexing-exception
# TODO(b/36647078) remove disable when pylint bug is fixed.
# pylint: disable=indexing-exception
num_outputs = len(outputs[0])
# pylint: enable=indexing-exception
if output_shard_axes is None:
output_shard_axes = [0] * num_outputs
if num_outputs != len(output_shard_axes):
raise ValueError("Length of output_shard_axes must be equal to the number "
"of outputs.")
if isinstance(outputs_from_all_shards, bool):
outputs_from_all_shards = [outputs_from_all_shards] * num_outputs
if num_outputs != len(outputs_from_all_shards):
raise ValueError("Length of outputs_from_all_shards must be equal to the "
"number of outputs.")
results = []
for (axis, all_shards, x) in zip(output_shard_axes, outputs_from_all_shards,
zip(*outputs)):
if all_shards:
# Concatenate all of the outputs together (use stack for scalars).
shape = x[0].shape
is_scalar = shape is not None and (shape.ndims == 0)
results.append((array_ops.stack(list(x)) if is_scalar
else array_ops.concat(list(x), axis=axis)))
else:
# TODO(phawkins): use a smarter policy, e.g., round-robin across shards.
results.append(x[0])
return results
def batch_parallel(computation,
inputs=None,
num_shards=1,
infeed_queue=None,
global_tpu_id=None,
name=None):
"""Shards `computation` along the batch dimension for parallel execution.
Convenience wrapper around shard().
`inputs` must be a list of Tensors or None (equivalent to an empty
list). Each input is split into `num_shards` pieces along the 0-th
dimension, and computation is applied to each shard in parallel.
Tensors are broadcast to all shards if they are lexically captured by
`computation`. e.g.,
x = tf.constant(7)
def computation():
return x + 3
... = shard(computation, ...)
The outputs from all shards are concatenated back together along their 0-th
dimension.
Inputs and outputs of the computation must be at least rank-1 Tensors.
Args:
computation: a Python function that builds a computation to apply to each
shard of the input.
inputs: a list of input tensors or None (equivalent to an empty
list). The 0-th dimension of each Tensor must have size
divisible by `num_shards`.
num_shards: the number of shards.
infeed_queue: if not None, the InfeedQueue from which to append a tuple
of arguments as inputs to `computation`.
global_tpu_id: if not None, a Numpy 2D array indicating the global
id of each TPU device in the system. The outer dimension of the
array is host task id, and the inner dimension is device ordinal,
so e.g., global_tpu_id[x][y] indicates the global id of device
/task:x/device:TPU_NODE:y.
name: name of the operator.
Returns:
A list of output tensors.
Raises:
ValueError: if num_shards <= 0
"""
return shard(
computation,
inputs,
num_shards=num_shards,
infeed_queue=infeed_queue,
global_tpu_id=global_tpu_id,
name=name)
def rewrite(computation,
inputs=None,
infeed_queue=None,
global_tpu_id=None,
name=None):
"""Rewrites `computation` for execution on a TPU system.
Args:
computation: a Python function that builds a computation to apply
to the input. If the function takes n inputs, 'inputs' should be
a list of n tensors. If the function returns m outputs, rewrite
will return a list of m tensors.
inputs: a list of input tensors or None (equivalent to an empty list).
infeed_queue: if not None, the InfeedQueue from which to append a tuple
of arguments as inputs to `computation`.
global_tpu_id: if not None, a Numpy 2D array indicating the global
id of each TPU device in the system. The outer dimension of the
array is host task id, and the inner dimension is device ordinal,
so e.g., global_tpu_id[x][y] indicates the global id of device
/task:x/device:TPU_NODE:y.
name: name of the operator.
Returns:
A list of output tensors.
"""
if inputs is not None and not isinstance(inputs, (list, tuple)):
raise TypeError("tpu.rewrite() inputs must be a list or tuple")
# TODO(b/36647078) remove disable when pylint bug is fixed.
# pylint: disable=indexing-exception
return replicate(
computation,
None if inputs is None else [inputs],
infeed_queue=infeed_queue,
global_tpu_id=global_tpu_id,
name=name)[0]
# pylint: enable=indexing-exception
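# Illustrative usage sketch (an assumption for clarity, not part of the public
# TPU API surface): a trivial computation sharded across two cores. The input
# shape is arbitrary; only names already imported by this module are used.
def _example_shard_usage():
  """Builds a toy sharded computation and returns its single output."""
  def computation(x):
    return x * 2.0 + 1.0
  x = array_ops.ones([8, 4])  # split along dimension 0 into two shards
  [doubled] = shard(computation, [x], num_shards=2)
  return doubled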
|
|
# Low-color themes should ONLY use the standard foreground and background
# colours listed here:
#
# http://urwid.org/manual/displayattributes.html
#
class Palette:
_fields = [
'background',
'title',
# Status bar & heading
'heading', 'heading_key', 'heading_inactive',
# Help
'key', 'head', 'text',
# Options
'option_selected', 'option_active', 'option_active_selected',
'option_selected_key',
# List and Connections
'method', 'focus',
'code_200', 'code_300', 'code_400', 'code_500', 'code_other',
'error', "warn",
'header', 'highlight', 'intercept', 'replay', 'mark',
# Hex view
'offset',
# Grid Editor
'focusfield', 'focusfield_error', 'field_error', 'editfield',
]
high = None
def palette(self, transparent):
l = []
highback, lowback = None, None
if not transparent:
if self.high and self.high.get("background"):
highback = self.high["background"][1]
lowback = self.low["background"][1]
for i in self._fields:
if transparent and i == "background":
l.append(["background", "default", "default"])
else:
v = [i]
low = list(self.low[i])
if lowback and low[1] == "default":
low[1] = lowback
v.extend(low)
if self.high and i in self.high:
v.append(None)
high = list(self.high[i])
if highback and high[1] == "default":
high[1] = highback
v.extend(high)
elif highback and self.low[i][1] == "default":
high = [None, low[0], highback]
v.extend(high)
l.append(tuple(v))
return l
class LowDark(Palette):
"""
Low-color dark background
"""
low = dict(
background = ('white', 'black'),
title = ('white,bold', 'default'),
# Status bar & heading
heading = ('white', 'dark blue'),
heading_key = ('light cyan', 'dark blue'),
heading_inactive = ('dark gray', 'light gray'),
# Help
key = ('light cyan', 'default'),
head = ('white,bold', 'default'),
text = ('light gray', 'default'),
# Options
option_selected = ('black', 'light gray'),
option_selected_key = ('light cyan', 'light gray'),
option_active = ('light red', 'default'),
option_active_selected = ('light red', 'light gray'),
# List and Connections
method = ('dark cyan', 'default'),
focus = ('yellow', 'default'),
code_200 = ('dark green', 'default'),
code_300 = ('light blue', 'default'),
code_400 = ('light red', 'default'),
code_500 = ('light red', 'default'),
code_other = ('dark red', 'default'),
warn = ('brown', 'default'),
error = ('light red', 'default'),
header = ('dark cyan', 'default'),
highlight = ('white,bold', 'default'),
intercept = ('brown', 'default'),
replay = ('light green', 'default'),
mark = ('light red', 'default'),
# Hex view
offset = ('dark cyan', 'default'),
# Grid Editor
focusfield = ('black', 'light gray'),
focusfield_error = ('dark red', 'light gray'),
field_error = ('dark red', 'default'),
editfield = ('white', 'default'),
)
class Dark(LowDark):
high = dict(
heading_inactive = ('g58', 'g11'),
intercept = ('#f60', 'default'),
option_selected = ('g85', 'g45'),
option_selected_key = ('light cyan', 'g50'),
option_active_selected = ('light red', 'g50'),
)
class LowLight(Palette):
"""
Low-color light background
"""
low = dict(
background = ('black', 'white'),
title = ('dark magenta', 'default'),
# Status bar & heading
heading = ('white', 'black'),
heading_key = ('dark blue', 'black'),
heading_inactive = ('black', 'light gray'),
# Help
key = ('dark blue', 'default'),
head = ('black', 'default'),
text = ('dark gray', 'default'),
# Options
option_selected = ('black', 'light gray'),
option_selected_key = ('dark blue', 'light gray'),
option_active = ('light red', 'default'),
option_active_selected = ('light red', 'light gray'),
# List and Connections
method = ('dark cyan', 'default'),
focus = ('black', 'default'),
code_200 = ('dark green', 'default'),
code_300 = ('light blue', 'default'),
code_400 = ('dark red', 'default'),
code_500 = ('dark red', 'default'),
code_other = ('light red', 'default'),
error = ('light red', 'default'),
warn = ('brown', 'default'),
header = ('dark blue', 'default'),
highlight = ('black,bold', 'default'),
intercept = ('brown', 'default'),
replay = ('dark green', 'default'),
mark = ('dark red', 'default'),
# Hex view
offset = ('dark blue', 'default'),
# Grid Editor
focusfield = ('black', 'light gray'),
focusfield_error = ('dark red', 'light gray'),
field_error = ('dark red', 'black'),
editfield = ('black', 'default'),
)
class Light(LowLight):
high = dict(
background = ('black', 'g100'),
heading = ('g99', '#08f'),
heading_key = ('#0ff,bold', '#08f'),
heading_inactive = ('g35', 'g85'),
replay = ('#0a0,bold', 'default'),
option_selected = ('black', 'g85'),
option_selected_key = ('dark blue', 'g85'),
option_active_selected = ('light red', 'g85'),
)
# Solarized palette in Urwid-style terminal high-colour offsets
# See: http://ethanschoonover.com/solarized
sol_base03 = "h234"
sol_base02 = "h235"
sol_base01 = "h240"
sol_base00 = "h241"
sol_base0 = "h244"
sol_base1 = "h245"
sol_base2 = "h254"
sol_base3 = "h230"
sol_yellow = "h136"
sol_orange = "h166"
sol_red = "h160"
sol_magenta = "h125"
sol_violet = "h61"
sol_blue = "h33"
sol_cyan = "h37"
sol_green = "h64"
class SolarizedLight(LowLight):
high = dict(
background = (sol_base00, sol_base3),
title = (sol_cyan, 'default'),
text = (sol_base00, 'default'),
# Status bar & heading
heading = (sol_base2, sol_base02),
heading_key = (sol_blue, sol_base03),
heading_inactive = (sol_base03, sol_base1),
# Help
key = (sol_blue, 'default',),
head = (sol_base00, 'default'),
# Options
option_selected = (sol_base03, sol_base2),
option_selected_key = (sol_blue, sol_base2),
option_active = (sol_orange, 'default'),
option_active_selected = (sol_orange, sol_base2),
# List and Connections
method = (sol_cyan, 'default'),
focus = (sol_base01, 'default'),
code_200 = (sol_green, 'default'),
code_300 = (sol_blue, 'default'),
code_400 = (sol_orange, 'default',),
code_500 = (sol_red, 'default'),
code_other = (sol_magenta, 'default'),
error = (sol_red, 'default'),
warn = (sol_orange, 'default'),
header = (sol_blue, 'default'),
highlight = (sol_base01, 'default'),
intercept = (sol_red, 'default',),
replay = (sol_green, 'default',),
# Hex view
offset = (sol_cyan, 'default'),
# Grid Editor
focusfield = (sol_base00, sol_base2),
focusfield_error = (sol_red, sol_base2),
field_error = (sol_red, 'default'),
editfield = (sol_base01, 'default'),
)
class SolarizedDark(LowDark):
high = dict(
background = (sol_base2, sol_base03),
title = (sol_blue, 'default'),
text = (sol_base1, 'default'),
# Status bar & heading
heading = (sol_base2, sol_base01),
heading_key = (sol_blue + ",bold", sol_base01),
heading_inactive = (sol_base1, sol_base02),
# Help
key = (sol_blue, 'default',),
head = (sol_base2, 'default'),
# Options
option_selected = (sol_base03, sol_base00),
option_selected_key = (sol_blue, sol_base00),
option_active = (sol_orange, 'default'),
option_active_selected = (sol_orange, sol_base00),
# List and Connections
method = (sol_cyan, 'default'),
focus = (sol_base1, 'default'),
code_200 = (sol_green, 'default'),
code_300 = (sol_blue, 'default'),
code_400 = (sol_orange, 'default',),
code_500 = (sol_red, 'default'),
code_other = (sol_magenta, 'default'),
error = (sol_red, 'default'),
warn = (sol_orange, 'default'),
header = (sol_blue, 'default'),
highlight = (sol_base01, 'default'),
intercept = (sol_red, 'default',),
replay = (sol_green, 'default',),
# Hex view
offset = (sol_cyan, 'default'),
# Grid Editor
focusfield = (sol_base0, sol_base02),
focusfield_error = (sol_red, sol_base02),
field_error = (sol_red, 'default'),
editfield = (sol_base1, 'default'),
)
DEFAULT = "dark"
palettes = {
"lowlight": LowLight(),
"lowdark": LowDark(),
"light": Light(),
"dark": Dark(),
"solarized_light": SolarizedLight(),
"solarized_dark": SolarizedDark(),
}
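# Illustrative usage sketch (an assumption, not part of this module): a palette
# is looked up by name and handed to urwid; `transparent=False` keeps the
# themed background. `urwid.MainLoop`, `urwid.Filler`, `urwid.Text` and
# `set_terminal_properties` are standard urwid APIs.
#
#   import urwid
#
#   spec = palettes.get("solarized_dark", palettes[DEFAULT]).palette(False)
#   loop = urwid.MainLoop(urwid.Filler(urwid.Text("hello")), palette=spec)
#   loop.screen.set_terminal_properties(256)  # enable the high-colour entries
#   loop.run()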
|
|
# coding: utf8
# Author: Rodrigo Bistolfi
# Date: 03/2013
""" Test cases for Nikola ReST extensions.
A base class, ReSTExtensionTestCase, provides the tests' basic behavior.
Subclasses must override the "sample" class attribute with the ReST markup.
The sample will be rendered as HTML using publish_parts() by setUp().
One method is provided for checking the resulting HTML:
* assertHTMLContains(element, attributes=None, text=None)
The HTML is parsed with lxml for checking against the data you provide. The
method takes an element argument, a string representing the *name* of an HTML
tag, like "script" or "iframe". We will try to find this tag in the document
and perform the tests on it. You can pass a dictionary to the attributes kwarg
representing the name and the value of the tag attributes. The text kwarg takes
a string argument, which will be tested against the contents of the HTML
element.
One last caveat: you need to URL-unquote your URLs if you are going to test
attributes like "src" or "link", since the HTML rendered by docutils will
always be unquoted.
"""
from __future__ import unicode_literals, absolute_import
import os
import sys
import io
try:
from io import StringIO
except ImportError:
from StringIO import StringIO # NOQA
import tempfile
import docutils
from lxml import html
import pytest
import unittest
import nikola.plugins.compile.rest
from nikola.plugins.compile.rest import gist
from nikola.plugins.compile.rest import vimeo
import nikola.plugins.compile.rest.listing
from nikola.plugins.compile.rest.doc import Plugin as DocPlugin
from nikola.utils import _reload
from .base import BaseTestCase, FakeSite
class ReSTExtensionTestCase(BaseTestCase):
""" Base class for testing ReST extensions """
sample = 'foo'
deps = None
def setUp(self):
self.compiler = nikola.plugins.compile.rest.CompileRest()
self.compiler.set_site(FakeSite())
return super(ReSTExtensionTestCase, self).setUp()
def basic_test(self):
""" Parse cls.sample into a HTML document tree """
self.setHtmlFromRst(self.sample)
def setHtmlFromRst(self, rst):
""" Create html output from rst string """
tmpdir = tempfile.mkdtemp()
inf = os.path.join(tmpdir, 'inf')
outf = os.path.join(tmpdir, 'outf')
depf = os.path.join(tmpdir, 'outf.dep')
with io.open(inf, 'w+', encoding='utf8') as f:
f.write(rst)
self.html = self.compiler.compile_html(inf, outf)
with io.open(outf, 'r', encoding='utf8') as f:
self.html = f.read()
os.unlink(inf)
os.unlink(outf)
if os.path.isfile(depf):
with io.open(depf, 'r', encoding='utf8') as f:
self.assertEqual(self.deps, f.read())
os.unlink(depf)
else:
self.assertEqual(self.deps, None)
os.rmdir(tmpdir)
self.html_doc = html.parse(StringIO(self.html))
def assertHTMLContains(self, element, attributes=None, text=None):
""" Test if HTML document includes an element with the given
attributes and text content
"""
try:
tag = next(self.html_doc.iter(element))
except StopIteration:
raise Exception("<{0}> not in {1}".format(element, self.html))
else:
if attributes:
arg_attrs = set(attributes.items())
tag_attrs = set(tag.items())
self.assertTrue(arg_attrs.issubset(tag_attrs))
if text:
self.assertIn(text, tag.text)
class ReSTExtensionTestCaseTestCase(ReSTExtensionTestCase):
""" Simple test for our base class :) """
sample = '.. raw:: html\n\n <iframe src="foo" height="bar">spam</iframe>'
def test_test(self):
self.basic_test()
self.assertHTMLContains("iframe", attributes={"src": "foo"},
text="spam")
self.assertRaises(Exception, self.assertHTMLContains, "eggs", {})
class MathTestCase(ReSTExtensionTestCase):
    sample = r':math:`e^{ix} = \cos x + i\sin x`'
def test_math(self):
""" Test that math is outputting TeX code."""
self.basic_test()
self.assertHTMLContains("span", attributes={"class": "math"},
text="\(e^{ix} = \cos x + i\sin x\)")
class GistTestCase(ReSTExtensionTestCase):
""" Test GitHubGist.
We will replace get_raw_gist() and get_raw_gist_with_filename()
monkeypatching the GitHubGist class for avoiding network dependency
"""
gist_type = gist.GitHubGist
sample = '.. gist:: fake_id\n :file: spam.py'
sample_without_filename = '.. gist:: fake_id2'
def setUp(self):
""" Patch GitHubGist for avoiding network dependency """
super(GistTestCase, self).setUp()
self.gist_type.get_raw_gist_with_filename = lambda *_: 'raw_gist_file'
self.gist_type.get_raw_gist = lambda *_: "raw_gist"
_reload(nikola.plugins.compile.rest)
    @pytest.mark.skipif(True, reason="This test is indefinitely skipped.")
def test_gist(self):
""" Test the gist directive with filename """
self.setHtmlFromRst(self.sample)
output = 'https://gist.github.com/fake_id.js?file=spam.py'
self.assertHTMLContains("script", attributes={"src": output})
self.assertHTMLContains("pre", text="raw_gist_file")
    @pytest.mark.skipif(True, reason="This test is indefinitely skipped.")
def test_gist_without_filename(self):
""" Test the gist directive without filename """
self.setHtmlFromRst(self.sample_without_filename)
output = 'https://gist.github.com/fake_id2.js'
self.assertHTMLContains("script", attributes={"src": output})
self.assertHTMLContains("pre", text="raw_gist")
class GistIntegrationTestCase(ReSTExtensionTestCase):
""" Test requests integration. The gist plugin uses requests to fetch gist
    contents and places them in a noscript tag.
"""
sample = '.. gist:: 1812835'
def test_gist_integration(self):
""" Fetch contents of the gist from GH and render in a noscript tag """
self.basic_test()
text = ('Be alone, that is the secret of invention: be alone, that is'
' when ideas are born. -- Nikola Tesla')
self.assertHTMLContains('pre', text=text)
class SlidesTestCase(ReSTExtensionTestCase):
""" Slides test case """
sample = '.. slides:: IMG.jpg\n'
def test_slides(self):
""" Test the slides js generation and img tag creation """
self.basic_test()
self.assertHTMLContains("img", attributes={"src": "IMG.jpg"})
class SoundCloudTestCase(ReSTExtensionTestCase):
""" SoundCloud test case """
sample = '.. soundcloud:: SID\n :height: 400\n :width: 600'
def test_soundcloud(self):
""" Test SoundCloud iframe tag generation """
self.basic_test()
self.assertHTMLContains("iframe",
attributes={"src": ("https://w.soundcloud.com"
"/player/?url=http://"
"api.soundcloud.com/"
"tracks/SID"),
"height": "400", "width": "600"})
class VimeoTestCase(ReSTExtensionTestCase):
"""Vimeo test.
    Set Vimeo.request_size to False to avoid querying the Vimeo API
    over the network.
"""
sample = '.. vimeo:: VID\n :height: 400\n :width: 600'
def setUp(self):
""" Disable query of the vimeo api over the wire """
vimeo.Vimeo.request_size = False
super(VimeoTestCase, self).setUp()
_reload(nikola.plugins.compile.rest)
def test_vimeo(self):
""" Test Vimeo iframe tag generation """
self.basic_test()
self.assertHTMLContains("iframe",
attributes={"src": ("//player.vimeo.com/"
"video/VID"),
"height": "400", "width": "600"})
class YoutubeTestCase(ReSTExtensionTestCase):
""" Youtube test case """
sample = '.. youtube:: YID\n :height: 400\n :width: 600'
def test_youtube(self):
""" Test Youtube iframe tag generation """
self.basic_test()
self.assertHTMLContains("iframe",
attributes={"src": ("//www.youtube.com/"
"embed/YID?rel=0&hd=1&"
"wmode=transparent"),
"height": "400", "width": "600"})
class ListingTestCase(ReSTExtensionTestCase):
""" Listing test case and CodeBlock alias tests """
deps = None
sample1 = '.. listing:: nikola.py python\n\n'
sample2 = '.. code-block:: python\n\n import antigravity'
sample3 = '.. sourcecode:: python\n\n import antigravity'
# def test_listing(self):
# """ Test that we can render a file object contents without errors """
# with cd(os.path.dirname(__file__)):
# self.deps = 'listings/nikola.py'
# self.setHtmlFromRst(self.sample1)
def test_codeblock_alias(self):
""" Test CodeBlock aliases """
self.deps = None
self.setHtmlFromRst(self.sample2)
self.setHtmlFromRst(self.sample3)
class DocTestCase(ReSTExtensionTestCase):
""" Ref role test case """
sample = 'Sample for testing my :doc:`doesnt-exist-post`'
sample1 = 'Sample for testing my :doc:`fake-post`'
sample2 = 'Sample for testing my :doc:`titled post <fake-post>`'
def setUp(self):
# Initialize plugin, register role
self.plugin = DocPlugin()
self.plugin.set_site(FakeSite())
# Hack to fix leaked state from integration tests
try:
f = docutils.parsers.rst.roles.role('doc', None, None, None)[0]
f.site = FakeSite()
except AttributeError:
pass
return super(DocTestCase, self).setUp()
def test_doc_doesnt_exist(self):
self.assertRaises(Exception, self.assertHTMLContains, 'anything', {})
def test_doc(self):
self.setHtmlFromRst(self.sample1)
self.assertHTMLContains('a',
text='Fake post',
attributes={'href': '/posts/fake-post'})
def test_doc_titled(self):
self.setHtmlFromRst(self.sample2)
self.assertHTMLContains('a',
text='titled post',
attributes={'href': '/posts/fake-post'})
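class ExampleUsageTestCase(ReSTExtensionTestCase):
    """ Hedged sketch of the usual pattern for new extension tests: override
    `sample` with ReST markup, call basic_test() and assert on the rendered
    HTML. The markup below is purely illustrative. """
    sample = '.. raw:: html\n\n <span class="demo">spam</span>'
    def test_example(self):
        self.basic_test()
        self.assertHTMLContains("span", attributes={"class": "demo"},
                                text="spam")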
if __name__ == "__main__":
unittest.main()
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ironicclient.common import base
from ironicclient import exc
CREATION_ATTRIBUTES = ['chassis_uuid', 'driver', 'driver_info', 'extra',
'node_id', 'properties']
class Node(base.Resource):
def __repr__(self):
return "<Node %s>" % self._info
class NodeManager(base.Manager):
resource_class = Node
@staticmethod
def _path(id=None):
return '/v1/nodes/%s' % id if id else '/v1/nodes'
def list(self, associated=None, maintenance=None, marker=None, limit=None,
detail=False):
"""Retrieve a list of nodes.
:param associated: Optional, boolean whether to return a list of
associated or unassociated nodes.
:param maintenance: Optional, boolean value that indicates whether
to get nodes in maintenance mode ("True"), or not
in maintenance mode ("False").
        :param marker: Optional, the UUID of a node, e.g. the last
node from a previous result set. Return
the next result set.
:param limit: The maximum number of results to return per
request, if:
1) limit > 0, the maximum number of nodes to return.
2) limit == 0, return the entire list of nodes.
3) limit param is NOT specified (None), the number of items
               returned respects the maximum imposed by the Ironic API
(see Ironic's api.max_limit option).
:param detail: Optional, boolean whether to return detailed information
about nodes.
:returns: A list of nodes.
"""
if limit is not None:
limit = int(limit)
filters = []
if isinstance(limit, int) and limit > 0:
filters.append('limit=%s' % limit)
if marker is not None:
filters.append('marker=%s' % marker)
if associated is not None:
filters.append('associated=%s' % associated)
if maintenance is not None:
filters.append('maintenance=%s' % maintenance)
path = ''
if detail:
path += 'detail'
if filters:
path += '?' + '&'.join(filters)
if limit is None:
return self._list(self._path(path), "nodes")
else:
return self._list_pagination(self._path(path), "nodes",
limit=limit)
def list_ports(self, node_id, marker=None, limit=None):
"""List all the ports for a given node.
:param node_id: The UUID of the node.
        :param marker: Optional, the UUID of a port, e.g. the last
port from a previous result set. Return
the next result set.
:param limit: The maximum number of results to return per
request, if:
1) limit > 0, the maximum number of ports to return.
2) limit == 0, return the entire list of ports.
3) limit param is NOT specified (None), the number of items
               returned respects the maximum imposed by the Ironic API
(see Ironic's api.max_limit option).
:returns: A list of ports.
"""
if limit is not None:
limit = int(limit)
filters = []
if isinstance(limit, int) and limit > 0:
filters.append('limit=%s' % limit)
if marker is not None:
filters.append('marker=%s' % marker)
path = "%s/ports" % node_id
if filters:
path += '?' + '&'.join(filters)
if limit is None:
return self._list(self._path(path), "ports")
else:
return self._list_pagination(self._path(path), "ports",
limit=limit)
def get(self, node_id):
try:
return self._list(self._path(node_id))[0]
except IndexError:
return None
def get_by_instance_uuid(self, instance_uuid):
path = "detail?instance_uuid=%s" % instance_uuid
nodes = self._list(self._path(path), 'nodes')
# get all the details of the node assuming that
# filtering by instance_uuid returns a collection
# of one node if successful.
if len(nodes) == 1:
return nodes[0]
else:
raise exc.NotFound()
def create(self, **kwargs):
new = {}
for (key, value) in kwargs.items():
if key in CREATION_ATTRIBUTES:
new[key] = value
else:
raise exc.InvalidAttribute()
return self._create(self._path(), new)
def delete(self, node_id):
return self._delete(self._path(node_id))
def update(self, node_id, patch):
return self._update(self._path(node_id), patch)
def vendor_passthru(self, **kwargs):
node_id = kwargs['node_id']
method = kwargs['method']
args = kwargs['args']
path = self._path(node_id) + "/vendor_passthru/%s" % method
return self._update(path, args, method='POST')
def set_power_state(self, node_id, state):
path = "%s/states/power" % node_id
if state in ['on', 'off']:
state = "power %s" % state
if state in ['reboot']:
state = "rebooting"
target = {'target': state}
return self._update(self._path(path), target, method='PUT')
def validate(self, node_uuid):
path = "%s/validate" % node_uuid
return self.get(path)
def set_provision_state(self, node_uuid, state):
path = "%s/states/provision" % node_uuid
target = {'target': state}
return self._update(self._path(path), target, method='PUT')
def states(self, node_uuid):
path = "%s/states" % node_uuid
return self.get(path)
def get_console(self, node_uuid):
path = "%s/states/console" % node_uuid
info = self.get(path)
if not info:
return {}
return info.to_dict()
def set_console_mode(self, node_uuid, enabled):
path = "%s/states/console" % node_uuid
target = {'enabled': enabled}
return self._update(self._path(path), target, method='PUT')
def set_boot_device(self, node_uuid, boot_device, persistent=False):
path = "%s/management/boot_device" % node_uuid
target = {'boot_device': boot_device, 'persistent': persistent}
return self._update(self._path(path), target, method='PUT')
def get_boot_device(self, node_uuid):
path = "%s/management/boot_device" % node_uuid
return self.get(path).to_dict()
def get_supported_boot_devices(self, node_uuid):
path = "%s/management/boot_device/supported" % node_uuid
return self.get(path).to_dict()
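# Illustrative usage sketch (an assumption, not part of this module):
# paginating nodes with a NodeManager. `api` stands for an already-constructed
# HTTP client passed to the base Manager; the `uuid` attribute on returned
# Node resources is an assumption about the API payload.
#
#   manager = NodeManager(api)
#   first_fifty = manager.list(limit=50, detail=True)  # at most 50 nodes
#   everything = manager.list(limit=0)                 # full list, auto-paginated
#   ports = manager.list_ports(first_fifty[0].uuid)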
|
|
import base64
import os
import re
import socket
import urllib2
from tempfile import NamedTemporaryFile
from cStringIO import StringIO
from django.conf import settings
from django.contrib.auth.models import User
from django.test import TransactionTestCase
from django.test.client import Client
from django_digest.test import Client as DigestClient
from django_digest.test import DigestAuth
from django.contrib.auth import authenticate
from django.utils import timezone
from rest_framework.test import APIRequestFactory
from onadata.apps.logger.models import XForm, Instance, Attachment
from onadata.apps.logger.views import submission
from onadata.apps.main.models import UserProfile
class TestBase(TransactionTestCase):
surveys = ['transport_2011-07-25_19-05-49',
'transport_2011-07-25_19-05-36',
'transport_2011-07-25_19-06-01',
'transport_2011-07-25_19-06-14']
this_directory = os.path.dirname(__file__)
def setUp(self):
self.maxDiff = None
self._create_user_and_login()
self.base_url = 'http://testserver'
self.factory = APIRequestFactory()
def tearDown(self):
# clear mongo db after each test
settings.MONGO_DB.instances.drop()
def _fixture_path(self, *args):
return os.path.join(os.path.dirname(__file__), 'fixtures', *args)
def _create_user(self, username, password):
user, created = User.objects.get_or_create(username=username)
user.set_password(password)
user.save()
return user
def _login(self, username, password):
client = Client()
assert client.login(username=username, password=password)
return client
def _logout(self, client=None):
if not client:
client = self.client
client.logout()
def _create_user_and_login(self, username="bob", password="bob"):
self.login_username = username
self.login_password = password
self.user = self._create_user(username, password)
# create user profile and set require_auth to false for tests
profile, created = UserProfile.objects.get_or_create(user=self.user)
profile.require_auth = False
profile.save()
self.client = self._login(username, password)
self.anon = Client()
def _publish_xls_file(self, path):
if not path.startswith('/%s/' % self.user.username):
path = os.path.join(self.this_directory, path)
with open(path) as xls_file:
post_data = {'xls_file': xls_file}
return self.client.post('/%s/' % self.user.username, post_data)
def _publish_xlsx_file(self):
path = os.path.join(self.this_directory, 'fixtures', 'exp.xlsx')
pre_count = XForm.objects.count()
response = TestBase._publish_xls_file(self, path)
# make sure publishing the survey worked
self.assertEqual(response.status_code, 200)
self.assertEqual(XForm.objects.count(), pre_count + 1)
def _publish_xls_file_and_set_xform(self, path):
count = XForm.objects.count()
self.response = self._publish_xls_file(path)
self.assertEqual(XForm.objects.count(), count + 1)
self.xform = XForm.objects.order_by('pk').reverse()[0]
def _share_form_data(self, id_string='transportation_2011_07_25'):
xform = XForm.objects.get(id_string=id_string)
xform.shared_data = True
xform.save()
def _publish_transportation_form(self):
xls_path = os.path.join(
self.this_directory, "fixtures",
"transportation", "transportation.xls")
count = XForm.objects.count()
TestBase._publish_xls_file(self, xls_path)
self.assertEqual(XForm.objects.count(), count + 1)
self.xform = XForm.objects.order_by('pk').reverse()[0]
def _submit_transport_instance(self, survey_at=0):
s = self.surveys[survey_at]
self._make_submission(os.path.join(
self.this_directory, 'fixtures',
'transportation', 'instances', s, s + '.xml'))
def _submit_transport_instance_w_uuid(self, name):
self._make_submission(os.path.join(
self.this_directory, 'fixtures',
'transportation', 'instances_w_uuid', name, name + '.xml'))
def _submit_transport_instance_w_attachment(self, survey_at=0):
s = self.surveys[survey_at]
media_file = "1335783522563.jpg"
self._make_submission_w_attachment(os.path.join(
self.this_directory, 'fixtures',
'transportation', 'instances', s, s + '.xml'),
os.path.join(self.this_directory, 'fixtures',
'transportation', 'instances', s, media_file))
self.attachment = Attachment.objects.all().reverse()[0]
self.attachment_media_file = self.attachment.media_file
def _publish_transportation_form_and_submit_instance(self):
self._publish_transportation_form()
self._submit_transport_instance()
def _make_submissions_gps(self):
surveys = ['gps_1980-01-23_20-52-08',
'gps_1980-01-23_21-21-33', ]
for survey in surveys:
path = self._fixture_path('gps', 'instances', survey + '.xml')
self._make_submission(path)
def _make_submission(self, path, username=None, add_uuid=False,
forced_submission_time=None, auth=None, client=None):
# store temporary file with dynamic uuid
self.factory = APIRequestFactory()
if auth is None:
auth = DigestAuth('bob', 'bob')
tmp_file = None
if add_uuid:
tmp_file = NamedTemporaryFile(delete=False)
split_xml = None
with open(path) as _file:
split_xml = re.split(r'(<transport>)', _file.read())
split_xml[1:1] = [
'<formhub><uuid>%s</uuid></formhub>' % self.xform.uuid
]
tmp_file.write(''.join(split_xml))
path = tmp_file.name
tmp_file.close()
with open(path) as f:
post_data = {'xml_submission_file': f}
if username is None:
username = self.user.username
url_prefix = '%s/' % username if username else ''
url = '/%ssubmission' % url_prefix
request = self.factory.post(url, post_data)
request.user = authenticate(username=auth.username,
password=auth.password)
self.response = submission(request, username=username)
if auth and self.response.status_code == 401:
request.META.update(auth(request.META, self.response))
self.response = submission(request, username=username)
if forced_submission_time:
instance = Instance.objects.order_by('-pk').all()[0]
instance.date_created = forced_submission_time
instance.save()
instance.parsed_instance.save()
# remove temporary file if stored
if add_uuid:
os.unlink(tmp_file.name)
def _make_submission_w_attachment(self, path, attachment_path):
with open(path) as f:
a = open(attachment_path)
post_data = {'xml_submission_file': f, 'media_file': a}
url = '/%s/submission' % self.user.username
auth = DigestAuth('bob', 'bob')
self.factory = APIRequestFactory()
request = self.factory.post(url, post_data)
request.user = authenticate(username='bob',
password='bob')
self.response = submission(request,
username=self.user.username)
if auth and self.response.status_code == 401:
request.META.update(auth(request.META, self.response))
self.response = submission(request,
username=self.user.username)
def _make_submissions(self, username=None, add_uuid=False,
should_store=True):
"""Make test fixture submissions to current xform.
:param username: submit under this username, default None.
:param add_uuid: add UUID to submission, default False.
        :param should_store: whether submissions should be saved, default True.
"""
paths = [os.path.join(
self.this_directory, 'fixtures', 'transportation',
'instances', s, s + '.xml') for s in self.surveys]
pre_count = Instance.objects.count()
for path in paths:
self._make_submission(path, username, add_uuid)
post_count = pre_count + len(self.surveys) if should_store\
else pre_count
self.assertEqual(Instance.objects.count(), post_count)
self.assertEqual(self.xform.instances.count(), post_count)
xform = XForm.objects.get(pk=self.xform.pk)
self.assertEqual(xform.num_of_submissions, post_count)
self.assertEqual(xform.user.profile.num_of_submissions, post_count)
def _check_url(self, url, timeout=1):
try:
urllib2.urlopen(url, timeout=timeout)
return True
except (urllib2.URLError, socket.timeout):
pass
return False
def _internet_on(self, url='http://74.125.113.99'):
# default value is some google IP
return self._check_url(url)
def _set_auth_headers(self, username, password):
return {
'HTTP_AUTHORIZATION':
'Basic ' + base64.b64encode('%s:%s' % (username, password)),
}
def _get_authenticated_client(
self, url, username='bob', password='bob', extra={}):
client = DigestClient()
# request with no credentials
req = client.get(url, {}, **extra)
self.assertEqual(req.status_code, 401)
# apply credentials
client.set_authorization(username, password, 'Digest')
return client
def _get_response_content(self, response):
contents = u''
if response.streaming:
actual_content = StringIO()
for content in response.streaming_content:
actual_content.write(content)
contents = actual_content.getvalue()
actual_content.close()
else:
contents = response.content
return contents
def _set_mock_time(self, mock_time):
current_time = timezone.now()
mock_time.return_value = current_time
def _set_require_auth(self, auth=True):
profile, created = UserProfile.objects.get_or_create(user=self.user)
profile.require_auth = auth
profile.save()
def _get_digest_client(self):
self._set_require_auth(True)
client = DigestClient()
client.set_authorization('bob', 'bob', 'Digest')
return client
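# Illustrative sketch (an assumption, not part of this module): tests built on
# TestBase typically publish the bundled transportation form and submit its
# fixture instances before asserting on the stored data.
#
#   class TestSubmissionCounts(TestBase):
#       def test_counts(self):
#           self._publish_transportation_form()
#           self._make_submissions()
#           self.assertEqual(self.xform.instances.count(), len(self.surveys))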
|
|
#!/usr/bin/env python
"""
@package ion.services.mi.test.test_basic
@file ion/services/mi/test/test_basic.py
@author Carlos Rueda
@brief Some unit tests for R2 instrument driver base classes.
This file defines subclasses of core classes mainly to supply required
definitions and then tests functionality in the base classes.
NOTE 4/25/12: This source file is broken after the March-April refactoring
(which at this point is still to be completed).
I only did the minimal changes needed to at least avoid errors but all tests
are skipped.
"""
__author__ = 'Carlos Rueda'
__license__ = 'Apache 2.0'
import logging
import unittest
from mi.core.unit_test import MiUnitTest
from nose.plugins.attrib import attr
from mock import Mock
from mi.core.common import BaseEnum
from mi.core.common import InstErrorCode
#from ion.services.mi.common import DriverAnnouncement
from mi.core.exceptions import InstrumentParameterException
from mi.core.instrument.instrument_protocol import InstrumentProtocol
from mi.core.instrument.instrument_driver import DriverState
from mi.core.instrument.instrument_driver import InstrumentDriver
#from ion.services.mi.instrument_driver import DriverChannel
from mi.core.instrument.instrument_driver import DriverState, DriverConnectionState
import mi.core.mi_logger
mi_logger = logging.getLogger('mi_logger')
class Command(BaseEnum):
pass
class Channel(BaseEnum):
CHAN1 = "CHAN1"
CHAN2 = "CHAN2"
ALL = "CHANNEL_ALL" # DriverChannel.ALL
INSTRUMENT = "CHANNEL_INSTRUMENT" # DriverChannel.INSTRUMENT
class Error(BaseEnum):
pass
class Status(BaseEnum):
pass
class MetadataParameter(BaseEnum):
pass
class Parameter(BaseEnum):
PARAM1 = 'PARAM1'
PARAM2 = 'PARAM2'
PARAM3 = 'PARAM3'
class MyProtocol(InstrumentProtocol):
"""
A MyProtocol instance will be created for each driver channel.
"""
    # a base for values that can be easily associated with each protocol
# (facilitates inspection)
next_base_value = 0
def __init__(self, channel, params, evt_callback=None):
"""
@param channel identifies the particular protocol instance.
@param params the particular parameters for this channel.
"""
InstrumentProtocol.__init__(self, evt_callback)
self._channel = channel
# initialize values for the params:
MyProtocol.next_base_value += 1000
next_value = MyProtocol.next_base_value
self._values = {}
for param in params:
next_value += 1
self._values[param] = next_value
def initialize(self, *args, **kwargs):
self._state = DriverState.UNCONFIGURED
return InstErrorCode.OK
def configure(self, config, *args, **kwargs):
self.config = config
self._state = DriverState.DISCONNECTED
return InstErrorCode.OK
def connect(self, *args, **kwargs):
self._state = DriverState.AUTOSAMPLE
return InstErrorCode.OK
def disconnect(self, *args, **kwargs):
self._state = DriverState.DISCONNECTED
return InstErrorCode.OK
def attach(self, *args, **kwargs):
return InstErrorCode.OK
def detach(self, *args, **kwargs):
return InstErrorCode.OK
def get(self, params, *args, **kwargs):
mi_logger.debug("MyProtocol(%s).get: params=%s" % (self._channel,
str(params)))
assert isinstance(params, (list, tuple))
result = {}
for param in params:
if param in self._values:
value = self._values[param]
else:
value = InstErrorCode.INVALID_PARAMETER
result[param] = value
return result
def set(self, params, *args, **kwargs):
mi_logger.debug("MyProtocol(%s).set: params=%s" % (self._channel,
str(params)))
assert isinstance(params, dict)
updated_params = 0
result = {}
for (param, value) in params.items():
if param in self._values:
if isinstance(value, int):
self._values[param] = value
result[param] = InstErrorCode.OK
updated_params += 1
else:
result[param] = InstErrorCode.INVALID_PARAM_VALUE
else:
result[param] = InstErrorCode.INVALID_PARAMETER
# self.announce_to_driver(DriverAnnouncement.CONFIG_CHANGE,
# msg="%s parameter(s) successfully set." %
# updated_params)
return result
class MyDriver(InstrumentDriver):
def __init__(self, evt_callback=None):
InstrumentDriver.__init__(self, evt_callback)
self.instrument_commands = Command
self.instrument_parameters = Parameter
self.instrument_channels = Channel
self.instrument_errors = Error
for channel in self.instrument_channels.list():
#
# TODO associate some specific params per channel. Note that
# there is no framework mechanism to specify this. For the
# moment, just associate *all* parameters to each channel:
#
params_per_channel = self.instrument_parameters.list()
protocol = MyProtocol(channel, params_per_channel,
self.protocol_callback)
protocol._fsm = Mock()
protocol._fsm.get_current_state = Mock(return_value=DriverState.UNCONFIGURED)
self.chan_map[channel] = protocol
class Some(object):
VALID_CHANNELS = [
Channel.CHAN1,
Channel.CHAN2,
Channel.INSTRUMENT]
INVALID_CHANNELS = [
"invalid_chan1",
"invalid_chan2"]
VALID_PARAMS = [
(Channel.CHAN1, Parameter.PARAM1),
(Channel.CHAN1, Parameter.PARAM1), # duplicate of previous one
(Channel.CHAN2, Parameter.PARAM1),
(Channel.CHAN2, Parameter.PARAM2),
(Channel.CHAN2, Parameter.PARAM3)]
INVALID_PARAMS = [
("invalid_chan", Parameter.PARAM1),
(Channel.CHAN1, "invalid_param")]
def _print_dict(title, d):
mi_logger.debug("%s:" % title)
for item in d.items():
mi_logger.debug("\t%s" % str(item))
@unittest.skip('Need to align with new refactoring')
@attr('UNIT', group='mi')
class DriverTest(MiUnitTest):
def setUp(self):
self.callback = Mock()
MyProtocol.next_base_value = 0
self.driver = MyDriver(self.callback)
def test_initialize(self):
"""Driver initialization tests"""
channels = Some.VALID_CHANNELS + Some.INVALID_CHANNELS
mi_logger.debug("\n initialize: %s" % str(channels))
result = self.driver.initialize(channels)
_print_dict("\n initialize result", result)
for c in channels:
self.assertTrue(c in result)
for c in Some.INVALID_CHANNELS:
self.assertEqual(result[c], InstErrorCode.INVALID_CHANNEL)
for c in Some.VALID_CHANNELS:
self.assertEqual(result[c], InstErrorCode.OK)
def test_configure(self):
"""Driver configuration tests"""
channels = Some.VALID_CHANNELS + Some.INVALID_CHANNELS
mi_logger.debug("\n configure: %s" % str(channels))
configs = {}
for c in channels:
configs[c] = {'method': 'ethernet',
'device_addr': '1.1.1.1',
'device_port': 1,
'server_addr': '2.2.2.2',
'server_port': 2}
result = self.driver.configure(configs)
_print_dict("\n configure result", result)
for c in channels:
self.assertTrue(c in result)
for c in Some.INVALID_CHANNELS:
self.assertEqual(result[c], InstErrorCode.INVALID_CHANNEL)
for c in Some.VALID_CHANNELS:
self.assertEqual(result[c], InstErrorCode.OK)
def test_connect(self):
"""Driver connection tests"""
channels = Some.VALID_CHANNELS + Some.INVALID_CHANNELS
mi_logger.debug("\n connect: %s" % str(channels))
result = self.driver.connect(channels)
_print_dict("\n connect result", result)
for c in channels:
self.assertTrue(c in result)
for c in Some.INVALID_CHANNELS:
self.assertEqual(result[c], InstErrorCode.INVALID_CHANNEL)
for c in Some.VALID_CHANNELS:
self.assertEqual(result[c], InstErrorCode.OK)
def test_get_params(self):
"""Driver get params tests"""
params = Some.VALID_PARAMS + Some.INVALID_PARAMS
mi_logger.debug("\nGET: %s" % str(params))
get_result = self.driver.get_resource(params)
_print_dict("\nGET get_result", get_result)
self.assertEqual(get_result[("invalid_chan", Parameter.PARAM1)],
InstErrorCode.INVALID_CHANNEL)
self.assertEqual(get_result[(Channel.CHAN1, "invalid_param")],
InstErrorCode.INVALID_PARAMETER)
for cp in Some.VALID_PARAMS:
self.assertTrue(cp in get_result)
def test_get_params_channel_all(self):
"""Driver get all params tests"""
params = [(Channel.ALL, Parameter.PARAM1),
(Channel.ALL, Parameter.PARAM2)]
mi_logger.debug("\nGET: %s" % str(params))
get_result = self.driver.get_resource(params)
_print_dict("\nGET get_result", get_result)
for c in Channel.list():
if c != Channel.ALL:
self.assertTrue((c, Parameter.PARAM1) in get_result)
self.assertTrue((c, Parameter.PARAM2) in get_result)
def _prepare_set_params(self, params):
"""Gets a dict for the set operation"""
value = 99000
set_params = {}
for cp in params:
set_params[cp] = value
value += 1
_print_dict("\nset_params", set_params)
return set_params
def test_set_params(self):
"""Driver set params tests"""
params = Some.VALID_PARAMS + Some.INVALID_PARAMS
set_params = self._prepare_set_params(params)
set_result = self.driver.set_resource(set_params)
_print_dict("\nSET set_result", set_result)
# now, get the values for the valid parameters and check
get_result = self.driver.get_resource(Some.VALID_PARAMS)
_print_dict("\nGET get_result", get_result)
# verify the new values are the ones we wanted
for cp in Some.VALID_PARAMS:
self.assertEqual(set_params[cp], get_result[cp])
def test_set_duplicate_param(self):
"""Driver set duplicate params tests"""
#
# Note that via the ALL specifier, along with a specific channel,
# one could indicate a duplicate parameter for the same channel.
#
params = [(Channel.ALL, Parameter.PARAM1),
(Channel.CHAN1, Parameter.PARAM1)]
set_params = self._prepare_set_params(params)
set_result = self.driver.set_resource(set_params)
_print_dict("\nSET set_result", set_result)
self.assertEqual(set_result[(Channel.CHAN1, Parameter.PARAM1)],
InstErrorCode.DUPLICATE_PARAMETER)
def test_check_channel(self):
"""Test the routines to check the channel arguments"""
self.assertRaises(InstrumentParameterException,
self.driver._check_channel_args, Channel.ALL)
self.assertRaises(InstrumentParameterException,
self.driver._check_channel_args, [])
self.assertRaises(InstrumentParameterException,
self.driver._check_channel_args, None)
(bad, good) = self.driver._check_channel_args(
[Channel.INSTRUMENT])
self.assertEquals(bad, {})
self.assertEquals(good, [Channel.INSTRUMENT])
(bad, good) = self.driver._check_channel_args(["BAD_CHANNEL"])
self.assertEquals(bad, {"BAD_CHANNEL": InstErrorCode.INVALID_CHANNEL})
self.assertEquals(good, [])
(bad, good) = self.driver._check_channel_args([Channel.CHAN1])
self.assertEquals(bad, {})
self.assertEquals(good, [Channel.CHAN1])
(bad, good) = self.driver._check_channel_args([Channel.CHAN1,
Channel.CHAN1])
self.assertEquals(bad, {})
self.assertEquals(good, [Channel.CHAN1])
# @todo Need a better test...something with more channels
(bad, good) = self.driver._check_channel_args([Channel.CHAN1,
Channel.ALL])
self.assertEquals(bad, {})
self.assertEquals(good, [Channel.CHAN1, Channel.CHAN2])
(bad, good) = self.driver._check_channel_args([Channel.CHAN1,
Channel.INSTRUMENT])
self.assertEquals(bad, {})
self.assertEquals(good.count(Channel.CHAN1), 1)
self.assertEquals(good.count(Channel.INSTRUMENT), 1)
self.assertEquals(len(good), 2)
(bad, good) = self.driver._check_channel_args([Channel.CHAN1,
"BAD_CHANNEL"])
self.assertEquals(bad, {"BAD_CHANNEL": InstErrorCode.INVALID_CHANNEL})
self.assertEquals(good, [Channel.CHAN1])
def test_connect_disconnect(self):
"""Test state change when connecting and disconnecting"""
result = self.driver.get_resource_state()
mi_logger.debug("Initial state result: %s", result)
self.assertEquals(result[Channel.INSTRUMENT], DriverState.UNCONFIGURED)
self.driver.chan_map[Channel.INSTRUMENT].connect = Mock(return_value = 12)
result = self.driver.connect()
result = self.driver.get_resource_state()
# Verify we hit the protocol since we are "connected"
self.assertEquals(result[Channel.INSTRUMENT], DriverState.UNCONFIGURED)
result = self.driver.disconnect()
result = self.driver.get_resource_state()
# driver FSM should intercept
self.assertEquals(result[Channel.INSTRUMENT], DriverConnectionState.DISCONNECTED)
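# Illustrative sketch (an assumption that mirrors what MyDriver does above): a
# MyProtocol instance can also be exercised directly; parameter values are
# assigned from next_base_value at construction time.
#
#   p = MyProtocol(Channel.CHAN1, Parameter.list(), evt_callback=None)
#   values = p.get([Parameter.PARAM1, Parameter.PARAM2])
#   result = p.set({Parameter.PARAM1: 42})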
|
|
from pydel.pydel_exceptions import (AuthenticationError, UnexpectedResponseCodeException, InvalidPostException,
NoPydelInstanceException, UnauthorizedDeletionException, UnauthenticatedException)
from pydel import colors, utils
import requests
import time
DEFAULT_USER_AGENT_STRING = 'Jodel/65000 Dalvik/2.1.0 (Linux; U; Android 5.0; SM-G900F Build/LRX21T)'
BASE_API_URL = 'https://api.go-tellm.com/api/v2/'
class Pydel:
def __init__(self, city, country_code, lat, lng, device_uid=None, user_agent_string=DEFAULT_USER_AGENT_STRING, debug=False):
self._location = {
'city': city,
'country': country_code,
'loc_accuracy': utils.random_loc_accuracy(),
'loc_coordinates': {
'lat': lat,
'lng': lng
},
'name': city
}
self._device_uid = device_uid
self._user_agent_string = user_agent_string
self._debug = debug
self._access_token = None
self._distinct_id = None
self._expiration_date = None
self._refresh_token = None
def _generate_headers(self):
return {'User-Agent': self._user_agent_string,
'Authorization': "Bearer {}".format(self._access_token),
'Accept-Encoding': 'gzip'
}
def _authenticated_request(self, method, url, json_data=None, data=None):
if self._access_token is None:
raise UnauthenticatedException()
if self._expiration_date is not None and self._expiration_date < time.time(): # Our access token has expired
self.authenticate()
req = requests.request(method=method, url=BASE_API_URL + url, headers=self._generate_headers(), json=json_data,
data=data)
if self._debug:
print("_authenticated_request: " + req.text)
if req.status_code == requests.codes.ok or req.status_code == requests.codes.no_content:
return req
else:
raise UnexpectedResponseCodeException("Server responded with {}".format(req.status_code))
def _new_post(self, color, message):
"""
Posts a new Jodel.
Args:
color: Post color, hexadecimal without leading #. Can be FF9908 (orange), FFBA00 (yellow), DD5F5F (red), 06A3CB (blue), 8ABDB0 (bluegreyish), 9EC41C (green)
message: Content of the post
Returns:
Request object
Raises:
AuthenticationError: An attempt to replace an outdated auth token failed.
UnexpectedResponseCodeException: The server responded with an unexpected HTTP status code (that is, not 200 or 204)
"""
return self._authenticated_request(method='POST', url='posts',
json_data={
'color': color,
'location': self._location,
'message': message})
def _reply_to_post_id(self, color, message, post_id):
"""
Posts a reply to a Jodel.
Args:
color: Post color, hexadecimal without leading #. Can be FF9908 (orange), FFBA00 (yellow), DD5F5F (red), 06A3CB (blue), 8ABDB0 (bluegreyish), 9EC41C (green)
message: Content of the post
post_id: Id of the post to reply to
Returns:
Request object
Raises:
AuthenticationError: An attempt to replace an outdated auth token failed.
UnexpectedResponseCodeException: The server responded with an unexpected HTTP status code (that is, not 200 or 204)
"""
return self._authenticated_request(method='POST', url='posts',
json_data={
'ancestor': post_id,
'color': color,
'location': self._location,
'message': message})
def _delete_post_id(self, post_id):
return self._authenticated_request(method='DELETE', url="posts/{}".format(post_id))
def _vote_post_id(self, post_id, direction):
"""
Upvotes or downvotes a jodel.
Args:
            post_id: id of the post to vote on.
direction: "up" for upvote, "down" for downvote.
Returns:
Request object.
Raises:
AuthenticationError: An attempt to replace an outdated auth token failed.
UnexpectedResponseCodeException: The server responded with an unexpected HTTP status code (that is, not 200 or 204)
"""
return self._authenticated_request(method='PUT', url="posts/{}/{}vote".format(post_id, direction))
def get_device_uid(self):
return self._device_uid
def set_token(self, access_token):
self._access_token = access_token
return True
def authenticate(self):
"""
Authenticates with the Jodel server, then sleeps for 5 seconds.
Returns:
True on success.
Raises:
AuthenticationError on failure to authenticate (typically, the server not returning HTTP 200 or 204).
"""
req = requests.post(BASE_API_URL + 'users',
headers={'User-Agent': self._user_agent_string,
'Accept-Encoding': 'gzip',
'Content-Type': 'application/json; charset=UTF-8'},
json={'client_id': '81e8a76e-1e02-4d17-9ba0-8a7020261b26',
'device_uid': self._device_uid,
'location': self._location}
)
if self._debug:
print("authenticate: " + req.text)
if req.status_code == requests.codes.ok:
self._access_token = req.json()['access_token']
self._distinct_id = req.json()['distinct_id']
self._expiration_date = req.json()['expiration_date']
self._refresh_token = req.json()['refresh_token']
time.sleep(5) # Workaround for certain actions being disabled for x seconds after authentication
return True
else:
raise AuthenticationError("Server returned {}".format(req.status_code))
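    # Illustrative usage sketch (an assumption, not part of this class): the
    # usual flow is to construct a Pydel with a location and a device_uid,
    # call authenticate(), then read or write posts. The coordinates and
    # device_uid below are placeholders.
    #
    #   p = Pydel(city='Berlin', country_code='DE', lat=52.52, lng=13.40,
    #             device_uid='0' * 64)
    #   p.authenticate()
    #   recent = p.get_recent_posts(limit=10)
    #   p.new_post(color='06A3CB', message='Hello from pydel')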
def set_location(self, city=None, lat=None, lng=None, country_code=None, loc_name=None, loc_accuracy=None, force=False):
"""
Sets the current location.
Args:
city: City name
lat: Latitude of position to post from
lng: Longitude of position to post from
country_code: 2 or 3 capital letter country code
loc_name: Human-friendly name of position to post from
            loc_accuracy: Location accuracy
            force: If True, push the location to the server even when nothing changed
        Returns:
            True if the location was sent to the server, False otherwise
"""
modified = False
if city and city != self._location['city']:
self._location['city'] = city
modified = True
if lat and lat != self._location['loc_coordinates']['lat']:
self._location['loc_coordinates']['lat'] = lat
modified = True
if lng and lng != self._location['loc_coordinates']['lng']:
self._location['loc_coordinates']['lng'] = lng
modified = True
if country_code and country_code != self._location['country']:
self._location['country'] = country_code
modified = True
if loc_name and loc_name != self._location['name']:
self._location['name'] = loc_name
modified = True
if loc_accuracy and loc_accuracy != self._location['loc_accuracy']:
self._location['loc_accuracy'] = loc_accuracy
modified = True
if modified or force:
self._authenticated_request(method='PUT', url='users/location', json_data={'location': self._location}).text
modified = True
return modified
def get_karma(self):
"""
Returns karma for the currently logged in user.
Returns:
Karma as an integer.
Raises:
AuthenticationError: An attempt to replace an outdated auth token failed.
UnexpectedResponseCodeException: The server responded with an unexpected HTTP status code (that is, not 200 or 204)
"""
return int(self._authenticated_request(method='GET', url='/users/karma').json()['karma'])
def get_my_recent_posts(self):
"""
Returns the posts of the currently logged in user.
Returns:
list of Post objects.
Raises:
AuthenticationError: An attempt to replace an outdated auth token failed.
UnexpectedResponseCodeException: The server responded with an unexpected HTTP status code (that is, not 200 or 204)
"""
return generate_post_list(self._authenticated_request(method='GET', url='posts/mine/').json()['posts'], self)
def get_my_popular_posts(self):
"""
Returns the highest voted posts of the currently logged in user.
Returns:
list of Post objects.
Raises:
AuthenticationError: An attempt to replace an outdated auth token failed.
UnexpectedResponseCodeException: The server responded with an unexpected HTTP status code (that is, not 200 or 204)
"""
return generate_post_list(
self._authenticated_request(method='GET', url='posts/mine/popular').json()['posts'], self)
def get_my_discussed_posts(self):
"""
Returns the most commented posts of the currently logged in user.
Returns:
list of Post objects.
Raises:
AuthenticationError: An attempt to replace an outdated auth token failed.
UnexpectedResponseCodeException: The server responded with an unexpected HTTP status code (that is, not 200 or 204)
"""
return generate_post_list(
self._authenticated_request(method='GET', url='posts/mine/discussed').json()['posts'], self)
def get_my_replies(self):
"""
Returns the replies of the currently logged in user.
Returns:
list of Post objects.
Raises:
AuthenticationError: An attempt to replace an outdated auth token failed.
UnexpectedResponseCodeException: The server responded with an unexpected HTTP status code (that is, not 200 or 204)
"""
return generate_post_list(
self._authenticated_request(method='GET', url='posts/mine/replies').json()['posts'], self)
def get_my_votes(self):
"""
Returns posts the currently logged in user has voted on.
Returns:
list of Post objects.
Raises:
AuthenticationError: An attempt to replace an outdated auth token failed.
UnexpectedResponseCodeException: The server responded with an unexpected HTTP status code (that is, not 200 or 204)
"""
return generate_post_list(
self._authenticated_request(method='GET', url='posts/mine/votes').json()['posts'], self)
def get_recent_posts(self, lat=None, lng=None, limit=30):
"""
Returns most recent posts near the current position.
Args:
lat: Latitude of position to get posts from
lng: Longitude of position to get posts from
limit: Number of posts to get
Returns:
list of Post objects.
Raises:
AuthenticationError: An attempt to replace an outdated auth token failed.
UnexpectedResponseCodeException: The server responded with an unexpected HTTP status code (that is, not 200 or 204)
"""
self.set_location(lat=lat, lng=lng)
params = '?limit=' + str(limit)
if (lat and lng):
params += '&lat=' + str(lat) + '&lng=' + str(lng)
return generate_post_list(self._authenticated_request(method='GET', url='posts/location' + params).json()['posts'], self)
def get_popular_posts(self, lat=None, lng=None, limit=30):
"""
Returns highest voted posts near the current position.
Args:
lat: Latitude of position to get posts from
lng: Longitude of position to get posts from
limit: Number of posts to get
Returns:
list of Post objects.
Raises:
AuthenticationError: An attempt to replace an outdated auth token failed.
UnexpectedResponseCodeException: The server responded with an unexpected HTTP status code (that is, not 200 or 204)
"""
self.set_location(lat=lat, lng=lng)
params = '?limit=' + str(limit)
if (lat and lng):
params += '&lat=' + str(lat) + '&lng=' + str(lng)
return generate_post_list(
self._authenticated_request(method='GET', url='posts/location/popular' + params).json()['posts'], self)
def get_discussed_posts(self, lat=None, lng=None, limit=30):
"""
Returns most commented posts near the current position.
Args:
lat: Latitude of position to get posts from
lng: Longitude of position to get posts from
limit: Number of posts to get
Returns:
list of Post objects.
Raises:
AuthenticationError: An attempt to replace an outdated auth token failed.
UnexpectedResponseCodeException: The server responded with an unexpected HTTP status code (that is, not 200 or 204)
"""
self.set_location(lat=lat, lng=lng)
params = '?limit=' + str(limit)
if (lat and lng):
params += '&lat=' + str(lat) + '&lng=' + str(lng)
return generate_post_list(
self._authenticated_request(method='GET', url='posts/location/discussed' + params).json()['posts'], self)
def get_post(self, post_id):
"""
Returns a specific Jodel post.
Args:
post_id: Alphanumeric string identifying the post
Returns:
Post object.
Raises:
AuthenticationError: An attempt to replace an outdated auth token failed.
UnexpectedResponseCodeException: The server responded with an unexpected HTTP status code (that is, not 200 or 204)
"""
return Post(self._authenticated_request(method='GET', url='posts/{}'.format(post_id)).json(), self)
def new_post(self, color, message):
"""
Posts a new Jodel, using current position and a randomized location accuracy.
Args:
color: Post color, hexadecimal without leading #. Can be FF9908 (orange), FFBA00 (yellow), DD5F5F (red), 06A3CB (blue), 8ABDB0 (bluegreyish), 9EC41C (green)
message: Content of the post.
Returns:
List of Post objects containing the newest posts near the current position.
Raises:
AuthenticationError: An attempt to replace an outdated auth token failed.
UnexpectedResponseCodeException: The server responded with an unexpected HTTP status code (that is, not 200 or 204)
"""
return generate_post_list(self._new_post(color=color, message=message).json()['posts'], self)
def new_reply(self, message, post):
"""
Posts a reply, using current position and a randomized location accuracy.
Args:
message: Content of the reply.
post: Post object to reply to.
Returns:
List of Post objects containing the newest posts near the current position.
Raises:
AuthenticationError: An attempt to replace an outdated auth token failed.
UnexpectedResponseCodeException: The server responded with an unexpected HTTP status code (that is, not 200 or 204)
"""
return generate_post_list(self._reply_to_post_id(color=post.color, message=message, post_id=post.post_id).json(), self)
def delete_post(self, post):
"""
Deletes a post.
Args:
post: Post object to delete.
Returns:
True if the deletion request was successfully sent.
Raises:
AuthenticationError: An attempt to replace an outdated auth token failed.
UnexpectedResponseCodeException: The server responded with an unexpected HTTP status code (that is, not 200 or 204)
"""
self._delete_post_id(post.post_id)
return True
def upvote_post(self, post):
"""
Upvotes a post.
Args:
post: Post object to upvote.
Returns:
False if the currently logged in user has already voted on this post, True if the vote was successful.
Raises:
AuthenticationError: An attempt to replace an outdated auth token failed.
UnexpectedResponseCodeException: The server responded with an unexpected HTTP status code (that is, not 200 or 204).
"""
if post.voted is not None:
return False
else:
self._vote_post_id(post.post_id, 'up')
return True
def downvote_post(self, post):
"""
Downvotes a post.
Args:
post: Post object to downvote.
Returns:
False if the currently logged in user has already voted on this post, True if the vote was successful.
Raises:
AuthenticationError: An attempt to replace an outdated auth token failed.
UnexpectedResponseCodeException: The server responded with an unexpected HTTP status code (that is, not 200 or 204).
"""
if post.voted is not None:
return False
else:
self._vote_post_id(post.post_id, 'down')
return True
class Post:
"""
A Jodel post.
In addition to the explicitly declared attributes, Post instances will also return data for any key found in the json
data used for instantiation.
Attributes:
voted (str): "up"/"down" if the user fetching the post has voted on the post. None if the user has not voted.
vote_count (int): Signed integer indicating how many votes the post has.
has_replies (bool): True if the post has replies, False if it does not.
reply_from_op (bool): True if the post was made by someone replying to their own thread.
replies (list): List of Post objects representing the replies to this post. Empty list if there are no replies.
reply_count (int): The number of replies to this post.
is_image (bool): True if the post contains an image, False if it does not.
image_url (str): None if the post doesn't contain an image, AWS url if it does.
thumbnail_url (str): None if the post doesn't contain an image, AWS url if it does.
created_at (datetime): Time the post was created.
updated_at (datetime): Time the post was last updated (seems to always be the same as created_at).
own_post (bool): True if the post was written by the user who fetched it, False if it was not.
distance (int): Euclidean post distance in kilometers (very_close: 0..1, close: 2..10, city_name: 11+).
location (dict): Dictionary mapping 'lat', 'lng' and 'name' to latitude, longitude and name.
message (str): The contents of the post. Empty string if no message is found.
color (str): Six character hex describing the color of the post. FFFFFF if no color is found.
post_id (str): Alphanumeric string identifying the post.
user_handle (str): Alphanumeric string identifying a user in the current thread.
"""
def __init__(self, json_dict, pydel_instance=None):
"""
Instantiates a Post object.
Args:
json_dict: Dictionary describing a Jodel post.
(optional) pydel_instance: A Pydel instance used for voting/replying/deleting.
Raises:
InvalidPostException: json_dict does not describe a valid Jodel post (typically, it does not contain a post_id key)
"""
if 'post_id' not in json_dict:
raise InvalidPostException('Post data did not contain post_id', json_dict)
self._json_dict = json_dict
self._pydel_instance = pydel_instance
def upvote(self):
"""
Upvotes this post using the Pydel instance given in the constructor.
Returns:
False if the currently logged in user has already voted on this post, True if the vote was successful.
Raises:
AuthenticationError: An attempt to replace an outdated auth token failed.
UnexpectedResponseCodeException: The server responded with an unexpected HTTP status code (that is, not 200 or 204).
NoPydelInstanceException: This Post instance was not instantiated with a Pydel instance.
"""
if self._pydel_instance is not None:
return self._pydel_instance.upvote_post(self)
else:
raise NoPydelInstanceException()
def downvote(self):
"""
Downvotes this post using the Pydel instance given in the constructor.
Returns:
False if the currently logged in user has already voted on this post, True if the vote was successful.
Raises:
AuthenticationError: An attempt to replace an outdated auth token failed.
UnexpectedResponseCodeException: The server responded with an unexpected HTTP status code (that is, not 200 or 204).
NoPydelInstanceException: This Post instance was not instantiated with a Pydel instance.
"""
if self._pydel_instance is not None:
return self._pydel_instance.downvote_post(self)
else:
raise NoPydelInstanceException()
def reply(self, message):
"""
Replies to this post using the Pydel instance given in the constructor.
Args:
message: Post message
Returns:
List of Post objects containing the newest posts near the current position.
Raises:
AuthenticationError: An attempt to replace an outdated auth token failed.
UnexpectedResponseCodeException: The server responded with an unexpected HTTP status code (that is, not 200 or 204)
NoPydelInstanceException: This Post instance was not instantiated with a Pydel instance.
"""
if self._pydel_instance is not None:
return self._pydel_instance.new_reply(message, self)
else:
raise NoPydelInstanceException()
def delete(self):
"""
Deletes this post using the Pydel instance given in the constructor.
Returns:
True if the deletion request was successfully sent.
Raises:
AuthenticationError: An attempt to replace an outdated auth token failed.
UnexpectedResponseCodeException: The server responded with an unexpected HTTP status code (that is, not 200 or 204)
NoPydelInstanceException: This Post instance was not instantiated with a Pydel instance.
UnauthorizedDeletionException: The Pydel instance associated with this Post object does not own the post.
"""
if not self.own_post:
raise UnauthorizedDeletionException(self.post_id)
elif self._pydel_instance is None:
raise NoPydelInstanceException()
else:
return self._pydel_instance.delete_post(self)
@property
def voted(self):
if 'voted' in self._json_dict:
return self._json_dict['voted']
else:
return None
@property
def vote_count(self):
return self._json_dict['vote_count']
@property
def has_replies(self):
return 'child_count' in self._json_dict and self._json_dict['child_count'] != 0
@property
def reply_from_op(self):
if 'parent_creator' not in self._json_dict:
return False
else:
return self._json_dict['parent_creator'] == 1
@property
def replies(self):
if self.has_replies:
return generate_post_list(self._json_dict['children'], self._pydel_instance)
else:
return []
@property
def reply_count(self):
if 'child_count' in self._json_dict:
return self._json_dict['child_count']
else:
return 0
@property
def is_image(self):
return 'image_url' in self._json_dict
@property
def image_url(self):
if 'image_url' in self._json_dict:
return self._json_dict['image_url']
else:
return None
@property
def thumbnail_url(self):
if 'thumbnail_url' in self._json_dict:
return self._json_dict['thumbnail_url']
else:
return None
@property
def created_at(self):
return utils.iso8601_to_datetime(self._json_dict['created_at'])
@property
def updated_at(self):
return utils.iso8601_to_datetime(self._json_dict['updated_at'])
@property
def own_post(self):
return self._json_dict['post_own'] == 'own'
@property
def distance(self):
return self._json_dict['distance']
@property
def location(self):
location = self._json_dict['location']
return {
'lat': location['loc_coordinates']['lat'],
'lng': location['loc_coordinates']['lng'],
'name': location['name']
}
@property
def message(self):
if 'message' in self._json_dict:
return self._json_dict['message']
else:
return ''
@property
def color(self):
if 'color' in self._json_dict:
return self._json_dict['color']
else:
return "FFFFFF"
@property
def post_id(self):
return self._json_dict['post_id']
@property
def user_handle(self):
return self._json_dict['user_handle']
def __getattr__(self, key):
if key in self._json_dict:
return self._json_dict[key]
else:
raise AttributeError
def generate_post_list(json_data, pydel_instance):
return [Post(p, pydel_instance) for p in json_data]
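# Hypothetical usage sketch (not part of the library). The client class name,
# its constructor arguments and the authenticate() method name are assumptions
# inferred from the code above; the remaining calls use methods defined in this
# module. Runs only when the file is executed directly.
if __name__ == '__main__':
    client = Pydel(lat=52.52, lng=13.40, city='Berlin')  # assumed constructor signature
    if client.authenticate():  # assumed public name for the token request shown above
        print('Karma:', client.get_karma())
        for recent in client.get_recent_posts(limit=5):
            print(recent.vote_count, recent.message)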
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ServiceEndpointPoliciesOperations:
"""ServiceEndpointPoliciesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
service_endpoint_policy_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
service_endpoint_policy_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified service endpoint policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_endpoint_policy_name: The name of the service endpoint policy.
:type service_endpoint_policy_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
service_endpoint_policy_name=service_endpoint_policy_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}'} # type: ignore
async def get(
self,
resource_group_name: str,
service_endpoint_policy_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.ServiceEndpointPolicy":
"""Gets the specified service Endpoint Policies in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_endpoint_policy_name: The name of the service endpoint policy.
:type service_endpoint_policy_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ServiceEndpointPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_08_01.models.ServiceEndpointPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceEndpointPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ServiceEndpointPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
service_endpoint_policy_name: str,
parameters: "_models.ServiceEndpointPolicy",
**kwargs: Any
) -> "_models.ServiceEndpointPolicy":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceEndpointPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ServiceEndpointPolicy')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ServiceEndpointPolicy', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ServiceEndpointPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
service_endpoint_policy_name: str,
parameters: "_models.ServiceEndpointPolicy",
**kwargs: Any
) -> AsyncLROPoller["_models.ServiceEndpointPolicy"]:
"""Creates or updates a service Endpoint Policies.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_endpoint_policy_name: The name of the service endpoint policy.
:type service_endpoint_policy_name: str
:param parameters: Parameters supplied to the create or update service endpoint policy
operation.
:type parameters: ~azure.mgmt.network.v2020_08_01.models.ServiceEndpointPolicy
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ServiceEndpointPolicy or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_08_01.models.ServiceEndpointPolicy]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceEndpointPolicy"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
service_endpoint_policy_name=service_endpoint_policy_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ServiceEndpointPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}'} # type: ignore
async def update_tags(
self,
resource_group_name: str,
service_endpoint_policy_name: str,
parameters: "_models.TagsObject",
**kwargs: Any
) -> "_models.ServiceEndpointPolicy":
"""Updates tags of a service endpoint policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_endpoint_policy_name: The name of the service endpoint policy.
:type service_endpoint_policy_name: str
:param parameters: Parameters supplied to update service endpoint policy tags.
:type parameters: ~azure.mgmt.network.v2020_08_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ServiceEndpointPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_08_01.models.ServiceEndpointPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceEndpointPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ServiceEndpointPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}'} # type: ignore
def list(
self,
**kwargs: Any
) -> AsyncIterable["_models.ServiceEndpointPolicyListResult"]:
"""Gets all the service endpoint policies in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ServiceEndpointPolicyListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_08_01.models.ServiceEndpointPolicyListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceEndpointPolicyListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ServiceEndpointPolicyListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/ServiceEndpointPolicies'} # type: ignore
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.ServiceEndpointPolicyListResult"]:
"""Gets all service endpoint Policies in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ServiceEndpointPolicyListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_08_01.models.ServiceEndpointPolicyListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceEndpointPolicyListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ServiceEndpointPolicyListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies'} # type: ignore
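# Hypothetical usage sketch (not part of the generated module). It shows how this
# operation group is normally reached through the versioned async management
# client; the subscription id, resource group and policy name are placeholders,
# and the service_endpoint_policies attribute name is an assumption.
if __name__ == '__main__':
    import asyncio
    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.network.aio import NetworkManagementClient

    async def _demo():
        async with DefaultAzureCredential() as credential:
            async with NetworkManagementClient(credential, '<subscription-id>') as client:
                # GET a single policy, then delete it via the long-running-operation poller.
                policy = await client.service_endpoint_policies.get('<resource-group>', '<policy-name>')
                print(policy.name)
                poller = await client.service_endpoint_policies.begin_delete('<resource-group>', '<policy-name>')
                await poller.result()

    asyncio.run(_demo())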
|
|
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
from importlib import import_module
import inspect
import logging
from django.conf import settings
from django import forms
from django.forms.forms import NON_FIELD_ERRORS
from django import template
from django.template.defaultfilters import linebreaks
from django.template.defaultfilters import safe
from django.template.defaultfilters import slugify
from django import urls
from django.utils.encoding import force_text
from django.utils import module_loading
from django.utils.translation import ugettext_lazy as _
from openstack_auth import policy
import six
from horizon import base
from horizon import exceptions
from horizon.templatetags.horizon import has_permissions
from horizon.utils import html
LOG = logging.getLogger(__name__)
class WorkflowContext(dict):
def __init__(self, workflow, *args, **kwargs):
super(WorkflowContext, self).__init__(*args, **kwargs)
self._workflow = workflow
def __setitem__(self, key, val):
super(WorkflowContext, self).__setitem__(key, val)
return self._workflow._trigger_handlers(key)
def __delitem__(self, key):
return self.__setitem__(key, None)
def set(self, key, val):
return self.__setitem__(key, val)
def unset(self, key):
return self.__delitem__(key)
class ActionMetaclass(forms.forms.DeclarativeFieldsMetaclass):
def __new__(mcs, name, bases, attrs):
# Pop Meta for later processing
opts = attrs.pop("Meta", None)
# Create our new class
cls = super(ActionMetaclass, mcs).__new__(mcs, name, bases, attrs)
# Process options from Meta
cls.name = getattr(opts, "name", name)
cls.slug = getattr(opts, "slug", slugify(name))
cls.permissions = getattr(opts, "permissions", ())
cls.policy_rules = getattr(opts, "policy_rules", ())
cls.progress_message = getattr(opts,
"progress_message",
_("Processing..."))
cls.help_text = getattr(opts, "help_text", "")
cls.help_text_template = getattr(opts, "help_text_template", None)
return cls
@six.python_2_unicode_compatible
@six.add_metaclass(ActionMetaclass)
class Action(forms.Form):
"""An ``Action`` represents an atomic logical interaction with the system.
This is easier to understand with a conceptual example: in the context of
a "launch instance" workflow, actions would include "naming the instance",
"selecting an image", and ultimately "launching the instance".
Because ``Actions`` are always interactive, they always provide form
controls, and thus inherit from Django's ``Form`` class. However, they
have some additional intelligence added to them:
* ``Actions`` are aware of the permissions required to complete them.
* ``Actions`` have a meta-level concept of "help text" which is meant to be
displayed in such a way as to give context to the action regardless of
where the action is presented in a site or workflow.
* ``Actions`` understand how to handle their inputs and produce outputs,
much like :class:`~horizon.forms.SelfHandlingForm` does now.
``Action`` classes may define the following attributes in a ``Meta``
class within them:
.. attribute:: name
The verbose name for this action. Defaults to the name of the class.
.. attribute:: slug
A semi-unique slug for this action. Defaults to the "slugified" name
of the class.
.. attribute:: permissions
A list of permission names which this action requires in order to be
completed. Defaults to an empty list (``[]``).
.. attribute:: policy_rules
list of scope and rule tuples to do policy checks on, the
composition of which is (scope, rule)
* scope: service type managing the policy for action
* rule: string representing the action to be checked
for a policy that requires a single rule check::
policy_rules should look like
"(("compute", "compute:create_instance"),)"
for a policy that requires multiple rule checks::
rules should look like
"(("identity", "identity:list_users"),
("identity", "identity:list_roles"))"
where two service-rule clauses are OR-ed.
.. attribute:: help_text
A string of simple help text to be displayed alongside the Action's
fields.
.. attribute:: help_text_template
A path to a template which contains more complex help text to be
displayed alongside the Action's fields. In conjunction with
:meth:`~horizon.workflows.Action.get_help_text` method you can
customize your help text template to display practically anything.
"""
def __init__(self, request, context, *args, **kwargs):
if request.method == "POST":
super(Action, self).__init__(request.POST, initial=context)
else:
super(Action, self).__init__(initial=context)
if not hasattr(self, "handle"):
raise AttributeError("The action %s must define a handle method."
% self.__class__.__name__)
self.request = request
self._populate_choices(request, context)
self.required_css_class = 'required'
def __str__(self):
return force_text(self.name)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.slug)
def _populate_choices(self, request, context):
for field_name, bound_field in self.fields.items():
meth = getattr(self, "populate_%s_choices" % field_name, None)
if meth is not None and callable(meth):
bound_field.choices = meth(request, context)
def get_help_text(self, extra_context=None):
"""Returns the help text for this step."""
text = ""
extra_context = extra_context or {}
if self.help_text_template:
tmpl = template.loader.get_template(self.help_text_template)
text += tmpl.render(extra_context, self.request)
else:
text += linebreaks(force_text(self.help_text))
return safe(text)
def add_action_error(self, message):
"""Adds an error to the Action's Step based on API issues."""
self.errors[NON_FIELD_ERRORS] = self.error_class([message])
def handle(self, request, context):
"""Handles any requisite processing for this action.
The method should return either ``None`` or a dictionary of data
to be passed to :meth:`~horizon.workflows.Step.contribute`.
Returns ``None`` by default, effectively making it a no-op.
"""
return None
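# Hypothetical example (not part of horizon): a minimal Action subclass sketch
# illustrating the Meta options and the handle() contract documented above. The
# field, slug and messages are invented for illustration only.
class _ExampleNameAction(Action):
    example_name = forms.CharField(max_length=255, label=_("Example name"))

    class Meta(object):
        name = _("Example Name")
        slug = "example_name"
        help_text = _("Choose a name; the owning step contributes it to the "
                      "workflow context.")

    def handle(self, request, context):
        # A real action would call a service API here; returning None keeps this
        # a no-op, matching the default handle() behavior described above.
        LOG.debug("Example action handled; context keys: %s", list(context))
        return None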
class MembershipAction(Action):
"""An action that allows a user to add/remove members from a group.
Extend the Action class with additional helper method for membership
management.
"""
def get_default_role_field_name(self):
return "default_" + self.slug + "_role"
def get_member_field_name(self, role_id):
return self.slug + "_role_" + role_id
@six.python_2_unicode_compatible
class Step(object):
"""A wrapper around an action which defines its context in a workflow.
It knows about details such as:
* The workflow's context data (data passed from step to step).
* The data which must be present in the context to begin this step (the
step's dependencies).
* The keys which will be added to the context data upon completion of the
step.
* The connections between this step's fields and changes in the context
data (e.g. if that piece of data changes, what needs to be updated in
this step).
A ``Step`` class has the following attributes:
.. attribute:: action_class
The :class:`~horizon.workflows.Action` class which this step wraps.
.. attribute:: depends_on
A list of context data keys which this step requires in order to
begin interaction.
.. attribute:: contributes
A list of keys which this step will contribute to the workflow's
context data. Optional keys should still be listed, even if their
values may be set to ``None``.
.. attribute:: connections
A dictionary which maps context data key names to lists of callbacks.
The callbacks may be functions, dotted python paths to functions
which may be imported, or dotted strings beginning with ``"self"``
to indicate methods on the current ``Step`` instance.
.. attribute:: before
Another ``Step`` class. This optional attribute is used to provide
control over workflow ordering when steps are dynamically added to
workflows. The workflow mechanism will attempt to place the current
step before the step specified in the attribute.
.. attribute:: after
Another ``Step`` class. This attribute has the same purpose as
:meth:`~horizon.workflows.Step.before` except that it will instead
attempt to place the current step after the given step.
.. attribute:: help_text
A string of simple help text which will be prepended to the ``Action``
class' help text if desired.
.. attribute:: template_name
A path to a template which will be used to render this step. In
general the default common template should be used. Default:
``"horizon/common/_workflow_step.html"``.
.. attribute:: has_errors
A boolean value which indicates whether or not this step has any
errors on the action within it or in the scope of the workflow. This
attribute will only accurately reflect this status after validation
has occurred.
.. attribute:: slug
Inherited from the ``Action`` class.
.. attribute:: name
Inherited from the ``Action`` class.
.. attribute:: permissions
Inherited from the ``Action`` class.
"""
action_class = None
depends_on = ()
contributes = ()
connections = None
before = None
after = None
help_text = ""
template_name = "horizon/common/_workflow_step.html"
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.slug)
def __str__(self):
return force_text(self.name)
def __init__(self, workflow):
super(Step, self).__init__()
self.workflow = workflow
cls = self.__class__.__name__
if not (self.action_class and issubclass(self.action_class, Action)):
raise AttributeError("action_class not specified for %s." % cls)
self.slug = self.action_class.slug
self.name = self.action_class.name
self.permissions = self.action_class.permissions
self.policy_rules = self.action_class.policy_rules
self.has_errors = False
self._handlers = {}
if self.connections is None:
# We want a dict, but don't want to declare a mutable type on the
# class directly.
self.connections = {}
# Gather our connection handlers and make sure they exist.
for key, handlers in self.connections.items():
self._handlers[key] = []
# TODO(gabriel): This is a poor substitute for broader handling
if not isinstance(handlers, (list, tuple)):
raise TypeError("The connection handlers for %s must be a "
"list or tuple." % cls)
for possible_handler in handlers:
if callable(possible_handler):
# If it's callable we know the function exists and is valid
self._handlers[key].append(possible_handler)
continue
elif not isinstance(possible_handler, six.string_types):
raise TypeError("Connection handlers must be either "
"callables or strings.")
bits = possible_handler.split(".")
if bits[0] == "self":
root = self
for bit in bits[1:]:
try:
root = getattr(root, bit)
except AttributeError:
raise AttributeError("The connection handler %s "
"could not be found on %s."
% (possible_handler, cls))
handler = root
elif len(bits) == 1:
# Import by name from local module not supported
raise ValueError("Importing a local function as a string "
"is not supported for the connection "
"handler %s on %s."
% (possible_handler, cls))
else:
# Try a general import
module_name = ".".join(bits[:-1])
try:
mod = import_module(module_name)
handler = getattr(mod, bits[-1])
except ImportError:
raise ImportError("Could not import %s from the "
"module %s as a connection "
"handler on %s."
% (bits[-1], module_name, cls))
except AttributeError:
raise AttributeError("Could not import %s from the "
"module %s as a connection "
"handler on %s."
% (bits[-1], module_name, cls))
self._handlers[key].append(handler)
@property
def action(self):
if not getattr(self, "_action", None):
try:
# Hook in the action context customization.
workflow_context = dict(self.workflow.context)
context = self.prepare_action_context(self.workflow.request,
workflow_context)
self._action = self.action_class(self.workflow.request,
context)
except Exception:
LOG.exception("Problem instantiating action class.")
raise
return self._action
def prepare_action_context(self, request, context):
"""Hook to customize how the workflow context is passed to the action.
This is the reverse of what "contribute" does to make the
action outputs sane for the workflow. Changes to the context are not
saved globally here. They are localized to the action.
Simply returns the unaltered context by default.
"""
return context
def get_id(self):
"""Returns the ID for this step. Suitable for use in HTML markup."""
return "%s__%s" % (self.workflow.slug, self.slug)
def _verify_contributions(self, context):
for key in self.contributes:
# Make sure we don't skip steps based on weird behavior of
# POST query dicts.
field = self.action.fields.get(key, None)
if field and field.required and not context.get(key):
context.pop(key, None)
failed_to_contribute = set(self.contributes)
failed_to_contribute -= set(context.keys())
if failed_to_contribute:
raise exceptions.WorkflowError("The following expected data was "
"not added to the workflow context "
"by the step %s: %s."
% (self.__class__,
failed_to_contribute))
return True
def contribute(self, data, context):
"""Adds the data listed in ``contributes`` to the workflow's context.
By default, the context is simply updated with all the data
returned by the action.
Note that even if the value of one of the ``contributes`` keys is
not present (e.g. optional) the key should still be added to the
context with a value of ``None``.
"""
if data:
for key in self.contributes:
context[key] = data.get(key, None)
return context
def render(self):
"""Renders the step."""
step_template = template.loader.get_template(self.template_name)
extra_context = {"form": self.action,
"step": self}
return step_template.render(extra_context, self.workflow.request)
def get_help_text(self):
"""Returns the help text for this step."""
text = linebreaks(force_text(self.help_text))
text += self.action.get_help_text()
return safe(text)
def add_step_error(self, message):
"""Adds an error to the Step based on API issues."""
self.action.add_action_error(message)
def has_required_fields(self):
"""Returns True if action contains any required fields."""
return any(field.required for field in self.action.fields.values())
def allowed(self, request):
"""Determines whether or not the step is displayed.
Step instances can override this method to specify conditions under
which this tab should not be shown at all by returning ``False``.
The default behavior is to return ``True`` for all cases.
"""
return True
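# Hypothetical example (not part of horizon): a Step sketch wrapping the example
# action above, showing depends_on, contributes and a "self."-style connection
# handler as described in the Step docstring. All names are illustrative.
class _ExampleNameStep(Step):
    action_class = _ExampleNameAction
    depends_on = ("project_id",)
    contributes = ("example_name",)
    connections = {"project_id": ("self.handle_project_changed",)}

    def handle_project_changed(self, request, context):
        # Invoked whenever "project_id" is (re)set in the workflow context.
        LOG.debug("project_id changed to %s", context.get("project_id"))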
class WorkflowMetaclass(type):
def __new__(mcs, name, bases, attrs):
super(WorkflowMetaclass, mcs).__new__(mcs, name, bases, attrs)
attrs["_cls_registry"] = []
return type.__new__(mcs, name, bases, attrs)
class UpdateMembersStep(Step):
"""A step that allows a user to add/remove members from a group.
.. attribute:: show_roles
Set to False to disable the display of the roles dropdown.
.. attribute:: available_list_title
The title used for the available list column.
.. attribute:: members_list_title
The title used for the members list column.
.. attribute:: no_available_text
The placeholder text used when the available list is empty.
.. attribute:: no_members_text
The placeholder text used when the members list is empty.
"""
template_name = "horizon/common/_workflow_step_update_members.html"
show_roles = True
available_list_title = _("All available")
members_list_title = _("Members")
no_available_text = _("None available.")
no_members_text = _("No members.")
def get_member_field_name(self, role_id):
if issubclass(self.action_class, MembershipAction):
return self.action.get_member_field_name(role_id)
else:
return self.slug + "_role_" + role_id
@six.python_2_unicode_compatible
@six.add_metaclass(WorkflowMetaclass)
class Workflow(html.HTMLElement):
"""A Workflow is a collection of Steps.
Its interface is very straightforward, but it is responsible for handling
some very important tasks such as:
* Handling the injection, removal, and ordering of arbitrary steps.
* Determining if the workflow can be completed by a given user at runtime
based on all available information.
* Dispatching connections between steps to ensure that when context data
changes all the applicable callback functions are executed.
* Verifying/validating the overall data integrity and subsequently
triggering the final method to complete the workflow.
The ``Workflow`` class has the following attributes:
.. attribute:: name
The verbose name for this workflow which will be displayed to the user.
Defaults to the class name.
.. attribute:: slug
The unique slug for this workflow. Required.
.. attribute:: steps
Read-only access to the final ordered set of step instances for
this workflow.
.. attribute:: default_steps
A list of :class:`~horizon.workflows.Step` classes which serve as the
starting point for this workflow's ordered steps. Defaults to an empty
list (``[]``).
.. attribute:: finalize_button_name
The name which will appear on the submit button for the workflow's
form. Defaults to ``"Save"``.
.. attribute:: success_message
A string which will be displayed to the user upon successful completion
of the workflow. Defaults to
``"{{ workflow.name }} completed successfully."``
.. attribute:: failure_message
A string which will be displayed to the user upon failure to complete
the workflow. Defaults to ``"{{ workflow.name }} did not complete."``
.. attribute:: depends_on
A roll-up list of all the ``depends_on`` values compiled from the
workflow's steps.
.. attribute:: contributions
A roll-up list of all the ``contributes`` values compiled from the
workflow's steps.
.. attribute:: template_name
Path to the template which should be used to render this workflow.
In general the default common template should be used. Default:
``"horizon/common/_workflow.html"``.
.. attribute:: entry_point
The slug of the step which should initially be active when the
workflow is rendered. This can be passed in upon initialization of
the workflow, or set anytime after initialization but before calling
either ``get_entry_point`` or ``render``.
.. attribute:: redirect_param_name
The name of a parameter used for tracking the URL to redirect to upon
completion of the workflow. Defaults to ``"next"``.
.. attribute:: object
The object (if any) which this workflow relates to. In the case of
a workflow which creates a new resource the object would be the created
resource after the relevant creation steps have been undertaken. In
the case of a workflow which updates a resource it would be the
resource being updated after it has been retrieved.
.. attribute:: wizard
Whether to present the workflow as a wizard, with "prev" and "next"
buttons and validation after every step.
"""
slug = None
default_steps = ()
template_name = "horizon/common/_workflow.html"
finalize_button_name = _("Save")
success_message = _("%s completed successfully.")
failure_message = _("%s did not complete.")
redirect_param_name = "next"
multipart = False
wizard = False
_registerable_class = Step
def __str__(self):
return self.name
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.slug)
def __init__(self, request=None, context_seed=None, entry_point=None,
*args, **kwargs):
super(Workflow, self).__init__(*args, **kwargs)
if self.slug is None:
raise AttributeError("The workflow %s must have a slug."
% self.__class__.__name__)
self.name = getattr(self, "name", self.__class__.__name__)
self.request = request
self.depends_on = set([])
self.contributions = set([])
self.entry_point = entry_point
self.object = None
self._register_steps_from_config()
# Put together our steps in order. Note that we pre-register
# non-default steps so that we can identify them and subsequently
# insert them in order correctly.
self._registry = collections.OrderedDict(
[(step_class, step_class(self)) for step_class
in self.__class__._cls_registry
if step_class not in self.default_steps])
self._gather_steps()
# Determine all the context data we need to end up with.
for step in self.steps:
self.depends_on = self.depends_on | set(step.depends_on)
self.contributions = self.contributions | set(step.contributes)
# Initialize our context. For ease we can preseed it with a
# regular dictionary. This should happen after steps have been
# registered and ordered.
self.context = WorkflowContext(self)
context_seed = context_seed or {}
clean_seed = dict([(key, val)
for key, val in context_seed.items()
if key in self.contributions | self.depends_on])
self.context_seed = clean_seed
self.context.update(clean_seed)
if request and request.method == "POST":
for step in self.steps:
valid = step.action.is_valid()
# Be sure to use the CLEANED data if the step's action validated.
if valid:
data = step.action.cleaned_data
else:
data = request.POST
self.context = step.contribute(data, self.context)
@property
def steps(self):
if getattr(self, "_ordered_steps", None) is None:
self._gather_steps()
return self._ordered_steps
def get_step(self, slug):
"""Returns the instantiated step matching the given slug."""
for step in self.steps:
if step.slug == slug:
return step
def _register_steps_from_config(self):
my_name = '.'.join([self.__class__.__module__,
self.__class__.__name__])
horizon_config = settings.HORIZON_CONFIG.get('extra_steps', {})
extra_steps = horizon_config.get(my_name, [])
for step in extra_steps:
self._register_step_from_config(step, my_name)
def _register_step_from_config(self, step_config, my_name):
if not isinstance(step_config, str):
LOG.error('Extra step definition must be a string '
'(workflow "%s")', my_name)
return
try:
class_ = module_loading.import_string(step_config)
except ImportError:
LOG.error('Step class "%s" is not found (workflow "%s")',
step_config, my_name)
return
self.register(class_)
def _gather_steps(self):
ordered_step_classes = self._order_steps()
for default_step in self.default_steps:
self.register(default_step)
self._registry[default_step] = default_step(self)
self._ordered_steps = []
for step_class in ordered_step_classes:
cls = self._registry[step_class]
if (has_permissions(self.request.user, cls) and
policy.check(cls.policy_rules, self.request) and
cls.allowed(self.request)):
self._ordered_steps.append(cls)
def _order_steps(self):
steps = list(copy.copy(self.default_steps))
additional = self._registry.keys()
for step in additional:
try:
min_pos = steps.index(step.after)
except ValueError:
min_pos = 0
try:
max_pos = steps.index(step.before)
except ValueError:
max_pos = len(steps)
if min_pos > max_pos:
raise exceptions.WorkflowError("The step %(new)s can't be "
"placed between the steps "
"%(after)s and %(before)s; the "
"step %(before)s comes before "
"%(after)s."
% {"new": additional,
"after": step.after,
"before": step.before})
steps.insert(max_pos, step)
return steps
def get_entry_point(self):
"""Returns the slug of the step which the workflow should begin on.
This method takes into account both already-available data and errors
within the steps.
"""
# If we have a valid specified entry point, use it.
if self.entry_point:
if self.get_step(self.entry_point):
return self.entry_point
# Otherwise fall back to calculating the appropriate entry point.
for step in self.steps:
if step.has_errors:
return step.slug
try:
step._verify_contributions(self.context)
except exceptions.WorkflowError:
return step.slug
# If nothing else, just return the first step.
return self.steps[0].slug
def _trigger_handlers(self, key):
responses = []
handlers = [(step.slug, f) for step in self.steps
for f in step._handlers.get(key, [])]
for slug, handler in handlers:
responses.append((slug, handler(self.request, self.context)))
return responses
@classmethod
def register(cls, step_class):
"""Registers a :class:`~horizon.workflows.Step` with the workflow."""
if not inspect.isclass(step_class):
raise ValueError('Only classes may be registered.')
elif not issubclass(step_class, cls._registerable_class):
raise ValueError('Only %s classes or subclasses may be registered.'
% cls._registerable_class.__name__)
if step_class in cls._cls_registry:
return False
else:
cls._cls_registry.append(step_class)
return True
@classmethod
def unregister(cls, step_class):
"""Unregisters a :class:`~horizon.workflows.Step` from the workflow."""
try:
cls._cls_registry.remove(step_class)
except ValueError:
raise base.NotRegistered('%s is not registered' % cls)
return cls._unregister(step_class)
def validate(self, context):
"""Hook for custom context data validation.
Should return a boolean value or
raise :class:`~horizon.exceptions.WorkflowValidationError`.
"""
return True
def is_valid(self):
"""Verifies that all required data is present in the context.
It also calls the ``validate`` method to allow for finer-grained checks
on the context data.
"""
missing = self.depends_on - set(self.context.keys())
if missing:
raise exceptions.WorkflowValidationError(
"Unable to complete the workflow. The values %s are "
"required but not present." % ", ".join(missing))
# Validate each step. Cycle through all of them to catch all errors
# in one pass before returning.
steps_valid = True
for step in self.steps:
if not step.action.is_valid():
steps_valid = False
step.has_errors = True
if not steps_valid:
return steps_valid
return self.validate(self.context)
def finalize(self):
"""Finalizes a workflow by running through all the actions.
It runs all the actions in order, calling their ``handle`` methods.
Returns ``True`` on full success, or ``False`` for a partial success,
e.g. there were non-critical errors.
(If it failed completely the function wouldn't return.)
"""
partial = False
for step in self.steps:
try:
data = step.action.handle(self.request, self.context)
if data is True or data is None:
continue
elif data is False:
partial = True
else:
self.context = step.contribute(data or {}, self.context)
except Exception:
partial = True
exceptions.handle(self.request)
if not self.handle(self.request, self.context):
partial = True
return not partial
def handle(self, request, context):
"""Handles any final processing for this workflow.
Should return a boolean value indicating success.
"""
return True
def get_success_url(self):
"""Returns a URL to redirect the user to upon completion.
By default it will attempt to parse a ``success_url`` attribute on the
workflow, which can take the form of a reversible URL pattern name,
or a standard HTTP URL.
"""
try:
return urls.reverse(self.success_url)
except urls.NoReverseMatch:
return self.success_url
def format_status_message(self, message):
"""Hook to allow customization of the message returned to the user.
This is called upon both successful and unsuccessful completion of
the workflow.
By default it simply inserts the workflow's name into the message
string.
"""
if "%s" in message:
return message % self.name
else:
return message
def verify_integrity(self):
provided_keys = self.contributions | set(self.context_seed.keys())
if len(self.depends_on - provided_keys):
raise exceptions.NotAvailable(
_("The current user has insufficient permission to complete "
"the requested task."))
def render(self):
"""Renders the workflow."""
workflow_template = template.loader.get_template(self.template_name)
extra_context = {"workflow": self}
if self.request.is_ajax():
extra_context['modal'] = True
return workflow_template.render(extra_context, self.request)
def get_absolute_url(self):
"""Returns the canonical URL for this workflow.
This is used for the POST action attribute on the form element
wrapping the workflow.
For convenience it defaults to the value of
``request.get_full_path()`` with any query string stripped off,
i.e. the path at which the workflow was requested.
"""
return self.request.get_full_path().partition('?')[0]
def add_error_to_step(self, message, slug):
"""Adds an error message to the workflow's Step.
This is useful when you wish for API errors to appear as errors
on the form rather than using the messages framework.
The workflow's Step is specified by its slug.
"""
step = self.get_step(slug)
if step:
step.add_step_error(message)
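# --- Illustrative sketch (not part of the module above) ----------------------
# A hedged example of how the Workflow/Step machinery is typically wired up in
# a dashboard's workflows.py.  The Action/Step attributes shown (action_class,
# contributes, Meta.name) follow the usual Horizon pattern referenced by the
# docstrings above; the form field and the success_url name are placeholders.
from django import forms

from horizon import workflows


class SetNameAction(workflows.Action):
    name = forms.CharField(label="Name", max_length=255)

    class Meta(object):
        name = "Details"

    def handle(self, request, data):
        # Returning a dict feeds Step.contribute(); True/None means
        # "nothing further to contribute".
        return {"name": data["name"]}


class SetNameStep(workflows.Step):
    action_class = SetNameAction
    contributes = ("name",)


class CreateThingWorkflow(workflows.Workflow):
    slug = "create_thing"
    name = "Create Thing"
    success_message = "Created %s."
    failure_message = "Unable to create %s."
    success_url = "horizon:project:things:index"  # placeholder URL name
    default_steps = (SetNameStep,)

    def handle(self, request, context):
        # Final processing once every step's Action.handle() has run;
        # return a boolean indicating overall success.
        return bool(context.get("name"))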
|
|
"""
By specifying the 'proxy' Meta attribute, model subclasses can specify that
they will take data directly from the table of their base class rather
than using a new table of their own. This allows them to act as simple proxies,
providing a modified interface to the data from the base class.
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
# A couple of managers for testing manager overriding in proxy model cases.
class PersonManager(models.Manager):
def get_queryset(self):
return super(PersonManager, self).get_queryset().exclude(name="fred")
class SubManager(models.Manager):
def get_queryset(self):
return super(SubManager, self).get_queryset().exclude(name="wilma")
@python_2_unicode_compatible
class Person(models.Model):
"""
A simple concrete base class.
"""
name = models.CharField(max_length=50)
objects = PersonManager()
def __str__(self):
return self.name
class Abstract(models.Model):
"""
A simple abstract base class, to be used for error checking.
"""
data = models.CharField(max_length=10)
class Meta:
abstract = True
class MyPerson(Person):
"""
A proxy subclass; this should not get a new table. Overrides the default
manager.
"""
class Meta:
proxy = True
ordering = ["name"]
permissions = (
("display_users", "May display users information"),
)
objects = SubManager()
other = PersonManager()
def has_special_name(self):
return self.name.lower() == "special"
class ManagerMixin(models.Model):
excluder = SubManager()
class Meta:
abstract = True
class OtherPerson(Person, ManagerMixin):
"""
A class with the default manager from Person, plus a secondary manager.
"""
class Meta:
proxy = True
ordering = ["name"]
class StatusPerson(MyPerson):
"""
A non-proxy subclass of a proxy; it should get a new table.
"""
status = models.CharField(max_length=80)
# We can even have proxies of proxies (and subclass of those).
class MyPersonProxy(MyPerson):
class Meta:
proxy = True
class LowerStatusPerson(MyPersonProxy):
status = models.CharField(max_length=80)
@python_2_unicode_compatible
class User(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return self.name
class UserProxy(User):
class Meta:
proxy = True
class AnotherUserProxy(User):
class Meta:
proxy = True
class UserProxyProxy(UserProxy):
class Meta:
proxy = True
class MultiUserProxy(UserProxy, AnotherUserProxy):
class Meta:
proxy = True
# We can still use `select_related()` to include related models in our querysets.
class Country(models.Model):
name = models.CharField(max_length=50)
@python_2_unicode_compatible
class State(models.Model):
name = models.CharField(max_length=50)
country = models.ForeignKey(Country, models.CASCADE)
def __str__(self):
return self.name
class StateProxy(State):
class Meta:
proxy = True
# Proxy models still work with filters (on related fields)
# and select_related, even when mixed with model inheritance
@python_2_unicode_compatible
class BaseUser(models.Model):
name = models.CharField(max_length=255)
def __str__(self):
return ':'.join((self.__class__.__name__, self.name,))
class TrackerUser(BaseUser):
status = models.CharField(max_length=50)
class ProxyTrackerUser(TrackerUser):
class Meta:
proxy = True
@python_2_unicode_compatible
class Issue(models.Model):
summary = models.CharField(max_length=255)
assignee = models.ForeignKey(ProxyTrackerUser, models.CASCADE, related_name='issues')
def __str__(self):
return ':'.join((self.__class__.__name__, self.summary,))
class Bug(Issue):
version = models.CharField(max_length=50)
reporter = models.ForeignKey(BaseUser, models.CASCADE)
class ProxyBug(Bug):
"""
Proxy of an inherited class
"""
class Meta:
proxy = True
class ProxyProxyBug(ProxyBug):
"""
A proxy of a proxy model with a related field
"""
class Meta:
proxy = True
class Improvement(Issue):
"""
A model that has a relation to a proxy model
or to a proxy of a proxy model
"""
version = models.CharField(max_length=50)
reporter = models.ForeignKey(ProxyTrackerUser, models.CASCADE)
associated_bug = models.ForeignKey(ProxyProxyBug, models.CASCADE)
class ProxyImprovement(Improvement):
class Meta:
proxy = True
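# --- Illustrative sketch (not part of the models above) ----------------------
# A hedged demonstration of the proxy behaviour these models exercise: a proxy
# shares its concrete parent's table but keeps its own default manager.
# Assumes a configured Django environment with these models migrated.
def demonstrate_proxy_behaviour():
    # Proxies have no table of their own; they reuse the parent's.
    assert MyPerson._meta.db_table == Person._meta.db_table
    # StatusPerson is a concrete subclass of a proxy, so it does get a table.
    assert StatusPerson._meta.db_table != Person._meta.db_table

    Person.objects.create(name="fred")
    Person.objects.create(name="special")

    # Person's default manager (PersonManager) hides "fred";
    # MyPerson's default manager (SubManager) only hides "wilma".
    assert not Person.objects.filter(name="fred").exists()
    assert MyPerson.objects.filter(name="fred").exists()
    assert MyPerson.objects.get(name="special").has_special_name()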
|
|
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import time
from polyaxon.stores.local_store import LocalStore
from polyaxon.utils.date_utils import to_datetime
from polyaxon.utils.test_utils import BaseTestCase
class TestLocalStore(BaseTestCase):
def test_ls(self):
store = LocalStore()
dirname1 = tempfile.mkdtemp()
dirname2 = tempfile.mkdtemp(prefix=dirname1 + "/")
open(dirname1 + "/a", "w")
open(dirname1 + "/b", "w")
open(dirname2 + "/c", "w")
full_response = {
"files": [("a", 0), ("b", 0)],
"dirs": [os.path.basename(dirname2)],
}
empty_response = {"dirs": [], "files": []}
dir_response = {"dirs": [], "files": [("c", 0)]}
assert store.ls(dirname1) == full_response
assert store.ls(dirname1 + "/") == full_response
assert store.ls(dirname1 + "/non-existent") == empty_response
assert store.ls(dirname1 + "/non-existent/") == empty_response
assert store.ls(dirname2) == dir_response
assert store.ls(dirname2) == dir_response
def test_delete(self):
store = LocalStore()
dirname1 = tempfile.mkdtemp()
dirname2 = tempfile.mkdtemp(prefix=dirname1 + "/")
open(dirname1 + "/a", "w")
open(dirname1 + "/b", "w")
open(dirname2 + "/c", "w")
store.delete(dirname1 + "/a")
assert store.ls(dirname1) == {
"files": [("b", 0)],
"dirs": [os.path.basename(dirname2)],
}
store.delete(dirname2)
assert store.ls(dirname1) == {"files": [("b", 0)], "dirs": []}
def test_upload(self):
dirname = tempfile.mkdtemp()
dirname2 = tempfile.mkdtemp()
fpath = dirname + "/test.txt"
open(fpath, "w")
store = LocalStore()
# Test without basename
path2 = dirname2 + "/fo.txt"
assert os.path.isfile(path2) is False
store.upload_file(filename=fpath, path_to=path2, use_basename=False)
assert os.path.isfile(path2) is True
# Test with basename
dirname2 = tempfile.mkdtemp()
assert os.path.isfile(dirname2 + "/test.txt") is False
store.upload_file(filename=fpath, path_to=dirname2, use_basename=True)
assert os.path.isfile(dirname2 + "/test.txt") is True
def test_download(self):
dirname = tempfile.mkdtemp()
dirname2 = tempfile.mkdtemp()
fpath = dirname + "/test.txt"
open(fpath, "w")
store = LocalStore()
# Test without basename
path2 = dirname2 + "/fo.txt"
assert os.path.isfile(path2) is False
store.download_file(path_from=fpath, local_path=path2, use_basename=False)
assert os.path.isfile(path2) is True
# Test with basename
dirname2 = tempfile.mkdtemp()
assert os.path.isfile(dirname2 + "/test.txt") is False
store.download_file(path_from=fpath, local_path=dirname2, use_basename=True)
assert os.path.isfile(dirname2 + "/test.txt") is True
def test_upload_dir(self):
dirname1 = tempfile.mkdtemp()
fpath1 = dirname1 + "/test1.txt"
with open(fpath1, "w") as f:
f.write("data1")
fpath2 = dirname1 + "/test2.txt"
with open(fpath2, "w") as f:
f.write("data2")
dirname2 = tempfile.mkdtemp(prefix=dirname1 + "/")
fpath3 = dirname2 + "/test3.txt"
with open(fpath3, "w") as f:
f.write("data3")
store = LocalStore()
path_to = tempfile.mkdtemp()
rel_path1 = dirname1.split("/")[-1]
rel_path2 = dirname2.split("/")[-1]
# Test without basename
assert os.path.exists(os.path.join(path_to, "test1.txt")) is False
assert os.path.exists(os.path.join(path_to, "test2.txt")) is False
assert os.path.exists(os.path.join(path_to, rel_path2, "test3.txt")) is False
store.upload_dir(dirname=dirname1, path_to=path_to, use_basename=False)
assert os.path.exists(os.path.join(path_to, "test1.txt")) is True
assert os.path.exists(os.path.join(path_to, "test2.txt")) is True
assert os.path.exists(os.path.join(path_to, rel_path2, "test3.txt")) is True
# Test with basename
path_to = tempfile.mkdtemp()
assert os.path.exists(os.path.join(path_to, rel_path1, "test1.txt")) is False
assert os.path.exists(os.path.join(path_to, rel_path1, "test2.txt")) is False
assert (
os.path.exists(os.path.join(path_to, rel_path1, rel_path2, "test3.txt"))
is False
)
store.upload_dir(dirname=dirname1, path_to=path_to, use_basename=True)
assert os.path.exists(os.path.join(path_to, rel_path1, "test1.txt")) is True
assert os.path.exists(os.path.join(path_to, rel_path1, "test2.txt")) is True
assert (
os.path.exists(os.path.join(path_to, rel_path1, rel_path2, "test3.txt"))
is True
)
def test_upload_dir_with_last_time(self):
dirname1 = tempfile.mkdtemp()
fpath1 = dirname1 + "/test1.txt"
with open(fpath1, "w") as f:
f.write("data1")
fpath2 = dirname1 + "/test2.txt"
with open(fpath2, "w") as f:
f.write("data2")
last_time = to_datetime(os.stat(fpath2).st_mtime)
time.sleep(0.1)
dirname2 = tempfile.mkdtemp(prefix=dirname1 + "/")
fpath3 = dirname2 + "/test3.txt"
with open(fpath3, "w") as f:
f.write("data3")
store = LocalStore()
path_to = tempfile.mkdtemp()
rel_path1 = dirname1.split("/")[-1]
rel_path2 = dirname2.split("/")[-1]
# Test without basename
assert os.path.exists(os.path.join(path_to, "test1.txt")) is False
assert os.path.exists(os.path.join(path_to, "test2.txt")) is False
assert os.path.exists(os.path.join(path_to, rel_path2, "test3.txt")) is False
store.upload_dir(
dirname=dirname1, path_to=path_to, use_basename=False, last_time=last_time
)
assert os.path.exists(os.path.join(path_to, "test1.txt")) is False
assert os.path.exists(os.path.join(path_to, "test2.txt")) is False
assert os.path.exists(os.path.join(path_to, rel_path2, "test3.txt")) is True
# Test with basename
path_to = tempfile.mkdtemp()
assert os.path.exists(os.path.join(path_to, rel_path1, "test1.txt")) is False
assert os.path.exists(os.path.join(path_to, rel_path1, "test2.txt")) is False
assert (
os.path.exists(os.path.join(path_to, rel_path1, rel_path2, "test3.txt"))
is False
)
store.upload_dir(
dirname=dirname1, path_to=path_to, use_basename=True, last_time=last_time
)
assert os.path.exists(os.path.join(path_to, "test1.txt")) is False
assert os.path.exists(os.path.join(path_to, "test2.txt")) is False
assert (
os.path.exists(os.path.join(path_to, rel_path1, rel_path2, "test3.txt"))
is True
)
def test_download_dir(self):
dirname1 = tempfile.mkdtemp()
fpath1 = dirname1 + "/test1.txt"
with open(fpath1, "w") as f:
f.write("data1")
fpath2 = dirname1 + "/test2.txt"
with open(fpath2, "w") as f:
f.write("data2")
dirname2 = tempfile.mkdtemp(prefix=dirname1 + "/")
fpath3 = dirname2 + "/test3.txt"
with open(fpath3, "w") as f:
f.write("data3")
store = LocalStore()
path_to = tempfile.mkdtemp()
rel_path1 = dirname1.split("/")[-1]
rel_path2 = dirname2.split("/")[-1]
# Test without basename
assert os.path.exists(os.path.join(path_to, "test1.txt")) is False
assert os.path.exists(os.path.join(path_to, "test2.txt")) is False
assert os.path.exists(os.path.join(path_to, rel_path2, "test3.txt")) is False
store.download_dir(path_from=dirname1, local_path=path_to, use_basename=False)
assert os.path.exists(os.path.join(path_to, "test1.txt")) is True
assert os.path.exists(os.path.join(path_to, "test2.txt")) is True
assert os.path.exists(os.path.join(path_to, rel_path2, "test3.txt")) is True
# Test with basename
path_to = tempfile.mkdtemp()
assert os.path.exists(os.path.join(path_to, rel_path1, "test1.txt")) is False
assert os.path.exists(os.path.join(path_to, rel_path1, "test2.txt")) is False
assert (
os.path.exists(os.path.join(path_to, rel_path1, rel_path2, "test3.txt"))
is False
)
store.download_dir(path_from=dirname1, local_path=path_to, use_basename=True)
assert os.path.exists(os.path.join(path_to, rel_path1, "test1.txt")) is True
assert os.path.exists(os.path.join(path_to, rel_path1, "test2.txt")) is True
assert (
os.path.exists(os.path.join(path_to, rel_path1, rel_path2, "test3.txt"))
is True
)
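# --- Illustrative sketch (not part of the test case above) -------------------
# A hedged example of the LocalStore calls exercised by the tests: ls() reports
# (name, size) pairs plus sub-directories, and upload_dir() mirrors a tree,
# optionally skipping files older than ``last_time``.  Paths are placeholders;
# only the API surface used in the tests above is assumed.
def sync_outputs(src_dir, dst_dir, last_time=None):
    store = LocalStore()
    listing = store.ls(src_dir)  # {"files": [(name, size), ...], "dirs": [...]}
    if listing["files"] or listing["dirs"]:
        store.upload_dir(
            dirname=src_dir,
            path_to=dst_dir,
            use_basename=False,   # copy contents directly into dst_dir
            last_time=last_time,  # only files modified after this datetime
        )
    return listing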
|
|
from __future__ import absolute_import
from operator import attrgetter
import sys
import os
import shlex
from six import StringIO
from mock import patch
from .testcases import DockerClientTestCase
from compose.cli.main import TopLevelCommand
from compose.cli.errors import UserError
from compose.project import NoSuchService
class CLITestCase(DockerClientTestCase):
def setUp(self):
super(CLITestCase, self).setUp()
self.old_sys_exit = sys.exit
sys.exit = lambda code=0: None
self.command = TopLevelCommand()
self.command.base_dir = 'tests/fixtures/simple-composefile'
def tearDown(self):
sys.exit = self.old_sys_exit
self.project.kill()
self.project.remove_stopped()
for container in self.project.containers(stopped=True, one_off=True):
container.remove(force=True)
super(CLITestCase, self).tearDown()
@property
def project(self):
# Hack: allow project to be overridden. This needs refactoring so that
# the project object is built exactly once, by the command object, and
# accessed by the test case object.
if hasattr(self, '_project'):
return self._project
return self.command.get_project()
def test_help(self):
old_base_dir = self.command.base_dir
self.command.base_dir = 'tests/fixtures/no-composefile'
with self.assertRaises(SystemExit) as exc_context:
self.command.dispatch(['help', 'up'], None)
self.assertIn('Usage: up [options] [SERVICE...]', str(exc_context.exception))
# self.project.kill() fails during teardown
# unless there is a composefile.
self.command.base_dir = old_base_dir
# TODO: address the "Inappropriate ioctl for device" warnings in test output
@patch('sys.stdout', new_callable=StringIO)
def test_ps(self, mock_stdout):
self.project.get_service('simple').create_container()
self.command.dispatch(['ps'], None)
self.assertIn('simplecomposefile_simple_1', mock_stdout.getvalue())
@patch('sys.stdout', new_callable=StringIO)
def test_ps_default_composefile(self, mock_stdout):
self.command.base_dir = 'tests/fixtures/multiple-composefiles'
self.command.dispatch(['up', '-d'], None)
self.command.dispatch(['ps'], None)
output = mock_stdout.getvalue()
self.assertIn('multiplecomposefiles_simple_1', output)
self.assertIn('multiplecomposefiles_another_1', output)
self.assertNotIn('multiplecomposefiles_yetanother_1', output)
@patch('sys.stdout', new_callable=StringIO)
def test_ps_alternate_composefile(self, mock_stdout):
config_path = os.path.abspath(
'tests/fixtures/multiple-composefiles/compose2.yml')
self._project = self.command.get_project(config_path)
self.command.base_dir = 'tests/fixtures/multiple-composefiles'
self.command.dispatch(['-f', 'compose2.yml', 'up', '-d'], None)
self.command.dispatch(['-f', 'compose2.yml', 'ps'], None)
output = mock_stdout.getvalue()
self.assertNotIn('multiplecomposefiles_simple_1', output)
self.assertNotIn('multiplecomposefiles_another_1', output)
self.assertIn('multiplecomposefiles_yetanother_1', output)
@patch('compose.service.log')
def test_pull(self, mock_logging):
self.command.dispatch(['pull'], None)
mock_logging.info.assert_any_call('Pulling simple (busybox:latest)...')
mock_logging.info.assert_any_call('Pulling another (busybox:latest)...')
@patch('sys.stdout', new_callable=StringIO)
def test_build_no_cache(self, mock_stdout):
self.command.base_dir = 'tests/fixtures/simple-dockerfile'
self.command.dispatch(['build', 'simple'], None)
mock_stdout.truncate(0)
cache_indicator = 'Using cache'
self.command.dispatch(['build', 'simple'], None)
output = mock_stdout.getvalue()
self.assertIn(cache_indicator, output)
mock_stdout.truncate(0)
self.command.dispatch(['build', '--no-cache', 'simple'], None)
output = mock_stdout.getvalue()
self.assertNotIn(cache_indicator, output)
def test_up(self):
self.command.dispatch(['up', '-d'], None)
service = self.project.get_service('simple')
another = self.project.get_service('another')
self.assertEqual(len(service.containers()), 1)
self.assertEqual(len(another.containers()), 1)
# Ensure containers don't have stdin and stdout connected in -d mode
config = service.containers()[0].inspect()['Config']
self.assertFalse(config['AttachStderr'])
self.assertFalse(config['AttachStdout'])
self.assertFalse(config['AttachStdin'])
def test_up_with_links(self):
self.command.base_dir = 'tests/fixtures/links-composefile'
self.command.dispatch(['up', '-d', 'web'], None)
web = self.project.get_service('web')
db = self.project.get_service('db')
console = self.project.get_service('console')
self.assertEqual(len(web.containers()), 1)
self.assertEqual(len(db.containers()), 1)
self.assertEqual(len(console.containers()), 0)
def test_up_with_no_deps(self):
self.command.base_dir = 'tests/fixtures/links-composefile'
self.command.dispatch(['up', '-d', '--no-deps', 'web'], None)
web = self.project.get_service('web')
db = self.project.get_service('db')
console = self.project.get_service('console')
self.assertEqual(len(web.containers()), 1)
self.assertEqual(len(db.containers()), 0)
self.assertEqual(len(console.containers()), 0)
def test_up_with_force_recreate(self):
self.command.dispatch(['up', '-d'], None)
service = self.project.get_service('simple')
self.assertEqual(len(service.containers()), 1)
old_ids = [c.id for c in service.containers()]
self.command.dispatch(['up', '-d', '--force-recreate'], None)
self.assertEqual(len(service.containers()), 1)
new_ids = [c.id for c in service.containers()]
self.assertNotEqual(old_ids, new_ids)
def test_up_with_no_recreate(self):
self.command.dispatch(['up', '-d'], None)
service = self.project.get_service('simple')
self.assertEqual(len(service.containers()), 1)
old_ids = [c.id for c in service.containers()]
self.command.dispatch(['up', '-d', '--no-recreate'], None)
self.assertEqual(len(service.containers()), 1)
new_ids = [c.id for c in service.containers()]
self.assertEqual(old_ids, new_ids)
def test_up_with_force_recreate_and_no_recreate(self):
with self.assertRaises(UserError):
self.command.dispatch(['up', '-d', '--force-recreate', '--no-recreate'], None)
def test_up_with_timeout(self):
self.command.dispatch(['up', '-d', '-t', '1'], None)
service = self.project.get_service('simple')
another = self.project.get_service('another')
self.assertEqual(len(service.containers()), 1)
self.assertEqual(len(another.containers()), 1)
# Ensure containers don't have stdin and stdout connected in -d mode
config = service.containers()[0].inspect()['Config']
self.assertFalse(config['AttachStderr'])
self.assertFalse(config['AttachStdout'])
self.assertFalse(config['AttachStdin'])
@patch('dockerpty.start')
def test_run_service_without_links(self, mock_stdout):
self.command.base_dir = 'tests/fixtures/links-composefile'
self.command.dispatch(['run', 'console', '/bin/true'], None)
self.assertEqual(len(self.project.containers()), 0)
# Ensure stdin/stdout were open
container = self.project.containers(stopped=True, one_off=True)[0]
config = container.inspect()['Config']
self.assertTrue(config['AttachStderr'])
self.assertTrue(config['AttachStdout'])
self.assertTrue(config['AttachStdin'])
@patch('dockerpty.start')
def test_run_service_with_links(self, __):
self.command.base_dir = 'tests/fixtures/links-composefile'
self.command.dispatch(['run', 'web', '/bin/true'], None)
db = self.project.get_service('db')
console = self.project.get_service('console')
self.assertEqual(len(db.containers()), 1)
self.assertEqual(len(console.containers()), 0)
@patch('dockerpty.start')
def test_run_with_no_deps(self, __):
self.command.base_dir = 'tests/fixtures/links-composefile'
self.command.dispatch(['run', '--no-deps', 'web', '/bin/true'], None)
db = self.project.get_service('db')
self.assertEqual(len(db.containers()), 0)
@patch('dockerpty.start')
def test_run_does_not_recreate_linked_containers(self, __):
self.command.base_dir = 'tests/fixtures/links-composefile'
self.command.dispatch(['up', '-d', 'db'], None)
db = self.project.get_service('db')
self.assertEqual(len(db.containers()), 1)
old_ids = [c.id for c in db.containers()]
self.command.dispatch(['run', 'web', '/bin/true'], None)
self.assertEqual(len(db.containers()), 1)
new_ids = [c.id for c in db.containers()]
self.assertEqual(old_ids, new_ids)
@patch('dockerpty.start')
def test_run_without_command(self, _):
self.command.base_dir = 'tests/fixtures/commands-composefile'
self.check_build('tests/fixtures/simple-dockerfile', tag='composetest_test')
self.command.dispatch(['run', 'implicit'], None)
service = self.project.get_service('implicit')
containers = service.containers(stopped=True, one_off=True)
self.assertEqual(
[c.human_readable_command for c in containers],
[u'/bin/sh -c echo "success"'],
)
self.command.dispatch(['run', 'explicit'], None)
service = self.project.get_service('explicit')
containers = service.containers(stopped=True, one_off=True)
self.assertEqual(
[c.human_readable_command for c in containers],
[u'/bin/true'],
)
@patch('dockerpty.start')
def test_run_service_with_entrypoint_overridden(self, _):
self.command.base_dir = 'tests/fixtures/dockerfile_with_entrypoint'
name = 'service'
self.command.dispatch(
['run', '--entrypoint', '/bin/echo', name, 'helloworld'],
None
)
service = self.project.get_service(name)
container = service.containers(stopped=True, one_off=True)[0]
self.assertEqual(
shlex.split(container.human_readable_command),
[u'/bin/echo', u'helloworld'],
)
@patch('dockerpty.start')
def test_run_service_with_user_overridden(self, _):
self.command.base_dir = 'tests/fixtures/user-composefile'
name = 'service'
user = 'sshd'
args = ['run', '--user={}'.format(user), name]
self.command.dispatch(args, None)
service = self.project.get_service(name)
container = service.containers(stopped=True, one_off=True)[0]
self.assertEqual(user, container.get('Config.User'))
@patch('dockerpty.start')
def test_run_service_with_user_overridden_short_form(self, _):
self.command.base_dir = 'tests/fixtures/user-composefile'
name = 'service'
user = 'sshd'
args = ['run', '-u', user, name]
self.command.dispatch(args, None)
service = self.project.get_service(name)
container = service.containers(stopped=True, one_off=True)[0]
self.assertEqual(user, container.get('Config.User'))
@patch('dockerpty.start')
def test_run_service_with_environment_overridden(self, _):
name = 'service'
self.command.base_dir = 'tests/fixtures/environment-composefile'
self.command.dispatch(
['run', '-e', 'foo=notbar', '-e', 'allo=moto=bobo',
'-e', 'alpha=beta', name],
None
)
service = self.project.get_service(name)
container = service.containers(stopped=True, one_off=True)[0]
# env overridden
self.assertEqual('notbar', container.environment['foo'])
# keep environment from yaml
self.assertEqual('world', container.environment['hello'])
# added option from command line
self.assertEqual('beta', container.environment['alpha'])
# make sure a value containing '=' doesn't crash
self.assertEqual('moto=bobo', container.environment['allo'])
@patch('dockerpty.start')
def test_run_service_without_map_ports(self, __):
# create one off container
self.command.base_dir = 'tests/fixtures/ports-composefile'
self.command.dispatch(['run', '-d', 'simple'], None)
container = self.project.get_service('simple').containers(one_off=True)[0]
# get port information
port_random = container.get_local_port(3000)
port_assigned = container.get_local_port(3001)
# close all one off containers we just created
container.stop()
# check the ports
self.assertEqual(port_random, None)
self.assertEqual(port_assigned, None)
@patch('dockerpty.start')
def test_run_service_with_map_ports(self, __):
# create one off container
self.command.base_dir = 'tests/fixtures/ports-composefile'
self.command.dispatch(['run', '-d', '--service-ports', 'simple'], None)
container = self.project.get_service('simple').containers(one_off=True)[0]
# get port information
port_random = container.get_local_port(3000)
port_assigned = container.get_local_port(3001)
# close all one off containers we just created
container.stop()
# check the ports
self.assertNotEqual(port_random, None)
self.assertIn("0.0.0.0", port_random)
self.assertEqual(port_assigned, "0.0.0.0:49152")
def test_rm(self):
service = self.project.get_service('simple')
service.create_container()
service.kill()
self.assertEqual(len(service.containers(stopped=True)), 1)
self.command.dispatch(['rm', '--force'], None)
self.assertEqual(len(service.containers(stopped=True)), 0)
service = self.project.get_service('simple')
service.create_container()
service.kill()
self.assertEqual(len(service.containers(stopped=True)), 1)
self.command.dispatch(['rm', '-f'], None)
self.assertEqual(len(service.containers(stopped=True)), 0)
def test_stop(self):
self.command.dispatch(['up', '-d'], None)
service = self.project.get_service('simple')
self.assertEqual(len(service.containers()), 1)
self.assertTrue(service.containers()[0].is_running)
self.command.dispatch(['stop', '-t', '1'], None)
self.assertEqual(len(service.containers(stopped=True)), 1)
self.assertFalse(service.containers(stopped=True)[0].is_running)
def test_logs_invalid_service_name(self):
with self.assertRaises(NoSuchService):
self.command.dispatch(['logs', 'madeupname'], None)
def test_kill(self):
self.command.dispatch(['up', '-d'], None)
service = self.project.get_service('simple')
self.assertEqual(len(service.containers()), 1)
self.assertTrue(service.containers()[0].is_running)
self.command.dispatch(['kill'], None)
self.assertEqual(len(service.containers(stopped=True)), 1)
self.assertFalse(service.containers(stopped=True)[0].is_running)
def test_kill_signal_sigstop(self):
self.command.dispatch(['up', '-d'], None)
service = self.project.get_service('simple')
self.assertEqual(len(service.containers()), 1)
self.assertTrue(service.containers()[0].is_running)
self.command.dispatch(['kill', '-s', 'SIGSTOP'], None)
self.assertEqual(len(service.containers()), 1)
# The container is still running. It has only been paused
self.assertTrue(service.containers()[0].is_running)
def test_kill_stopped_service(self):
self.command.dispatch(['up', '-d'], None)
service = self.project.get_service('simple')
self.command.dispatch(['kill', '-s', 'SIGSTOP'], None)
self.assertTrue(service.containers()[0].is_running)
self.command.dispatch(['kill', '-s', 'SIGKILL'], None)
self.assertEqual(len(service.containers(stopped=True)), 1)
self.assertFalse(service.containers(stopped=True)[0].is_running)
def test_restart(self):
service = self.project.get_service('simple')
container = service.create_container()
service.start_container(container)
started_at = container.dictionary['State']['StartedAt']
self.command.dispatch(['restart', '-t', '1'], None)
container.inspect()
self.assertNotEqual(
container.dictionary['State']['FinishedAt'],
'0001-01-01T00:00:00Z',
)
self.assertNotEqual(
container.dictionary['State']['StartedAt'],
started_at,
)
def test_scale(self):
project = self.project
self.command.scale(project, {'SERVICE=NUM': ['simple=1']})
self.assertEqual(len(project.get_service('simple').containers()), 1)
self.command.scale(project, {'SERVICE=NUM': ['simple=3', 'another=2']})
self.assertEqual(len(project.get_service('simple').containers()), 3)
self.assertEqual(len(project.get_service('another').containers()), 2)
self.command.scale(project, {'SERVICE=NUM': ['simple=1', 'another=1']})
self.assertEqual(len(project.get_service('simple').containers()), 1)
self.assertEqual(len(project.get_service('another').containers()), 1)
self.command.scale(project, {'SERVICE=NUM': ['simple=1', 'another=1']})
self.assertEqual(len(project.get_service('simple').containers()), 1)
self.assertEqual(len(project.get_service('another').containers()), 1)
self.command.scale(project, {'SERVICE=NUM': ['simple=0', 'another=0']})
self.assertEqual(len(project.get_service('simple').containers()), 0)
self.assertEqual(len(project.get_service('another').containers()), 0)
def test_port(self):
self.command.base_dir = 'tests/fixtures/ports-composefile'
self.command.dispatch(['up', '-d'], None)
container = self.project.get_service('simple').get_container()
@patch('sys.stdout', new_callable=StringIO)
def get_port(number, mock_stdout):
self.command.dispatch(['port', 'simple', str(number)], None)
return mock_stdout.getvalue().rstrip()
self.assertEqual(get_port(3000), container.get_local_port(3000))
self.assertEqual(get_port(3001), "0.0.0.0:49152")
self.assertEqual(get_port(3002), "")
def test_port_with_scale(self):
self.command.base_dir = 'tests/fixtures/ports-composefile-scale'
self.command.dispatch(['scale', 'simple=2'], None)
containers = sorted(
self.project.containers(service_names=['simple']),
key=attrgetter('name'))
@patch('sys.stdout', new_callable=StringIO)
def get_port(number, mock_stdout, index=None):
if index is None:
self.command.dispatch(['port', 'simple', str(number)], None)
else:
self.command.dispatch(['port', '--index=' + str(index), 'simple', str(number)], None)
return mock_stdout.getvalue().rstrip()
self.assertEqual(get_port(3000), containers[0].get_local_port(3000))
self.assertEqual(get_port(3000, index=1), containers[0].get_local_port(3000))
self.assertEqual(get_port(3000, index=2), containers[1].get_local_port(3000))
self.assertEqual(get_port(3002), "")
def test_env_file_relative_to_compose_file(self):
config_path = os.path.abspath('tests/fixtures/env-file/docker-compose.yml')
self.command.dispatch(['-f', config_path, 'up', '-d'], None)
self._project = self.command.get_project(config_path)
containers = self.project.containers(stopped=True)
self.assertEqual(len(containers), 1)
self.assertIn("FOO=1", containers[0].get('Config.Env'))
def test_up_with_extends(self):
self.command.base_dir = 'tests/fixtures/extends'
self.command.dispatch(['up', '-d'], None)
self.assertEqual(
set([s.name for s in self.project.services]),
set(['mydb', 'myweb']),
)
# Sort by name so we get [db, web]
containers = sorted(
self.project.containers(stopped=True),
key=lambda c: c.name,
)
self.assertEqual(len(containers), 2)
web = containers[1]
self.assertEqual(set(web.links()), set(['db', 'mydb_1', 'extends_mydb_1']))
expected_env = set([
"FOO=1",
"BAR=2",
"BAZ=2",
])
self.assertTrue(expected_env <= set(web.get('Config.Env')))
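# --- Illustrative sketch (not part of the test case above) -------------------
# The tests above repeatedly capture CLI output by patching sys.stdout with a
# StringIO.  A hedged, self-contained illustration of that pattern; the command
# object and argument list are whatever the calling test provides.
def capture_dispatch(command, argv):
    """Run ``command.dispatch(argv, None)`` and return what it printed."""
    with patch('sys.stdout', new_callable=StringIO) as fake_stdout:
        command.dispatch(argv, None)
        return fake_stdout.getvalue()

# Example use inside a test method:
#     output = capture_dispatch(self.command, ['ps'])
#     self.assertIn('simplecomposefile_simple_1', output)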
|
|
""" Bunch is a subclass of dict with attribute-style access.
>>> b = Bunch()
>>> b.hello = 'world'
>>> b.hello
'world'
>>> b['hello'] += "!"
>>> b.hello
'world!'
>>> b.foo = Bunch(lol=True)
>>> b.foo.lol
True
>>> b.foo is b['foo']
True
It is safe to import * from this module:
__all__ = ('Bunch', 'bunchify','unbunchify')
un/bunchify provide dictionary conversion; Bunches can also be
converted via Bunch.to/fromDict().
Copyright (c) 2010 David Schoonover
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
__version__ = '1.0.1'
VERSION = tuple(map(int, __version__.split('.')))
__all__ = ('Bunch', 'bunchify','unbunchify',)
import sys
_IS_PYTHON_3 = sys.version_info.major >= 3
identity = lambda x : x
# u('string') replaces the forwards-incompatible u'string'
if _IS_PYTHON_3:
u = identity
else:
import codecs
def u(string):
return codecs.unicode_escape_decode(string)[0]
# dict.iteritems(), dict.iterkeys() is also incompatible
if _IS_PYTHON_3:
iteritems = dict.items
iterkeys = dict.keys
else:
iteritems = dict.iteritems
iterkeys = dict.iterkeys
class Bunch(dict):
""" A dictionary that provides attribute-style access.
>>> b = Bunch()
>>> b.hello = 'world'
>>> b.hello
'world'
>>> b['hello'] += "!"
>>> b.hello
'world!'
>>> b.foo = Bunch(lol=True)
>>> b.foo.lol
True
>>> b.foo is b['foo']
True
A Bunch is a subclass of dict; it supports all the methods a dict does...
>>> sorted(b.keys())
['foo', 'hello']
Including update()...
>>> b.update({ 'ponies': 'are pretty!' }, hello=42)
>>> print (repr(b))
Bunch(foo=Bunch(lol=True), hello=42, ponies='are pretty!')
As well as iteration...
>>> [ (k,b[k]) for k in b ]
[('ponies', 'are pretty!'), ('foo', Bunch(lol=True)), ('hello', 42)]
And "splats".
>>> "The {knights} who say {ni}!".format(**Bunch(knights='lolcats', ni='can haz'))
'The lolcats who say can haz!'
See unbunchify/Bunch.toDict, bunchify/Bunch.fromDict for notes about conversion.
"""
def __contains__(self, k):
""" >>> b = Bunch(ponies='are pretty!')
>>> 'ponies' in b
True
>>> 'foo' in b
False
>>> b['foo'] = 42
>>> 'foo' in b
True
>>> b.hello = 'hai'
>>> 'hello' in b
True
>>> b[None] = 123
>>> None in b
True
>>> b[False] = 456
>>> False in b
True
"""
try:
return dict.__contains__(self, k) or hasattr(self, k)
except:
return False
# only called if k not found in normal places
def __getattr__(self, k):
""" Gets key if it exists, otherwise throws AttributeError.
nb. __getattr__ is only called if key is not found in normal places.
>>> b = Bunch(bar='baz', lol={})
>>> b.foo
Traceback (most recent call last):
...
AttributeError: foo
>>> b.bar
'baz'
>>> getattr(b, 'bar')
'baz'
>>> b['bar']
'baz'
>>> b.lol is b['lol']
True
>>> b.lol is getattr(b, 'lol')
True
"""
try:
# Throws exception if not in prototype chain
return object.__getattribute__(self, k)
except AttributeError:
try:
return self[k]
except KeyError:
raise AttributeError(k)
def __setattr__(self, k, v):
""" Sets attribute k if it exists, otherwise sets key k. A KeyError
raised by set-item (only likely if you subclass Bunch) will
propagate as an AttributeError instead.
>>> b = Bunch(foo='bar', this_is='useful when subclassing')
>>> b.values #doctest: +ELLIPSIS
<built-in method values of Bunch object at 0x...>
>>> b.values = 'uh oh'
>>> b.values
'uh oh'
>>> b['values']
Traceback (most recent call last):
...
KeyError: 'values'
"""
try:
# Throws exception if not in prototype chain
object.__getattribute__(self, k)
except AttributeError:
try:
self[k] = v
except:
raise AttributeError(k)
else:
object.__setattr__(self, k, v)
def __delattr__(self, k):
""" Deletes attribute k if it exists, otherwise deletes key k. A KeyError
raised by deleting the key--such as when the key is missing--will
propagate as an AttributeError instead.
>>> b = Bunch(lol=42)
>>> del b.values
Traceback (most recent call last):
...
AttributeError: 'Bunch' object attribute 'values' is read-only
>>> del b.lol
>>> b.lol
Traceback (most recent call last):
...
AttributeError: lol
"""
try:
# Throws exception if not in prototype chain
object.__getattribute__(self, k)
except AttributeError:
try:
del self[k]
except KeyError:
raise AttributeError(k)
else:
object.__delattr__(self, k)
def toDict(self):
""" Recursively converts a bunch back into a dictionary.
>>> b = Bunch(foo=Bunch(lol=True), hello=42, ponies='are pretty!')
>>> b.toDict()
{'ponies': 'are pretty!', 'foo': {'lol': True}, 'hello': 42}
See unbunchify for more info.
"""
return unbunchify(self)
def __repr__(self):
""" Invertible* string-form of a Bunch.
>>> b = Bunch(foo=Bunch(lol=True), hello=42, ponies='are pretty!')
>>> print (repr(b))
Bunch(foo=Bunch(lol=True), hello=42, ponies='are pretty!')
>>> eval(repr(b))
Bunch(foo=Bunch(lol=True), hello=42, ponies='are pretty!')
(*) Invertible so long as collection contents are each repr-invertible.
"""
keys = list(iterkeys(self))
keys.sort()
args = ', '.join(['%s=%r' % (key, self[key]) for key in keys])
return '%s(%s)' % (self.__class__.__name__, args)
@staticmethod
def fromDict(d):
""" Recursively transforms a dictionary into a Bunch via copy.
>>> b = Bunch.fromDict({'urmom': {'sez': {'what': 'what'}}})
>>> b.urmom.sez.what
'what'
See bunchify for more info.
"""
return bunchify(d)
# While we could convert abstract types like Mapping or Iterable, I think
# bunchify is more likely to "do what you mean" if it is conservative about
# casting (ex: isinstance(str,Iterable) == True ).
#
# Should you disagree, it is not difficult to duplicate this function with
# more aggressive coercion to suit your own purposes.
def bunchify(x):
""" Recursively transforms a dictionary into a Bunch via copy.
>>> b = bunchify({'urmom': {'sez': {'what': 'what'}}})
>>> b.urmom.sez.what
'what'
bunchify can handle intermediary dicts, lists and tuples (as well as
their subclasses), but ymmv on custom datatypes.
>>> b = bunchify({ 'lol': ('cats', {'hah':'i win again'}),
... 'hello': [{'french':'salut', 'german':'hallo'}] })
>>> b.hello[0].french
'salut'
>>> b.lol[1].hah
'i win again'
nb. As dicts are not hashable, they cannot be nested in sets/frozensets.
"""
if isinstance(x, dict):
return Bunch( (k, bunchify(v)) for k,v in iteritems(x) )
elif isinstance(x, (list, tuple)):
return type(x)( bunchify(v) for v in x )
else:
return x
def unbunchify(x):
""" Recursively converts a Bunch into a dictionary.
>>> b = Bunch(foo=Bunch(lol=True), hello=42, ponies='are pretty!')
>>> unbunchify(b)
{'ponies': 'are pretty!', 'foo': {'lol': True}, 'hello': 42}
unbunchify will handle intermediary dicts, lists and tuples (as well as
their subclasses), but ymmv on custom datatypes.
>>> b = Bunch(foo=['bar', Bunch(lol=True)], hello=42,
... ponies=('are pretty!', Bunch(lies='are trouble!')))
>>> unbunchify(b) #doctest: +NORMALIZE_WHITESPACE
{'ponies': ('are pretty!', {'lies': 'are trouble!'}),
'foo': ['bar', {'lol': True}], 'hello': 42}
nb. As dicts are not hashable, they cannot be nested in sets/frozensets.
"""
if isinstance(x, dict):
return dict( (k, unbunchify(v)) for k,v in iteritems(x) )
elif isinstance(x, (list, tuple)):
return type(x)( unbunchify(v) for v in x )
else:
return x
### Serialization
try:
try:
import json
except ImportError:
import simplejson as json
def toJSON(self, **options):
""" Serializes this Bunch to JSON. Accepts the same keyword options as `json.dumps()`.
>>> b = Bunch(foo=Bunch(lol=True), hello=42, ponies='are pretty!')
>>> json.dumps(b)
'{"ponies": "are pretty!", "foo": {"lol": true}, "hello": 42}'
>>> b.toJSON()
'{"ponies": "are pretty!", "foo": {"lol": true}, "hello": 42}'
"""
return json.dumps(self, **options)
Bunch.toJSON = toJSON
except ImportError:
pass
try:
# Attempt to register ourself with PyYAML as a representer
import yaml
from yaml.representer import Representer, SafeRepresenter
def from_yaml(loader, node):
""" PyYAML support for Bunches using the tag `!bunch` and `!bunch.Bunch`.
>>> import yaml
>>> yaml.load('''
... Flow style: !bunch.Bunch { Clark: Evans, Brian: Ingerson, Oren: Ben-Kiki }
... Block style: !bunch
... Clark : Evans
... Brian : Ingerson
... Oren : Ben-Kiki
... ''') #doctest: +NORMALIZE_WHITESPACE
{'Flow style': Bunch(Brian='Ingerson', Clark='Evans', Oren='Ben-Kiki'),
'Block style': Bunch(Brian='Ingerson', Clark='Evans', Oren='Ben-Kiki')}
This module registers itself automatically to cover both Bunch and any
subclasses. Should you want to customize the representation of a subclass,
simply register it with PyYAML yourself.
"""
data = Bunch()
yield data
value = loader.construct_mapping(node)
data.update(value)
def to_yaml_safe(dumper, data):
""" Converts Bunch to a normal mapping node, making it appear as a
dict in the YAML output.
>>> b = Bunch(foo=['bar', Bunch(lol=True)], hello=42)
>>> import yaml
>>> yaml.safe_dump(b, default_flow_style=True)
'{foo: [bar, {lol: true}], hello: 42}\\n'
"""
return dumper.represent_dict(data)
def to_yaml(dumper, data):
""" Converts Bunch to a representation node.
>>> b = Bunch(foo=['bar', Bunch(lol=True)], hello=42)
>>> import yaml
>>> yaml.dump(b, default_flow_style=True)
'!bunch.Bunch {foo: [bar, !bunch.Bunch {lol: true}], hello: 42}\\n'
"""
return dumper.represent_mapping(u('!bunch.Bunch'), data)
yaml.add_constructor(u('!bunch'), from_yaml)
yaml.add_constructor(u('!bunch.Bunch'), from_yaml)
SafeRepresenter.add_representer(Bunch, to_yaml_safe)
SafeRepresenter.add_multi_representer(Bunch, to_yaml_safe)
Representer.add_representer(Bunch, to_yaml)
Representer.add_multi_representer(Bunch, to_yaml)
# Instance methods for YAML conversion
def toYAML(self, **options):
""" Serializes this Bunch to YAML, using `yaml.safe_dump()` if
no `Dumper` is provided. See the PyYAML documentation for more info.
>>> b = Bunch(foo=['bar', Bunch(lol=True)], hello=42)
>>> import yaml
>>> yaml.safe_dump(b, default_flow_style=True)
'{foo: [bar, {lol: true}], hello: 42}\\n'
>>> b.toYAML(default_flow_style=True)
'{foo: [bar, {lol: true}], hello: 42}\\n'
>>> yaml.dump(b, default_flow_style=True)
'!bunch.Bunch {foo: [bar, !bunch.Bunch {lol: true}], hello: 42}\\n'
>>> b.toYAML(Dumper=yaml.Dumper, default_flow_style=True)
'!bunch.Bunch {foo: [bar, !bunch.Bunch {lol: true}], hello: 42}\\n'
"""
opts = dict(indent=4, default_flow_style=False)
opts.update(options)
if 'Dumper' not in opts:
return yaml.safe_dump(self, **opts)
else:
return yaml.dump(self, **opts)
def fromYAML(*args, **kwargs):
return bunchify( yaml.load(*args, **kwargs) )
Bunch.toYAML = toYAML
Bunch.fromYAML = staticmethod(fromYAML)
except ImportError:
pass
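# --- Illustrative sketch (not part of the module above) ----------------------
# A hedged end-to-end example of the helpers defined above.  The JSON and YAML
# round-trips assume the optional json/PyYAML imports at the bottom of the
# module succeeded.
if __name__ == "__main__":
    cfg = bunchify({"db": {"host": "localhost", "port": 5432}, "debug": True})
    assert cfg.db.port == 5432                      # attribute-style access
    assert unbunchify(cfg) == {"db": {"host": "localhost", "port": 5432},
                               "debug": True}       # plain dicts back out

    print(cfg.toJSON(sort_keys=True))               # requires json
    as_yaml = cfg.toYAML(default_flow_style=True)   # requires PyYAML
    print(as_yaml)
    assert Bunch.fromYAML(as_yaml) == cfg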
|
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 OpenStack Foundation
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf import settings
from django.contrib.auth.models import User # noqa
from django.core.exceptions import ImproperlyConfigured # noqa
from django.core import urlresolvers
from django.utils.importlib import import_module # noqa
import horizon
from horizon import base
from horizon import conf
from horizon.test import helpers as test
from horizon.test.test_dashboards.cats.dashboard import Cats # noqa
from horizon.test.test_dashboards.cats.kittens.panel import Kittens # noqa
from horizon.test.test_dashboards.cats.tigers.panel import Tigers # noqa
from horizon.test.test_dashboards.dogs.dashboard import Dogs # noqa
from horizon.test.test_dashboards.dogs.puppies.panel import Puppies # noqa
class MyDash(horizon.Dashboard):
name = "My Dashboard"
slug = "mydash"
default_panel = "myslug"
class MyPanel(horizon.Panel):
name = "My Panel"
slug = "myslug"
urls = 'horizon.test.test_dashboards.cats.kittens.urls'
class AdminPanel(horizon.Panel):
name = "Admin Panel"
slug = "admin_panel"
permissions = ("horizon.test",)
urls = 'horizon.test.test_dashboards.cats.kittens.urls'
class RbacNoAccessPanel(horizon.Panel):
name = "RBAC Panel No"
slug = "rbac_panel_no"
def _can_access(self, request):
return False
class RbacYesAccessPanel(horizon.Panel):
name = "RBAC Panel Yes"
slug = "rbac_panel_yes"
class BaseHorizonTests(test.TestCase):
def setUp(self):
super(BaseHorizonTests, self).setUp()
# Adjust our horizon config and register our custom dashboards/panels.
self.old_default_dash = settings.HORIZON_CONFIG['default_dashboard']
settings.HORIZON_CONFIG['default_dashboard'] = 'cats'
self.old_dashboards = settings.HORIZON_CONFIG['dashboards']
settings.HORIZON_CONFIG['dashboards'] = ('cats', 'dogs')
base.Horizon.register(Cats)
base.Horizon.register(Dogs)
Cats.register(Kittens)
Cats.register(Tigers)
Dogs.register(Puppies)
# Trigger discovery, registration, and URLconf generation if it
# hasn't happened yet.
base.Horizon._urls()
# Store our original dashboards
self._discovered_dashboards = base.Horizon._registry.keys()
# Gather up and store our original panels for each dashboard
self._discovered_panels = {}
for dash in self._discovered_dashboards:
panels = base.Horizon._registry[dash]._registry.keys()
self._discovered_panels[dash] = panels
def tearDown(self):
super(BaseHorizonTests, self).tearDown()
# Restore our settings
settings.HORIZON_CONFIG['default_dashboard'] = self.old_default_dash
settings.HORIZON_CONFIG['dashboards'] = self.old_dashboards
# Destroy our singleton and re-create it.
base.HorizonSite._instance = None
del base.Horizon
base.Horizon = base.HorizonSite()
# Reload the convenience references to Horizon stored in __init__
reload(import_module("horizon"))
# Re-register our original dashboards and panels.
# This is necessary because autodiscovery only works on the first
# import, and calling reload introduces innumerable additional
# problems. Manual re-registration is the only good way for testing.
self._discovered_dashboards.remove(Cats)
self._discovered_dashboards.remove(Dogs)
for dash in self._discovered_dashboards:
base.Horizon.register(dash)
for panel in self._discovered_panels[dash]:
dash.register(panel)
def _reload_urls(self):
"""Clears out the URL caches, reloads the root urls module, and
re-triggers the autodiscovery mechanism for Horizon. Allows URLs
to be re-calculated after registering new dashboards. Useful
only for testing and should never be used on a live site.
"""
urlresolvers.clear_url_caches()
reload(import_module(settings.ROOT_URLCONF))
base.Horizon._urls()
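# --- Illustrative sketch (not part of the test cases here) -------------------
# setUp/tearDown above restore Horizon's registry by hand because autodiscovery
# only runs on first import.  A hedged sketch of the same idea as a reusable
# helper, built only from calls the tests below already exercise
# (horizon.register, horizon.get_dashboard, horizon.unregister).
import contextlib


@contextlib.contextmanager
def temporarily_registered(dashboard_class):
    """Register a dashboard with Horizon for the duration of a block."""
    horizon.register(dashboard_class)
    try:
        yield horizon.get_dashboard(dashboard_class)
    finally:
        horizon.unregister(dashboard_class)

# Example:
#     with temporarily_registered(MyDash) as dash:
#         self.assertEqual(dash.slug, "mydash")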
class HorizonTests(BaseHorizonTests):
def test_registry(self):
"""Verify registration and autodiscovery work correctly.
Please note that this implicitly tests that autodiscovery works
by virtue of the fact that the dashboards listed in
``settings.INSTALLED_APPS`` are loaded from the start.
"""
# Registration
self.assertEqual(len(base.Horizon._registry), 2)
horizon.register(MyDash)
self.assertEqual(len(base.Horizon._registry), 3)
with self.assertRaises(ValueError):
horizon.register(MyPanel)
with self.assertRaises(ValueError):
horizon.register("MyPanel")
# Retrieval
my_dash_instance_by_name = horizon.get_dashboard("mydash")
self.assertIsInstance(my_dash_instance_by_name, MyDash)
my_dash_instance_by_class = horizon.get_dashboard(MyDash)
self.assertEqual(my_dash_instance_by_name, my_dash_instance_by_class)
with self.assertRaises(base.NotRegistered):
horizon.get_dashboard("fake")
self.assertQuerysetEqual(horizon.get_dashboards(),
['<Dashboard: cats>',
'<Dashboard: dogs>',
'<Dashboard: mydash>'])
# Removal
self.assertEqual(len(base.Horizon._registry), 3)
horizon.unregister(MyDash)
self.assertEqual(len(base.Horizon._registry), 2)
with self.assertRaises(base.NotRegistered):
horizon.get_dashboard(MyDash)
def test_site(self):
self.assertEqual(unicode(base.Horizon), "Horizon")
self.assertEqual(repr(base.Horizon), "<Site: horizon>")
dash = base.Horizon.get_dashboard('cats')
self.assertEqual(base.Horizon.get_default_dashboard(), dash)
test_user = User()
self.assertEqual(base.Horizon.get_user_home(test_user),
dash.get_absolute_url())
def test_dashboard(self):
cats = horizon.get_dashboard("cats")
self.assertEqual(cats._registered_with, base.Horizon)
self.assertQuerysetEqual(cats.get_panels(),
['<Panel: kittens>',
'<Panel: tigers>'])
self.assertEqual(cats.get_absolute_url(), "/cats/")
self.assertEqual(cats.name, "Cats")
# Test registering a module with a dashboard that defines panels
# as a panel group.
cats.register(MyPanel)
self.assertQuerysetEqual(cats.get_panel_groups()['other'],
['<Panel: myslug>'])
# Test that panels defined as a tuple still return a PanelGroup
dogs = horizon.get_dashboard("dogs")
self.assertQuerysetEqual(dogs.get_panel_groups().values(),
['<PanelGroup: default>'])
# Test registering a module with a dashboard that defines panels
# as a tuple.
dogs = horizon.get_dashboard("dogs")
dogs.register(MyPanel)
self.assertQuerysetEqual(dogs.get_panels(),
['<Panel: puppies>',
'<Panel: myslug>'])
def test_panels(self):
cats = horizon.get_dashboard("cats")
tigers = cats.get_panel("tigers")
self.assertEqual(tigers._registered_with, cats)
self.assertEqual(tigers.get_absolute_url(), "/cats/tigers/")
def test_panel_without_slug_fails(self):
class InvalidPanel(horizon.Panel):
name = 'Invalid'
self.assertRaises(ImproperlyConfigured, InvalidPanel)
def test_registry_without_registerable_class_attr_fails(self):
class InvalidRegistry(base.Registry):
pass
self.assertRaises(ImproperlyConfigured, InvalidRegistry)
def test_index_url_name(self):
cats = horizon.get_dashboard("cats")
tigers = cats.get_panel("tigers")
tigers.index_url_name = "does_not_exist"
with self.assertRaises(urlresolvers.NoReverseMatch):
tigers.get_absolute_url()
tigers.index_url_name = "index"
self.assertEqual(tigers.get_absolute_url(), "/cats/tigers/")
def test_lazy_urls(self):
urlpatterns = horizon.urls[0]
self.assertIsInstance(urlpatterns, base.LazyURLPattern)
# The following two methods simply should not raise any exceptions
iter(urlpatterns)
reversed(urlpatterns)
def test_horizon_test_isolation_1(self):
"""Isolation Test Part 1: sets a value."""
cats = horizon.get_dashboard("cats")
cats.evil = True
def test_horizon_test_isolation_2(self):
"""Isolation Test Part 2: The value set in part 1 should be gone."""
cats = horizon.get_dashboard("cats")
self.assertFalse(hasattr(cats, "evil"))
def test_public(self):
dogs = horizon.get_dashboard("dogs")
# Known to have no restrictions on it other than being logged in.
puppies = dogs.get_panel("puppies")
url = puppies.get_absolute_url()
# Get a clean, logged out client instance.
self.client.logout()
resp = self.client.get(url)
redirect_url = "?".join(['http://testserver' + settings.LOGIN_URL,
"next=%s" % url])
self.assertRedirects(resp, redirect_url)
# Simulate ajax call
resp = self.client.get(url, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
# Response should be HTTP 401 with redirect header
self.assertEqual(resp.status_code, 401)
self.assertEqual(resp["X-Horizon-Location"],
redirect_url)
def test_required_permissions(self):
dash = horizon.get_dashboard("cats")
panel = dash.get_panel('tigers')
# Non-admin user
self.assertQuerysetEqual(self.user.get_all_permissions(), [])
resp = self.client.get(panel.get_absolute_url())
self.assertEqual(resp.status_code, 302)
resp = self.client.get(panel.get_absolute_url(),
follow=False,
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(resp.status_code, 401)
# Test insufficient permissions for logged-in user
resp = self.client.get(panel.get_absolute_url(), follow=True)
self.assertEqual(resp.status_code, 200)
self.assertTemplateUsed(resp, "auth/login.html")
self.assertContains(resp, "Login as different user", 1, 200)
# Set roles for admin user
self.set_permissions(permissions=['test'])
resp = self.client.get(panel.get_absolute_url())
self.assertEqual(resp.status_code, 200)
# Test modal form
resp = self.client.get(panel.get_absolute_url(),
follow=False,
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(resp.status_code, 200)
def test_ssl_redirect_by_proxy(self):
dogs = horizon.get_dashboard("dogs")
puppies = dogs.get_panel("puppies")
url = puppies.get_absolute_url()
redirect_url = "?".join([settings.LOGIN_URL,
"next=%s" % url])
self.client.logout()
resp = self.client.get(url)
self.assertRedirects(resp, redirect_url)
# Set SSL settings for test server
settings.SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL',
'https')
resp = self.client.get(url, HTTP_X_FORWARDED_PROTOCOL="https")
self.assertEqual(resp.status_code, 302)
self.assertEqual(resp['location'],
'https://testserver:80%s' % redirect_url)
# Restore settings
settings.SECURE_PROXY_SSL_HEADER = None
class GetUserHomeTests(BaseHorizonTests):
"""Test get_user_home parameters."""
def setUp(self):
self.orig_user_home = settings.HORIZON_CONFIG['user_home']
super(BaseHorizonTests, self).setUp()
self.original_username = "testname"
self.test_user = User()
self.test_user.username = self.original_username
def tearDown(self):
settings.HORIZON_CONFIG['user_home'] = self.orig_user_home
conf.HORIZON_CONFIG._setup()
def test_using_callable(self):
def fancy_user_fnc(user):
return user.username.upper()
settings.HORIZON_CONFIG['user_home'] = fancy_user_fnc
conf.HORIZON_CONFIG._setup()
self.assertEqual(self.test_user.username.upper(),
base.Horizon.get_user_home(self.test_user))
def test_using_module_function(self):
module_func = 'django.utils.encoding.force_text'
settings.HORIZON_CONFIG['user_home'] = module_func
conf.HORIZON_CONFIG._setup()
self.test_user.username = 'testname'
self.assertEqual(self.original_username,
base.Horizon.get_user_home(self.test_user))
def test_using_url(self):
fixed_url = "/url"
settings.HORIZON_CONFIG['user_home'] = fixed_url
conf.HORIZON_CONFIG._setup()
self.assertEqual(fixed_url,
base.Horizon.get_user_home(self.test_user))
class CustomPanelTests(BaseHorizonTests):
"""Test customization of dashboards and panels
using 'customization_module' to HORIZON_CONFIG.
"""
def setUp(self):
settings.HORIZON_CONFIG['customization_module'] = \
'horizon.test.customization.cust_test1'
# refresh config
conf.HORIZON_CONFIG._setup()
super(CustomPanelTests, self).setUp()
def tearDown(self):
# Restore dash
cats = horizon.get_dashboard("cats")
cats.name = "Cats"
horizon.register(Dogs)
self._discovered_dashboards.append(Dogs)
Dogs.register(Puppies)
Cats.register(Tigers)
super(CustomPanelTests, self).tearDown()
settings.HORIZON_CONFIG.pop('customization_module')
# refresh config
conf.HORIZON_CONFIG._setup()
def test_customize_dashboard(self):
cats = horizon.get_dashboard("cats")
self.assertEqual(cats.name, "WildCats")
self.assertQuerysetEqual(cats.get_panels(),
['<Panel: kittens>'])
with self.assertRaises(base.NotRegistered):
horizon.get_dashboard("dogs")
class CustomPermissionsTests(BaseHorizonTests):
"""Test customization of permissions on panels
using 'customization_module' to HORIZON_CONFIG.
"""
def setUp(self):
settings.HORIZON_CONFIG['customization_module'] = \
'horizon.test.customization.cust_test2'
# refresh config
conf.HORIZON_CONFIG._setup()
super(CustomPermissionsTests, self).setUp()
def tearDown(self):
# Restore permissions
dogs = horizon.get_dashboard("dogs")
puppies = dogs.get_panel("puppies")
puppies.permissions = tuple([])
super(CustomPermissionsTests, self).tearDown()
settings.HORIZON_CONFIG.pop('customization_module')
# refresh config
conf.HORIZON_CONFIG._setup()
def test_customized_permissions(self):
dogs = horizon.get_dashboard("dogs")
panel = dogs.get_panel('puppies')
# Non-admin user
self.assertQuerysetEqual(self.user.get_all_permissions(), [])
resp = self.client.get(panel.get_absolute_url())
self.assertEqual(resp.status_code, 302)
resp = self.client.get(panel.get_absolute_url(),
follow=False,
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(resp.status_code, 401)
# Test customized permissions for logged-in user
resp = self.client.get(panel.get_absolute_url(), follow=True)
self.assertEqual(resp.status_code, 200)
self.assertTemplateUsed(resp, "auth/login.html")
self.assertContains(resp, "Login as different user", 1, 200)
# Set roles for admin user
self.set_permissions(permissions=['test'])
resp = self.client.get(panel.get_absolute_url())
self.assertEqual(resp.status_code, 200)
# Test modal form
resp = self.client.get(panel.get_absolute_url(),
follow=False,
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(resp.status_code, 200)
class RbacHorizonTests(test.TestCase):
def setUp(self):
super(RbacHorizonTests, self).setUp()
# Adjust our horizon config and register our custom dashboards/panels.
self.old_default_dash = settings.HORIZON_CONFIG['default_dashboard']
settings.HORIZON_CONFIG['default_dashboard'] = 'cats'
self.old_dashboards = settings.HORIZON_CONFIG['dashboards']
settings.HORIZON_CONFIG['dashboards'] = ('cats', 'dogs')
base.Horizon.register(Cats)
base.Horizon.register(Dogs)
Cats.register(RbacNoAccessPanel)
Cats.default_panel = 'rbac_panel_no'
Dogs.register(RbacYesAccessPanel)
Dogs.default_panel = 'rbac_panel_yes'
# Trigger discovery, registration, and URLconf generation if it
# hasn't happened yet.
base.Horizon._urls()
# Store our original dashboards
self._discovered_dashboards = base.Horizon._registry.keys()
# Gather up and store our original panels for each dashboard
self._discovered_panels = {}
for dash in self._discovered_dashboards:
panels = base.Horizon._registry[dash]._registry.keys()
self._discovered_panels[dash] = panels
def tearDown(self):
super(RbacHorizonTests, self).tearDown()
# Restore our settings
settings.HORIZON_CONFIG['default_dashboard'] = self.old_default_dash
settings.HORIZON_CONFIG['dashboards'] = self.old_dashboards
# Destroy our singleton and re-create it.
base.HorizonSite._instance = None
del base.Horizon
base.Horizon = base.HorizonSite()
# Reload the convenience references to Horizon stored in __init__
reload(import_module("horizon"))
# Re-register our original dashboards and panels.
# This is necessary because autodiscovery only works on the first
# import, and calling reload introduces innumerable additional
# problems. Manual re-registration is the only good way for testing.
self._discovered_dashboards.remove(Cats)
self._discovered_dashboards.remove(Dogs)
for dash in self._discovered_dashboards:
base.Horizon.register(dash)
for panel in self._discovered_panels[dash]:
dash.register(panel)
def test_rbac_panels(self):
context = {'request': None}
cats = horizon.get_dashboard("cats")
self.assertEqual(cats._registered_with, base.Horizon)
self.assertQuerysetEqual(cats.get_panels(),
['<Panel: rbac_panel_no>'])
self.assertFalse(cats.can_access(context))
dogs = horizon.get_dashboard("dogs")
self.assertEqual(dogs._registered_with, base.Horizon)
self.assertQuerysetEqual(dogs.get_panels(),
['<Panel: rbac_panel_yes>'])
self.assertTrue(dogs.can_access(context))
|
|
import inspect
from django.utils.html import strip_tags
from django.utils.encoding import force_unicode
from fields import model_fields
from fields import model_meta_fields
def process_docstring(app, what, name, obj, options, lines):
# This causes import errors if left outside the function
from django.db import models
from django import forms
# Only look at objects that inherit from Django's base model class
if inspect.isclass(obj) and issubclass(obj, models.Model):
# Grab the field list from the meta class
fields = obj._meta.fields
lines.append(u'')
for field in fields:
# Do not document AutoFields
if type(field).__name__ == 'AutoField' and field.primary_key:
continue
k = type(field).__name__
# Decode and strip any html out of the field's help text
help_text = strip_tags(force_unicode(field.help_text))
# Decode and capitalize the verbose name, for use if there isn't
# any help text
verbose_name = force_unicode(field.verbose_name).capitalize()
lines.append(u'.. attribute:: %s' % field.name)
lines.append(u' ')
# Add the field's type to the docstring
if isinstance(field, models.ForeignKey):
to = field.rel.to
l = u' %s(\':class:`~%s.%s`\')' % (type(field).__name__,
to.__module__,
to.__name__)
elif isinstance(field, models.OneToOneField):
to = field.rel.to
l = u' %s(\':class:`~%s.%s`\')' % (type(field).__name__,
to.__module__,
to.__name__)
else:
l = u' %s' % type(field).__name__
if not field.blank:
l = l + ' (Required)'
if hasattr(field, 'auto_now') and field.auto_now:
l = l + ' (Automatically set when updated)'
if hasattr(field, 'auto_now_add') and field.auto_now_add:
l = l + ' (Automatically set when created)'
lines.append(l)
if help_text:
lines.append(u'')
# Add the model field to the end of the docstring as a param
# using the help text as the description
lines.append(u' %s' % help_text)
lines.append(u' ')
f = model_fields[type(field).__name__]
for key in sorted(f.iterkeys()):
if hasattr(field, key) and getattr(field, key) != f[key] and getattr(field, key):
attr = getattr(field, key)
if key == 'error_messages':
error_dict = {}
for i in sorted(attr.iterkeys()):
error_dict[i] = force_unicode(attr[i])
attr = error_dict
if key == 'validators':
v = []
for i in sorted(attr):
n = ':class:`~%s.%s`' % (type(i).__module__,
type(i).__name__)
v.append(n)
attr = v
lines.append(u' :param %s: %s' % (key, attr))
lines.append(u'')
lines.append(u'.. attribute:: Meta')
lines.append(u'')
for key in sorted(model_meta_fields.iterkeys()):
if hasattr(obj._meta, key) and getattr(obj._meta, key) != model_meta_fields[key]:
lines.append(u' %s = %s' % (key, getattr(obj._meta, key)))
lines.append(u'')
# Only look at objects that inherit from Django's base model class
if inspect.isclass(obj):
if issubclass(obj, forms.Form) or issubclass(obj, forms.ModelForm):
# Grab the field list from the meta class
fields = obj.base_fields
lines.append(u'')
for field in fields:
f = obj.base_fields[field]
# Decode and strip any html out of the field's help text
if hasattr(f, 'help_text'):
help_text = strip_tags(force_unicode(f.help_text))
# Decode and capitalize the verbose name, for use if there isn't
# any help text
label = force_unicode(f.label).capitalize()
lines.append(u'.. attribute:: %s' % field)
lines.append(u'')
# Add the field's type to the docstring
field_inst = obj.base_fields[field]
l = u' :class:`~%s.%s`' % (type(field_inst).__module__,
type(field_inst).__name__)
if field_inst.required:
l = l + ' (Required)'
lines.append(l)
lines.append(u'')
if hasattr(f, 'error_messages') and f.error_messages:
msgs = {}
for key, value in f.error_messages.items():
msgs[key] = force_unicode(value)
lines.append(u':kwarg error_messages: %s' % msgs)
if f.help_text:
# Add the model field to the end of the docstring as a param
# using the help text as the description
lines.append(u':kwarg help_text: %s' % help_text)
if hasattr(f, 'initial') and f.initial:
lines.append(u':kwarg initial: %s' % f.initial)
if hasattr(f, 'localize'):
lines.append(u':kwarg localize: %s' % f.localize)
if hasattr(f, 'validators') and f.validators:
l = []
for v in f.validators:
l.append(':class:`~%s.%s`' % (type(v).__module__,
type(v).__name__))
lines.append(u':kwarg validators: %s' % l)
lines.append(u':kwarg widget: %s' % type(f.widget).__name__)
lines.append(u'')
# Return the extended docstring
return lines
def setup(app):
# Register the docstring processor with sphinx
app.connect('autodoc-process-docstring', process_docstring)
app.add_crossref_type(
directivename = "admin",
rolename = "admin",
indextemplate = "pair: %s; admin",
)
app.add_crossref_type(
directivename = "command",
rolename = "command",
indextemplate = "pair: %s; command",
)
app.add_crossref_type(
directivename = "context_processors",
rolename = "context_processors",
indextemplate = "pair: %s; context_processors",
)
app.add_crossref_type(
directivename = "form",
rolename = "form",
indextemplate = "pair: %s; form",
)
app.add_crossref_type(
directivename = "formfield",
rolename = "formfield",
indextemplate = "pair: %s; formfield",
)
app.add_crossref_type(
directivename = "manager",
rolename = "manager",
indextemplate = "pair: %s; manager",
)
app.add_crossref_type(
directivename = "middleware",
rolename = "middleware",
indextemplate = "pair: %s; middleware",
)
app.add_crossref_type(
directivename = "model",
rolename = "model",
indextemplate = "pair: %s; model",
)
app.add_crossref_type(
directivename = "setting",
rolename = "setting",
indextemplate = "pair: %s; setting",
)
app.add_crossref_type(
directivename = "settings",
rolename = "settings",
indextemplate = "pair: %s; settings",
)
app.add_crossref_type(
directivename = "signal",
rolename = "signal",
indextemplate = "pair: %s; signal",
)
app.add_crossref_type(
directivename = "token",
rolename = "token",
indextemplate = "pair: %s; token",
)
app.add_crossref_type(
directivename = "validator",
rolename = "validator",
indextemplate = "pair: %s; validator",
)
app.add_crossref_type(
directivename = "view",
rolename = "view",
indextemplate = "pair: %s; view",
)
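# Hedged usage sketch: wiring this extension into a Sphinx build is assumed to
# look roughly like the conf.py fragment below; the '_ext' directory and the
# module name 'docstrings' are hypothetical and depend on where this file is
# saved relative to conf.py.
#
#     import os
#     import sys
#     sys.path.insert(0, os.path.abspath('_ext'))
#     extensions = ['docstrings']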
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops.matmul."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import operator
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test as test_lib
def _AddTest(test, op_name, testcase_name, fn):
test_name = "_".join(["test", op_name, testcase_name])
if hasattr(test, test_name):
raise RuntimeError("Test %s defined more than once" % test_name)
setattr(test, test_name, fn)
def _GetTransposedMatrices(x, x_name, kwargs):
if kwargs["transpose_" + x_name] is True:
return x.T
elif kwargs["adjoint_" + x_name] is True:
return np.conj(x.T)
else:
return x
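# Hedged illustration (never called by the generated tests): each test computes
# np_val = np.matrix(a_np_) * np.matrix(b_np_) and then feeds tf.matmul
# *pre-transposed* operands, so that setting transpose_*/adjoint_* undoes the
# transformation and reproduces np_val. For the adjoint case the effective
# operand is the conjugate transpose:
def _example_effective_operand():
  a = np.array([[1 + 2j, 3 - 1j]], dtype=np.complex64)  # shape [1, 2]
  kwargs = {"transpose_a": False, "adjoint_a": True}
  effective_a = _GetTransposedMatrices(a, "a", kwargs)
  assert np.allclose(effective_a, np.conj(a.T))
  return effective_a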
class MatMulTest(test_lib.TestCase):
pass # Filled in below
def _GetMatMulTest(a_np_, b_np_, use_static_shape_, **kwargs_):
def Test(self):
np_val = np.matrix(a_np_) * np.matrix(b_np_)
use_gpu = True
    if a_np_.dtype == np.float16 and (
        not test_util.CudaSupportsHalfMatMulAndConv()):
use_gpu = False
print("Built without fp16 matmul support for Cuda, running test on CPU.")
# Transpose and possibly conjugate a_np_ and b_np_ according to the
# attributes such that tf.matmul(effective_a_np, effective_b_np, **kwargs)
# results in a valid matrix multiplication and produces the same result as
# np.matrix(a_np_) * np.matrix(b_np_)
effective_a_np = _GetTransposedMatrices(a_np_, "a", kwargs_)
effective_b_np = _GetTransposedMatrices(b_np_, "b", kwargs_)
with self.test_session(use_gpu=use_gpu) as sess:
if use_static_shape_:
a = constant_op.constant(effective_a_np)
b = constant_op.constant(effective_b_np)
res = math_ops.matmul(a, b, **kwargs_)
tf_val = res.eval()
else:
a = array_ops.placeholder(a_np_.dtype)
b = array_ops.placeholder(b_np_.dtype)
res = math_ops.matmul(a, b, **kwargs_)
tf_val = sess.run(res, feed_dict={a: effective_a_np, b: effective_b_np})
self.assertAllCloseAccordingToType(
tf_val,
np_val,
float_rtol=2e-5,
float_atol=2e-5,
half_rtol=0.2,
half_atol=0.2)
return Test
class MatMulGradientTest(test_lib.TestCase):
pass # Will be filled in below.
def _GetMatMulGradientTest(a_np_, b_np_, use_static_shape_, **kwargs_):
def Test(self):
if not use_static_shape_ or a_np_.dtype in (np.int32, np.float16):
self.skipTest("Skipping infeasible gradient test.")
# Transpose and possibly conjugate a_np_ and b_np_ according to the
# attributes such that tf.matmul(effective_a_np, effective_b_np, **kwargs)
# results in a valid matrix multiplication and produces the same result as
# np.matrix(a_np_) * np.matrix(b_np_)
effective_a_np = _GetTransposedMatrices(a_np_, "a", kwargs_)
effective_b_np = _GetTransposedMatrices(b_np_, "b", kwargs_)
epsilon = np.finfo(a_np_.dtype).eps
delta = epsilon**(1.0 / 3.0)
tol = 20 * delta
with self.test_session(use_gpu=True):
a = constant_op.constant(effective_a_np)
b = constant_op.constant(effective_b_np)
res = math_ops.matmul(a, b, **kwargs_)
for x, x_init in [a, effective_a_np], [b, effective_b_np]:
theoretical, numerical = gradient_checker.compute_gradient(
x,
x_init.shape,
res, [a_np_.shape[0], b_np_.shape[1]],
x_init_value=x_init,
delta=delta)
self.assertAllClose(theoretical, numerical, rtol=tol, atol=tol)
return Test
class MatMulStatsTest(test_lib.TestCase):
def testSimpleStatistics(self):
g = ops.Graph()
with g.as_default():
a = variables.Variable(random_ops.random_normal([25, 16]))
b = variables.Variable(random_ops.random_normal([16, 9]))
math_ops.matmul(a, b)
for op in g.get_operations():
flops = ops.get_stats_for_node_def(g, op.node_def, "flops").value
if op.name == "MatMul":
self.assertEqual(7200, flops)
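  # Editorial note on the expected count: a [25, 16] x [16, 9] product has
  # 25 * 9 = 225 output elements, each a length-16 dot product counted as
  # 2 * 16 = 32 flops (one multiply and one add per term), so 225 * 32 = 7200.
  # The transposed case below stores the first operand as [16, 25] but
  # describes the same computation, hence the same expected value.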
def testTransposedStatistics(self):
g = ops.Graph()
with g.as_default():
a = variables.Variable(random_ops.random_normal([16, 25]))
b = variables.Variable(random_ops.random_normal([16, 9]))
math_ops.matmul(a, b, transpose_a=True)
for op in g.get_operations():
flops = ops.get_stats_for_node_def(g, op.node_def, "flops").value
if op.name == "MatMul":
self.assertEqual(7200, flops)
try:
# @ operator supported since python 3.5.
infix_matmul = operator.matmul
except AttributeError:
# For earlier versions of python, emulate regular behavior.
# Useful to build and test for 3.5+ on earlier versions.
def infix_matmul(x, y): # pylint: disable=invalid-name
try:
r = type(x).__matmul__(x, y)
except AttributeError:
r = NotImplemented
if r is NotImplemented and type(x) is not type(y):
try:
r = type(y).__rmatmul__(y, x)
except AttributeError:
r = NotImplemented
if r is NotImplemented:
raise TypeError("unsupported operand type(s) for @: '{}' and '{}'"
.format(type(x).__name__, type(y).__name__))
return r
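# Editorial note: on Python >= 3.5 the try branch above binds infix_matmul to
# operator.matmul, so infix_matmul(x, y) is exactly x @ y; the fallback only
# emulates the reflected-operand dispatch (__matmul__, then __rmatmul__) for
# older interpreters.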
class MatMulInfixOperatorTest(test_lib.TestCase):
def testMismatchedShape(self):
with self.assertRaisesWithPredicateMatch(ValueError,
lambda e: "Shape must" in str(e)):
infix_matmul(
ops.convert_to_tensor([10.0, 20.0, 30.0]),
ops.convert_to_tensor([[40.0, 50.0], [60.0, 70.0]]))
def testMismatchedDimensions(self):
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Dimensions must" in str(e)):
infix_matmul(
ops.convert_to_tensor([[10.0, 20.0, 30.0]]),
ops.convert_to_tensor([[40.0, 50.0], [60.0, 70.0]]))
def testInfixMatmulIsTfMatmul(self):
a = ops.convert_to_tensor([[10.0, 20.0, 30.0]])
b = ops.convert_to_tensor([[40.0, 50.0], [60.0, 70.0], [80.0, 90.0]])
c = infix_matmul(a, b)
self.assertEqual(c.op.type, "MatMul")
def testInfixMatmulDoesDotProduct(self):
a = ops.convert_to_tensor([[10.0, 20.0, 30.0]])
b = ops.convert_to_tensor([[40.0, 50.0], [60.0, 70.0], [80.0, 90.0]])
c = infix_matmul(a, b)
d = math_ops.matmul(a, b)
with self.test_session():
self.assertAllEqual(c.eval(), d.eval())
if __name__ == "__main__":
sizes = [1, 3, 5]
trans_options = [[False, False], [True, False], [False, True]]
for use_static_shape in [False, True]:
for dtype in (np.int32, np.float16, np.float32, np.float64, np.complex64,
np.complex128):
if not use_static_shape and dtype == np.int32:
# TODO(rmlarsen): Re-enable this test when we have fixed the underlying
# bug in Windows (b/35935459).
continue
for m in sizes:
for n in sizes:
for k in sizes:
# Construct compatible random matrices a_np of size [m, k] and b_np
# of size [k, n].
a_np = np.random.normal(-5, 5, m * k).astype(dtype).reshape([m, k])
if dtype in (np.complex64, np.complex128):
a_np.imag = np.random.normal(-5, 5,
m * k).astype(dtype).reshape([m, k])
b_np = np.random.normal(-5, 5, k * n).astype(dtype).reshape([k, n])
if dtype in (np.complex64, np.complex128):
b_np.imag = np.random.normal(-5, 5,
k * n).astype(dtype).reshape([k, n])
for adjoint_a, transpose_a in trans_options:
for adjoint_b, transpose_b in trans_options:
name = "%s_%s_%s_%s_%s_%s_%s_%s_%s" % (
use_static_shape, dtype.__name__, m, n, k, adjoint_a,
transpose_a, adjoint_b, transpose_b)
_AddTest(MatMulTest, "MatMulTest", name,
_GetMatMulTest(
a_np,
b_np,
use_static_shape,
adjoint_a=adjoint_a,
transpose_a=transpose_a,
adjoint_b=adjoint_b,
transpose_b=transpose_b))
_AddTest(MatMulGradientTest, "MatMulGradientTest", name,
_GetMatMulGradientTest(
a_np,
b_np,
use_static_shape,
adjoint_a=adjoint_a,
transpose_a=transpose_a,
adjoint_b=adjoint_b,
transpose_b=transpose_b))
test_lib.main()
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1
from google.api_core import grpc_helpers_async
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.tasks_v2beta3.types import cloudtasks
from google.cloud.tasks_v2beta3.types import queue
from google.cloud.tasks_v2beta3.types import queue as gct_queue
from google.cloud.tasks_v2beta3.types import task
from google.cloud.tasks_v2beta3.types import task as gct_task
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
from .base import CloudTasksTransport, DEFAULT_CLIENT_INFO
from .grpc import CloudTasksGrpcTransport
class CloudTasksGrpcAsyncIOTransport(CloudTasksTransport):
"""gRPC AsyncIO backend transport for CloudTasks.
Cloud Tasks allows developers to manage the execution of
background work in their applications.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(
cls,
host: str = "cloudtasks.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
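    # Hedged usage sketch (not part of the generated surface): this transport
    # is normally selected by name when constructing the async client, e.g.
    #
    #     from google.cloud import tasks_v2beta3
    #     client = tasks_v2beta3.CloudTasksAsyncClient(transport="grpc_asyncio")
    #
    # or instantiated directly from an explicit channel:
    #
    #     channel = CloudTasksGrpcAsyncIOTransport.create_channel()
    #     transport = CloudTasksGrpcAsyncIOTransport(channel=channel)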
def __init__(
self,
*,
host: str = "cloudtasks.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
            always_use_jwt_access (Optional[bool]): Whether a self-signed JWT should
                be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
# use the credentials which are saved
credentials=self._credentials,
# Set ``credentials_file`` to ``None`` here as
# the credentials that we saved earlier should be used.
credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def list_queues(
self,
) -> Callable[
[cloudtasks.ListQueuesRequest], Awaitable[cloudtasks.ListQueuesResponse]
]:
r"""Return a callable for the list queues method over gRPC.
Lists queues.
Queues are returned in lexicographical order.
Returns:
Callable[[~.ListQueuesRequest],
Awaitable[~.ListQueuesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_queues" not in self._stubs:
self._stubs["list_queues"] = self.grpc_channel.unary_unary(
"/google.cloud.tasks.v2beta3.CloudTasks/ListQueues",
request_serializer=cloudtasks.ListQueuesRequest.serialize,
response_deserializer=cloudtasks.ListQueuesResponse.deserialize,
)
return self._stubs["list_queues"]
@property
def get_queue(
self,
) -> Callable[[cloudtasks.GetQueueRequest], Awaitable[queue.Queue]]:
r"""Return a callable for the get queue method over gRPC.
Gets a queue.
Returns:
Callable[[~.GetQueueRequest],
Awaitable[~.Queue]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_queue" not in self._stubs:
self._stubs["get_queue"] = self.grpc_channel.unary_unary(
"/google.cloud.tasks.v2beta3.CloudTasks/GetQueue",
request_serializer=cloudtasks.GetQueueRequest.serialize,
response_deserializer=queue.Queue.deserialize,
)
return self._stubs["get_queue"]
@property
def create_queue(
self,
) -> Callable[[cloudtasks.CreateQueueRequest], Awaitable[gct_queue.Queue]]:
r"""Return a callable for the create queue method over gRPC.
Creates a queue.
Queues created with this method allow tasks to live for a
maximum of 31 days. After a task is 31 days old, the task will
be deleted regardless of whether it was dispatched or not.
WARNING: Using this method may have unintended side effects if
you are using an App Engine ``queue.yaml`` or ``queue.xml`` file
to manage your queues. Read `Overview of Queue Management and
queue.yaml <https://cloud.google.com/tasks/docs/queue-yaml>`__
before using this method.
Returns:
Callable[[~.CreateQueueRequest],
Awaitable[~.Queue]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_queue" not in self._stubs:
self._stubs["create_queue"] = self.grpc_channel.unary_unary(
"/google.cloud.tasks.v2beta3.CloudTasks/CreateQueue",
request_serializer=cloudtasks.CreateQueueRequest.serialize,
response_deserializer=gct_queue.Queue.deserialize,
)
return self._stubs["create_queue"]
@property
def update_queue(
self,
) -> Callable[[cloudtasks.UpdateQueueRequest], Awaitable[gct_queue.Queue]]:
r"""Return a callable for the update queue method over gRPC.
Updates a queue.
This method creates the queue if it does not exist and updates
the queue if it does exist.
Queues created with this method allow tasks to live for a
maximum of 31 days. After a task is 31 days old, the task will
be deleted regardless of whether it was dispatched or not.
WARNING: Using this method may have unintended side effects if
you are using an App Engine ``queue.yaml`` or ``queue.xml`` file
to manage your queues. Read `Overview of Queue Management and
queue.yaml <https://cloud.google.com/tasks/docs/queue-yaml>`__
before using this method.
Returns:
Callable[[~.UpdateQueueRequest],
Awaitable[~.Queue]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_queue" not in self._stubs:
self._stubs["update_queue"] = self.grpc_channel.unary_unary(
"/google.cloud.tasks.v2beta3.CloudTasks/UpdateQueue",
request_serializer=cloudtasks.UpdateQueueRequest.serialize,
response_deserializer=gct_queue.Queue.deserialize,
)
return self._stubs["update_queue"]
@property
def delete_queue(
self,
) -> Callable[[cloudtasks.DeleteQueueRequest], Awaitable[empty_pb2.Empty]]:
r"""Return a callable for the delete queue method over gRPC.
Deletes a queue.
This command will delete the queue even if it has tasks in it.
Note: If you delete a queue, a queue with the same name can't be
created for 7 days.
WARNING: Using this method may have unintended side effects if
you are using an App Engine ``queue.yaml`` or ``queue.xml`` file
to manage your queues. Read `Overview of Queue Management and
queue.yaml <https://cloud.google.com/tasks/docs/queue-yaml>`__
before using this method.
Returns:
Callable[[~.DeleteQueueRequest],
Awaitable[~.Empty]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_queue" not in self._stubs:
self._stubs["delete_queue"] = self.grpc_channel.unary_unary(
"/google.cloud.tasks.v2beta3.CloudTasks/DeleteQueue",
request_serializer=cloudtasks.DeleteQueueRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["delete_queue"]
@property
def purge_queue(
self,
) -> Callable[[cloudtasks.PurgeQueueRequest], Awaitable[queue.Queue]]:
r"""Return a callable for the purge queue method over gRPC.
Purges a queue by deleting all of its tasks.
All tasks created before this method is called are
permanently deleted.
Purge operations can take up to one minute to take
effect. Tasks might be dispatched before the purge takes
effect. A purge is irreversible.
Returns:
Callable[[~.PurgeQueueRequest],
Awaitable[~.Queue]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "purge_queue" not in self._stubs:
self._stubs["purge_queue"] = self.grpc_channel.unary_unary(
"/google.cloud.tasks.v2beta3.CloudTasks/PurgeQueue",
request_serializer=cloudtasks.PurgeQueueRequest.serialize,
response_deserializer=queue.Queue.deserialize,
)
return self._stubs["purge_queue"]
@property
def pause_queue(
self,
) -> Callable[[cloudtasks.PauseQueueRequest], Awaitable[queue.Queue]]:
r"""Return a callable for the pause queue method over gRPC.
Pauses the queue.
If a queue is paused then the system will stop dispatching tasks
until the queue is resumed via
[ResumeQueue][google.cloud.tasks.v2beta3.CloudTasks.ResumeQueue].
Tasks can still be added when the queue is paused. A queue is
paused if its [state][google.cloud.tasks.v2beta3.Queue.state] is
[PAUSED][google.cloud.tasks.v2beta3.Queue.State.PAUSED].
Returns:
Callable[[~.PauseQueueRequest],
Awaitable[~.Queue]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "pause_queue" not in self._stubs:
self._stubs["pause_queue"] = self.grpc_channel.unary_unary(
"/google.cloud.tasks.v2beta3.CloudTasks/PauseQueue",
request_serializer=cloudtasks.PauseQueueRequest.serialize,
response_deserializer=queue.Queue.deserialize,
)
return self._stubs["pause_queue"]
@property
def resume_queue(
self,
) -> Callable[[cloudtasks.ResumeQueueRequest], Awaitable[queue.Queue]]:
r"""Return a callable for the resume queue method over gRPC.
Resume a queue.
This method resumes a queue after it has been
[PAUSED][google.cloud.tasks.v2beta3.Queue.State.PAUSED] or
[DISABLED][google.cloud.tasks.v2beta3.Queue.State.DISABLED]. The
state of a queue is stored in the queue's
[state][google.cloud.tasks.v2beta3.Queue.state]; after calling
this method it will be set to
[RUNNING][google.cloud.tasks.v2beta3.Queue.State.RUNNING].
WARNING: Resuming many high-QPS queues at the same time can lead
to target overloading. If you are resuming high-QPS queues,
follow the 500/50/5 pattern described in `Managing Cloud Tasks
Scaling
Risks <https://cloud.google.com/tasks/docs/manage-cloud-task-scaling>`__.
Returns:
Callable[[~.ResumeQueueRequest],
Awaitable[~.Queue]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "resume_queue" not in self._stubs:
self._stubs["resume_queue"] = self.grpc_channel.unary_unary(
"/google.cloud.tasks.v2beta3.CloudTasks/ResumeQueue",
request_serializer=cloudtasks.ResumeQueueRequest.serialize,
response_deserializer=queue.Queue.deserialize,
)
return self._stubs["resume_queue"]
@property
def get_iam_policy(
self,
) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], Awaitable[policy_pb2.Policy]]:
r"""Return a callable for the get iam policy method over gRPC.
Gets the access control policy for a
[Queue][google.cloud.tasks.v2beta3.Queue]. Returns an empty
policy if the resource exists and does not have a policy set.
Authorization requires the following `Google
IAM <https://cloud.google.com/iam>`__ permission on the
specified resource parent:
- ``cloudtasks.queues.getIamPolicy``
Returns:
Callable[[~.GetIamPolicyRequest],
Awaitable[~.Policy]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_iam_policy" not in self._stubs:
self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary(
"/google.cloud.tasks.v2beta3.CloudTasks/GetIamPolicy",
request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString,
response_deserializer=policy_pb2.Policy.FromString,
)
return self._stubs["get_iam_policy"]
@property
def set_iam_policy(
self,
) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], Awaitable[policy_pb2.Policy]]:
r"""Return a callable for the set iam policy method over gRPC.
Sets the access control policy for a
[Queue][google.cloud.tasks.v2beta3.Queue]. Replaces any existing
policy.
Note: The Cloud Console does not check queue-level IAM
permissions yet. Project-level permissions are required to use
the Cloud Console.
Authorization requires the following `Google
IAM <https://cloud.google.com/iam>`__ permission on the
specified resource parent:
- ``cloudtasks.queues.setIamPolicy``
Returns:
Callable[[~.SetIamPolicyRequest],
Awaitable[~.Policy]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "set_iam_policy" not in self._stubs:
self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary(
"/google.cloud.tasks.v2beta3.CloudTasks/SetIamPolicy",
request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString,
response_deserializer=policy_pb2.Policy.FromString,
)
return self._stubs["set_iam_policy"]
@property
def test_iam_permissions(
self,
) -> Callable[
[iam_policy_pb2.TestIamPermissionsRequest],
Awaitable[iam_policy_pb2.TestIamPermissionsResponse],
]:
r"""Return a callable for the test iam permissions method over gRPC.
Returns permissions that a caller has on a
[Queue][google.cloud.tasks.v2beta3.Queue]. If the resource does
not exist, this will return an empty set of permissions, not a
[NOT_FOUND][google.rpc.Code.NOT_FOUND] error.
Note: This operation is designed to be used for building
permission-aware UIs and command-line tools, not for
authorization checking. This operation may "fail open" without
warning.
Returns:
Callable[[~.TestIamPermissionsRequest],
Awaitable[~.TestIamPermissionsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "test_iam_permissions" not in self._stubs:
self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary(
"/google.cloud.tasks.v2beta3.CloudTasks/TestIamPermissions",
request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString,
response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString,
)
return self._stubs["test_iam_permissions"]
@property
def list_tasks(
self,
) -> Callable[
[cloudtasks.ListTasksRequest], Awaitable[cloudtasks.ListTasksResponse]
]:
r"""Return a callable for the list tasks method over gRPC.
Lists the tasks in a queue.
By default, only the
[BASIC][google.cloud.tasks.v2beta3.Task.View.BASIC] view is
retrieved due to performance considerations;
[response_view][google.cloud.tasks.v2beta3.ListTasksRequest.response_view]
controls the subset of information which is returned.
The tasks may be returned in any order. The ordering may change
at any time.
Returns:
Callable[[~.ListTasksRequest],
Awaitable[~.ListTasksResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_tasks" not in self._stubs:
self._stubs["list_tasks"] = self.grpc_channel.unary_unary(
"/google.cloud.tasks.v2beta3.CloudTasks/ListTasks",
request_serializer=cloudtasks.ListTasksRequest.serialize,
response_deserializer=cloudtasks.ListTasksResponse.deserialize,
)
return self._stubs["list_tasks"]
@property
def get_task(self) -> Callable[[cloudtasks.GetTaskRequest], Awaitable[task.Task]]:
r"""Return a callable for the get task method over gRPC.
Gets a task.
Returns:
Callable[[~.GetTaskRequest],
Awaitable[~.Task]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_task" not in self._stubs:
self._stubs["get_task"] = self.grpc_channel.unary_unary(
"/google.cloud.tasks.v2beta3.CloudTasks/GetTask",
request_serializer=cloudtasks.GetTaskRequest.serialize,
response_deserializer=task.Task.deserialize,
)
return self._stubs["get_task"]
@property
def create_task(
self,
) -> Callable[[cloudtasks.CreateTaskRequest], Awaitable[gct_task.Task]]:
r"""Return a callable for the create task method over gRPC.
Creates a task and adds it to a queue.
Tasks cannot be updated after creation; there is no UpdateTask
command.
- The maximum task size is 100KB.
Returns:
Callable[[~.CreateTaskRequest],
Awaitable[~.Task]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_task" not in self._stubs:
self._stubs["create_task"] = self.grpc_channel.unary_unary(
"/google.cloud.tasks.v2beta3.CloudTasks/CreateTask",
request_serializer=cloudtasks.CreateTaskRequest.serialize,
response_deserializer=gct_task.Task.deserialize,
)
return self._stubs["create_task"]
@property
def delete_task(
self,
) -> Callable[[cloudtasks.DeleteTaskRequest], Awaitable[empty_pb2.Empty]]:
r"""Return a callable for the delete task method over gRPC.
Deletes a task.
A task can be deleted if it is scheduled or dispatched.
A task cannot be deleted if it has executed successfully
or permanently failed.
Returns:
Callable[[~.DeleteTaskRequest],
Awaitable[~.Empty]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_task" not in self._stubs:
self._stubs["delete_task"] = self.grpc_channel.unary_unary(
"/google.cloud.tasks.v2beta3.CloudTasks/DeleteTask",
request_serializer=cloudtasks.DeleteTaskRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["delete_task"]
@property
def run_task(self) -> Callable[[cloudtasks.RunTaskRequest], Awaitable[task.Task]]:
r"""Return a callable for the run task method over gRPC.
Forces a task to run now.
When this method is called, Cloud Tasks will dispatch the task,
even if the task is already running, the queue has reached its
[RateLimits][google.cloud.tasks.v2beta3.RateLimits] or is
[PAUSED][google.cloud.tasks.v2beta3.Queue.State.PAUSED].
This command is meant to be used for manual debugging. For
example,
[RunTask][google.cloud.tasks.v2beta3.CloudTasks.RunTask] can be
used to retry a failed task after a fix has been made or to
manually force a task to be dispatched now.
The dispatched task is returned. That is, the task that is
returned contains the [status][Task.status] after the task is
dispatched but before the task is received by its target.
If Cloud Tasks receives a successful response from the task's
target, then the task will be deleted; otherwise the task's
[schedule_time][google.cloud.tasks.v2beta3.Task.schedule_time]
will be reset to the time that
[RunTask][google.cloud.tasks.v2beta3.CloudTasks.RunTask] was
called plus the retry delay specified in the queue's
[RetryConfig][google.cloud.tasks.v2beta3.RetryConfig].
[RunTask][google.cloud.tasks.v2beta3.CloudTasks.RunTask] returns
[NOT_FOUND][google.rpc.Code.NOT_FOUND] when it is called on a
task that has already succeeded or permanently failed.
Returns:
Callable[[~.RunTaskRequest],
Awaitable[~.Task]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "run_task" not in self._stubs:
self._stubs["run_task"] = self.grpc_channel.unary_unary(
"/google.cloud.tasks.v2beta3.CloudTasks/RunTask",
request_serializer=cloudtasks.RunTaskRequest.serialize,
response_deserializer=task.Task.deserialize,
)
return self._stubs["run_task"]
def close(self):
return self.grpc_channel.close()
__all__ = ("CloudTasksGrpcAsyncIOTransport",)
|
|
#!/usr/local/bin/python
'''Old, file-based code. My ability to care about this is severely limited.'''
import basics
import bfiles
import config
import useful
modnumlist = []
class SetFile(bfiles.ArgFile):
tablecols = ['prefix', 'cols', 'title', 'digits', 'label', 'style']
notcols = ['fulldesc', 'insetdesc', 'fullpic']
def __init__(self, fname):
self.tables = []
self.found = False
self.db = {'model': []}
self.ncols = 0
self.header = ''
self.colheads = {}
self.dirs = {}
bfiles.ArgFile.__init__(self, fname)
def parse_cells(self, llist):
self.header = llist[1:]
self.ncols = 0
for col in self.header:
            if col not in self.notcols:
self.ncols += 1
self.db['ncols'] = self.ncols
def parse_dir(self, llist):
self.dirs[llist[1]] = llist[2]
def parse_field(self, llist):
self.colheads[llist[1]] = llist[2]
def parse_table(self, llist):
if self.found:
self.tables.append(self.db)
self.db = {'model': []}
self.db.update(dict(zip(self.tablecols, llist[1:])))
self.db['cols'] = self.db['cols'].split(',')
self.db['header'] = self.header
self.db['ncols'] = self.ncols
def parse_t(self, llist):
self.model = {'text': llist.get_arg('')}
self.db['model'].append(self.model)
def parse_s(self, llist):
self.model = {'section': llist.get_arg('')}
self.db['model'].append(self.model)
def parse_m(self, llist):
self.found = True
self.model = dict(zip(self.db['cols'], llist[1:]))
self.model['desc'] = []
self.db['model'].append(self.model)
def parse_d(self, llist):
self.model['desc'].append(llist[1])
def parse_end(self):
if self.found:
self.tables.append(self.db)
def do_set(pif, setfile, set_id=None):
pif.render.set_button_comment(pif, '')
tables = setfile.tables
llineups = []
for db in tables:
if len(tables) == 1 or not db['title'] or set_id == db['label'] or set_id == 'all': # or not set_id
llineups.append(print_table(pif, db, setfile))
else:
llineups.append(print_no_table(pif, db))
return pif.render.format_template('sets.html', llineups=llineups)
def print_table(pif, db, setfile):
global modnumlist
entries = []
prefix = db['prefix']
ncols = 0
for field in db['header']:
if field in setfile.colheads:
entries.append({'text': setfile.colheads[field], 'style': str(ncols)})
ncols = ncols + 1
for model in db['model']:
pif.render.comment('print_table', model)
showme = True
for field in db['header']:
if pif.form.has(field):
if (model.get(field, '') != pif.form.get_str(field) or
(not model.get(field, '') and not pif.form.get_str(field))):
showme = False
if not showme:
continue
if 'text' in model:
# Need to calculate the colspan better.
entries.append({'text': model.get('text', ''), 'colspan': len(db['header']) - 1, 'style': '0'})
continue
if 'section' in model:
# Need to calculate the colspan better.
entries.append({'text': model.get('section', ''), 'colspan': len(db['header']) - 1, 'class': 'section'})
continue
ifield = 0
for field in db['header']:
if field == 'desc':
entries.append({'style': ifield, 'text': mod_desc(model.get(field, ''))})
elif field == 'fulldesc':
entries.append({'style': ifield, 'text': mod_desc(model.get('desc', '')), 'colspan': int(db['ncols'])})
elif field == 'insetdesc':
entries.append({'style': ifield, 'text': mod_desc(model.get('desc', '')), 'colspan': int(db['ncols']) - 1})
elif field == 'num':
modnums = [mod_num(prefix, modnum, model.get('rank')) for modnum in model.get(field, '').split(';')]
entries.append({'style': ifield,
'text': '<nobr>%s</nobr>' % "<br>".join(modnums), 'also': {'height': '8'}})
elif field == 'pic':
modnum = model.get('num', '').split(';')
rowspan = 2 if 'insetdesc' in db['header'] else 1
entries.append({
'style': ifield,
'text': img(pif, prefix, modnum, model.get('rank'), int(db['digits']),
(model.get('year', '') != 'not made'), dirs=setfile.dirs),
'rowspan': rowspan})
elif field == 'fullpic':
modnum = model.get('num', '').split(';')
colspan = 2 if 'insetdesc' in db['header'] else int(db['ncols'])
entries.append({
'style': ifield,
'text': img(pif, prefix, modnum, model.get('rank'), int(db['digits']),
(model.get('year', '') != 'not made'), dirs=setfile.dirs),
'colspan': colspan})
elif field == 'name':
entries.append({
'style': ifield, 'text': '<center><b>' + model.get(field, '') + '</b></center>'}
if model.get(field, '') else {'style': ifield})
else:
entries.append({'style': ifield,
'text': model.get(field, '')} if model.get(field, '') else {'style': ifield})
ifield += 1
llineup = {
'anchor': db['label'], 'name': db['title'], 'columns': int(ncols), 'widthauto': True,
'section': [{'id': 'box', 'name': '',
'range': [{'entry': entries}]}],
}
return pif.render.format_matrix_for_template(llineup)
def print_no_table(pif, db):
return {
'anchor': db['label'],
'header': '<h3><a href="/cgi-bin/sets.cgi?page=' + pif.form.get_str('page') +
'&set=%(label)s#%(label)s">%(title)s</a></h3>\n' % db}
def mod_desc(desclist):
if desclist:
ostr = '<ul>\n'
for desc in desclist:
ostr += ' <li>' + desc + '\n'
ostr += '</ul>\n'
else:
ostr = ' \n'
return ostr
def mod_num(prefix, model, suffix):
return ''.join([(prefix + '-') if prefix else '', model, ('-' + suffix) if suffix else ''])
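# Worked examples of mod_num, derived from the join above:
#   mod_num('MB', '24', 'a') -> 'MB-24-a'
#   mod_num('', '24', '')    -> '24'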
def img(pif, prefix, model, suffix, digits=0, made=True, dirs={}):
pif.render.comment(prefix, model, suffix, digits, made)
if not isinstance(model, list):
model = [model]
modnum = []
for m in model:
try:
fmt = "%%0%dd" % digits
m = fmt % int(m)
except TypeError:
pass
except ValueError:
pass
if prefix:
m = prefix + m
if suffix:
m += suffix
modnum.append(m)
ostr = pif.render.format_image_required(modnum, alt=mod_num(prefix, model[0], suffix), made=made,
pdir=dirs.get(prefix))
return '<center>' + ostr + '</center>'
def select_set(pif):
lran = {
'name': "A few of the special sets produced by Matchbox in recent years:",
'entry': ['<b><a href="?page=%s">%s</a></b> - %s' %
(ent['page_info.id'][5:], ent['page_info.title'], ent['page_info.description'])
for ent in pif.dbh.fetch_pages("id like 'sets.%' and (flags & 1)=0", order='description,title')]}
llineup = {'section': [{'id': 'i', 'range': [lran]}],
'tail': [pif.render.format_button("back", link="..") + " to the main index."]}
return pif.render.format_template('setsel.html', llineup=llineup)
@basics.web_page
def sets_main(pif):
pif.render.print_html()
if pif.form.has('page'):
setfile = SetFile(useful.relpath(config.SRC_DIR, useful.make_alnum(pif.form.get_str('page')) + '.dat'))
return do_set(pif, setfile, pif.form.get_id('set'))
else:
return select_set(pif)
|
|
# *****************************************************************************
#
# Copyright (c) 2016, EPAM SYSTEMS INC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ******************************************************************************
from pprint import pprint
from googleapiclient.discovery import build
from google.cloud import storage
from google.cloud import exceptions
import google.auth
from dlab.fab import *
import actions_lib
import os, re
from googleapiclient import errors
import logging
import traceback
import sys, time
import backoff
class GCPMeta:
def __init__(self, auth_type='service_account'):
@backoff.on_exception(backoff.expo,
google.auth.exceptions.DefaultCredentialsError,
max_tries=15)
def get_gcp_cred():
credentials, project = google.auth.default()
return credentials, project
self.auth_type = auth_type
self.project = os.environ['gcp_project_id']
if os.environ['conf_resource'] == 'ssn':
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = "/root/service_account.json"
credentials, project = google.auth.default()
if credentials.requires_scopes:
credentials = credentials.with_scopes(
['https://www.googleapis.com/auth/compute',
'https://www.googleapis.com/auth/iam',
'https://www.googleapis.com/auth/cloud-platform'])
self.service = build('compute', 'v1', credentials=credentials)
self.service_iam = build('iam', 'v1', credentials=credentials)
self.dataproc = build('dataproc', 'v1', credentials=credentials)
self.service_storage = build('storage', 'v1', credentials=credentials)
self.storage_client = storage.Client(project=project, credentials=credentials)
self.service_resource = build('cloudresourcemanager', 'v1', credentials=credentials)
else:
credentials, project = get_gcp_cred()
self.service = build('compute', 'v1', credentials=credentials)
self.service_iam = build('iam', 'v1', credentials=credentials)
self.dataproc = build('dataproc', 'v1', credentials=credentials)
self.service_storage = build('storage', 'v1', credentials=credentials)
self.storage_client = storage.Client(project=project, credentials=credentials)
self.service_resource = build('cloudresourcemanager', 'v1', credentials=credentials)
def wait_for_operation(self, operation, region='', zone=''):
print('Waiting for operation to finish...')
execution = False
while not execution:
try:
if region != '':
result = self.service.regionOperations().get(
project=self.project,
operation=operation,
region=region).execute()
elif zone != '':
result = self.service.zoneOperations().get(
project=self.project,
operation=operation,
zone=zone).execute()
else:
result = self.service.globalOperations().get(
project=self.project,
operation=operation).execute()
if result['status'] == 'DONE':
print("Done.")
execution = True
time.sleep(1)
except errors.HttpError as err:
if err.resp.status == 404:
print(err)
else:
raise err
def get_vpc(self, network_name):
request = self.service.networks().get(
project=self.project,
network=network_name
)
try:
return request.execute()
except errors.HttpError as err:
if err.resp.status == 404:
return ''
else:
raise err
except Exception as err:
logging.info(
"Unable to get VPC: " + str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout))
append_result(str({"error": "Unable to get VPC",
"error_message": str(err) + "\n Traceback: " + traceback.print_exc(
file=sys.stdout)}))
traceback.print_exc(file=sys.stdout)
def get_subnet(self, subnet_name, region):
request = self.service.subnetworks().get(
project=self.project,
region=region,
subnetwork=subnet_name)
try:
return request.execute()
except errors.HttpError as err:
if err.resp.status == 404:
return ''
else:
raise err
except Exception as err:
logging.info(
"Unable to get Subnet: " + str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout))
append_result(str({"error": "Unable to get Subnet",
"error_message": str(err) + "\n Traceback: " + traceback.print_exc(
file=sys.stdout)}))
traceback.print_exc(file=sys.stdout)
def get_firewall(self, firewall_name):
request = self.service.firewalls().get(
project=self.project,
firewall=firewall_name)
try:
return request.execute()
except errors.HttpError as err:
if err.resp.status == 404:
return ''
else:
raise err
except Exception as err:
logging.info(
"Unable to get Firewall: " + str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout))
append_result(str({"error": "Unable to get Firewall",
"error_message": str(err) + "\n Traceback: " + traceback.print_exc(
file=sys.stdout)}))
traceback.print_exc(file=sys.stdout)
def get_bucket(self, bucket_name):
try:
bucket = self.storage_client.get_bucket(bucket_name)
return bucket
except exceptions.NotFound:
return ''
except Exception as err:
            logging.info(
                "Unable to get Bucket: " + str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout))
            append_result(str({"error": "Unable to get Bucket",
                               "error_message": str(err) + "\n Traceback: " + traceback.print_exc(
                                   file=sys.stdout)}))
traceback.print_exc(file=sys.stdout)
def get_instance(self, instance_name):
request = self.service.instances().get(project=self.project, zone=os.environ['gcp_zone'],
instance=instance_name)
try:
return request.execute()
except errors.HttpError as err:
if err.resp.status == 404:
return ''
else:
raise err
except Exception as err:
            logging.info(
                "Unable to get Instance: " + str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout))
            append_result(str({"error": "Unable to get Instance",
                               "error_message": str(err) + "\n Traceback: " + traceback.print_exc(
                                   file=sys.stdout)}))
traceback.print_exc(file=sys.stdout)
def get_instance_status(self, instance_name):
request = self.service.instances().get(project=self.project, zone=os.environ['gcp_zone'],
instance=instance_name)
try:
result = request.execute()
return result.get('status')
except errors.HttpError as err:
if err.resp.status == 404:
return ''
else:
raise err
except Exception as err:
            logging.info(
                "Unable to get Instance status: " + str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout))
            append_result(str({"error": "Unable to get Instance status",
                               "error_message": str(err) + "\n Traceback: " + traceback.print_exc(
                                   file=sys.stdout)}))
traceback.print_exc(file=sys.stdout)
def get_instance_public_ip_by_name(self, instance_name):
try:
result = GCPMeta().get_instance(instance_name)
if result:
for i in result.get('networkInterfaces'):
for j in i.get('accessConfigs'):
return j.get('natIP')
else:
return ''
except Exception as err:
logging.info(
"Unable to get Instance IP: " + str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout))
append_result(str({"error": "Unable to get Instance IP",
"error_message": str(err) + "\n Traceback: " + traceback.print_exc(
file=sys.stdout)}))
traceback.print_exc(file=sys.stdout)
def get_service_account(self, service_account_name):
service_account_email = "{}@{}.iam.gserviceaccount.com".format(service_account_name, self.project)
request = self.service_iam.projects().serviceAccounts().get(
name='projects/{}/serviceAccounts/{}'.format(self.project, service_account_email))
try:
return request.execute()
except errors.HttpError as err:
if err.resp.status == 404:
return ''
else:
raise err
except Exception as err:
logging.info(
"Unable to get Service account: " + str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout))
append_result(str({"error": "Unable to get Service account",
"error_message": str(err) + "\n Traceback: " + traceback.print_exc(
file=sys.stdout)}))
traceback.print_exc(file=sys.stdout)
def get_role(self, role_name):
request = self.service_iam.projects().roles().get(name='projects/{}/roles/{}'.format(self.project,
role_name.replace('-',
'_')))
try:
return request.execute()
except errors.HttpError as err:
if err.resp.status == 404:
return ''
else:
raise err
except Exception as err:
logging.info(
"Unable to get IAM role: " + str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout))
append_result(str({"error": "Unable to get IAM role",
"error_message": str(err) + "\n Traceback: " + traceback.print_exc(
file=sys.stdout)}))
traceback.print_exc(file=sys.stdout)
def get_static_address(self, region, static_address_name):
request = self.service.addresses().get(project=self.project, region=region, address=static_address_name)
try:
return request.execute()
except errors.HttpError as err:
if err.resp.status == 404:
return ''
else:
raise err
def get_private_ip_address(self, instance_name):
try:
result = GCPMeta().get_instance(instance_name)
for i in result['networkInterfaces']:
return i['networkIP']
except Exception as err:
logging.info(
"Unable to get Private IP address: " + str(err) + "\n Traceback: " + traceback.print_exc(
file=sys.stdout))
append_result(str({"error": "Unable to get Private IP address",
"error_message": str(err) + "\n Traceback: " + traceback.print_exc(
file=sys.stdout)}))
traceback.print_exc(file=sys.stdout)
def get_ami_by_name(self, ami_name):
try:
request = self.service.images().get(project=self.project, image=ami_name)
result = request.execute()
return result
except Exception as err:
logging.info("Error with getting image by name: " + str(err) + "\n Traceback: " + traceback.print_exc(
file=sys.stdout))
append_result(str({"error": "Error with getting image by name",
"error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
traceback.print_exc(file=sys.stdout)
return ''
def get_disk(self, disk_name):
try:
request = self.service.disks().get(project=self.project, zone=os.environ['gcp_zone'], disk=disk_name)
result = request.execute()
return result
except Exception as err:
logging.info("Error with getting disk by name: " + str(err) + "\n Traceback: " + traceback.print_exc(
file=sys.stdout))
append_result(str({"error": "Error with getting disk by name",
"error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
traceback.print_exc(file=sys.stdout)
return ''
def get_list_service_accounts(self):
try:
service_account_names = []
result = self.service_iam.projects().serviceAccounts().list(
name='projects/{}'.format(self.project)).execute()
for account in result['accounts']:
service_account_names.append(account['displayName'])
if 'nextPageToken' in result:
next_page = True
page_token = result['nextPageToken']
else:
next_page = False
while next_page:
result2 = self.service_iam.projects().serviceAccounts().list(name='projects/{}'.format(self.project),
pageToken=page_token).execute()
for account in result2['accounts']:
service_account_names.append(account['displayName'])
if 'nextPageToken' in result2:
page_token = result2['nextPageToken']
else:
next_page = False
return service_account_names
except Exception as err:
logging.info("Error with getting list service accounts: " + str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout))
append_result(str({"error": "Error with getting list service accounts",
"error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
traceback.print_exc(file=sys.stdout)
return ''
def get_list_roles(self):
try:
role_names = []
result = self.service_iam.projects().roles().list(parent='projects/{}'.format(self.project)).execute()
for role in result['roles']:
role_names.append(role['title'])
if 'nextPageToken' in result:
next_page = True
page_token = result['nextPageToken']
else:
next_page = False
while next_page:
result2 = self.service_iam.projects().roles().list(parent='projects/{}'.format(self.project),
pageToken=page_token).execute()
for role in result2['roles']:
role_names.append(role['title'])
if 'nextPageToken' in result2:
page_token = result2['nextPageToken']
else:
next_page = False
return role_names
except Exception as err:
logging.info("Error with getting list service accounts: " + str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout))
append_result(str({"error": "Error with getting list service accounts",
"error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
traceback.print_exc(file=sys.stdout)
return ''
def get_list_instances(self, zone, filter_string=''):
try:
if not filter_string:
request = self.service.instances().list(project=self.project, zone=zone)
else:
request = self.service.instances().list(project=self.project, zone=zone, filter='name eq {}-.*'.
format(filter_string))
result = request.execute()
return result
except Exception as err:
logging.info("Error with getting list instances: " + str(err) + "\n Traceback: " + traceback.print_exc(
file=sys.stdout))
append_result(str({"error": "Error with getting list instances",
"error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
traceback.print_exc(file=sys.stdout)
return ''
def get_list_firewalls(self, filter_string=''):
try:
if not filter_string:
request = self.service.firewalls().list(project=self.project)
else:
request = self.service.firewalls().list(project=self.project, filter='name eq {}.*'.format(
filter_string))
result = request.execute()
return result
except Exception as err:
logging.info("Error with getting list firewalls: " + str(err) + "\n Traceback: " + traceback.print_exc(
file=sys.stdout))
append_result(str({"error": "Error with getting list firewalls",
"error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
traceback.print_exc(file=sys.stdout)
return ''
def get_list_subnetworks(self, region, vpc_name='', filter_string=''):
try:
if not filter_string and not vpc_name:
request = self.service.subnetworks().list(project=self.project, region=region)
elif vpc_name and not filter_string:
request = self.service.subnetworks().list(
project=self.project, region=region,
filter=
'(network eq https://www.googleapis.com/compute/v1/projects/{}/global/networks/{}) (name eq .*)'.format(
self.project, vpc_name))
elif filter_string and vpc_name:
request = self.service.subnetworks().list(
project=self.project, region=region,
filter=
'(network eq https://www.googleapis.com/compute/v1/projects/{}/global/networks/{}) (name eq {}-.*)'.format(
self.project, vpc_name, filter_string))
elif filter_string and not vpc_name:
request = self.service.subnetworks().list(
project=self.project, region=region,
filter='name eq {}-.*'.format(filter_string))
result = request.execute()
return result
except Exception as err:
logging.info("Error with getting list subnetworks: " + str(err) + "\n Traceback: " + traceback.print_exc(
file=sys.stdout))
append_result(str({"error": "Error with getting list subnetworks",
"error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
traceback.print_exc(file=sys.stdout)
return ''
def get_list_buckets(self, prefix=''):
try:
if not prefix:
request = self.service_storage.buckets().list(project=self.project)
else:
request = self.service_storage.buckets().list(project=self.project, prefix='{}'.format(prefix))
result = request.execute()
return result
except Exception as err:
logging.info("Error with getting list buckets: " + str(err) + "\n Traceback: " + traceback.print_exc(
file=sys.stdout))
append_result(str({"error": "Error with getting list buckets",
"error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
traceback.print_exc(file=sys.stdout)
return ''
def get_list_static_addresses(self, region, filter_string=''):
try:
if not filter_string:
request = self.service.addresses().list(project=self.project, region=region)
else:
request = self.service.addresses().list(project=self.project, region=region,
filter='name eq {}.*'.format(filter_string))
result = request.execute()
return result
except Exception as err:
logging.info(
"Error with getting list static addresses: " + str(err) + "\n Traceback: " + traceback.print_exc(
file=sys.stdout))
append_result(str({"error": "Error with getting list static addresses",
"error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
traceback.print_exc(file=sys.stdout)
return ''
def get_list_instance_statuses(self, instance_name_list):
data = []
for instance in instance_name_list:
host = {}
try:
request = self.service.instances().get(project=self.project, zone=os.environ['gcp_zone'],
instance=instance)
result = request.execute()
host['id'] = instance
host['status'] = result.get('status').lower().replace("terminated", "stopped")
data.append(host)
            except Exception:
host['id'] = instance
host['status'] = 'terminated'
data.append(host)
return data
def get_list_cluster_statuses(self, cluster_names, full_check=True):
data = []
for cluster in cluster_names:
host = {}
try:
request = self.dataproc.projects().regions().clusters().get(projectId=self.project,
region=os.environ['gcp_region'],
clusterName=cluster)
result = request.execute()
host['id'] = cluster
if full_check:
host['version'] = result.get('config').get('softwareConfig').get('imageVersion')[:3]
host['status'] = result.get('status').get('state').lower()
data.append(host)
            except Exception:
host['id'] = cluster
host['status'] = 'terminated'
data.append(host)
return data
def get_dataproc_job_status(self, job_id):
request = self.dataproc.projects().regions().jobs().get(projectId=self.project,
region=os.environ['gcp_region'],
jobId=job_id)
try:
res = request.execute()
print("Job status: {}".format(res['status']['state'].lower()))
return res['status']['state'].lower()
except Exception as err:
logging.info(
"Unable to get Dataproc job status: " + str(err) + "\n Traceback: " + traceback.print_exc(
file=sys.stdout))
append_result(str({"error": "Unable to get Dataproc job status",
"error_message": str(err) + "\n Traceback: " + traceback.print_exc(
file=sys.stdout)}))
traceback.print_exc(file=sys.stdout)
return ''
def get_dataproc_list(self, labels):
filter_string = ''
for label in labels:
for key in label.keys():
filter_string += 'labels.{}:{}'.format(key, label[key])
filter_string += ' AND '
filter_string = re.sub('AND $', '', filter_string)
request = self.dataproc.projects().regions().clusters().list(projectId=self.project,
region=os.environ['gcp_region'],
filter=filter_string)
try:
res = request.execute()
if res != dict():
return [i['clusterName'] for i in res['clusters']]
else:
return ''
except Exception as err:
logging.info(
"Unable to get Dataproc list clusters: " + str(err) + "\n Traceback: " + traceback.print_exc(
file=sys.stdout))
append_result(str({"error": "Unable to get Dataproc list clusters",
"error_message": str(err) + "\n Traceback: " + traceback.print_exc(
file=sys.stdout)}))
traceback.print_exc(file=sys.stdout)
return ''
def get_not_configured_dataproc(self, notebook_instance_name):
cluster_filter = 'labels.{}:not-configured'.format(notebook_instance_name)
request = self.dataproc.projects().regions().clusters().list(projectId=self.project,
region=os.environ['gcp_region'],
filter=cluster_filter)
try:
res = request.execute()
if res != dict():
return res['clusters'][0]['clusterName']
else:
print("No not-configured clusters")
return ''
except Exception as err:
logging.info(
"Error with getting not configured cluster: " + str(err) + "\n Traceback: " + traceback.print_exc(
file=sys.stdout))
append_result(str({"error": "Error with getting not configured cluster",
"error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
traceback.print_exc(file=sys.stdout)
return ''
def get_dataproc_jobs(self):
jobs = []
try:
res = self.dataproc.projects().regions().jobs().list(projectId=self.project,
region=os.environ['gcp_region']).execute()
jobs = [job for job in res['jobs']]
page_token = res.get('nextPageToken')
while page_token != 'None':
res2 = self.dataproc.projects().regions().jobs().list(projectId=self.project,
region=os.environ['gcp_region'],
pageToken=page_token).execute()
jobs.extend([job for job in res2['jobs']])
page_token = str(res2.get('nextPageToken'))
return jobs
except KeyError:
return jobs
except Exception as err:
logging.info(
"Error with getting cluster jobs: " + str(err) + "\n Traceback: " + traceback.print_exc(
file=sys.stdout))
append_result(str({"error": "Error with getting cluster jobs",
"error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
traceback.print_exc(file=sys.stdout)
return ''
def get_list_private_ip_by_conf_type_and_id(self, conf_type, instance_id):
try:
private_list_ip = []
if conf_type == 'edge_node' or conf_type == 'exploratory':
private_list_ip.append(GCPMeta().get_private_ip_address(
instance_id))
elif conf_type == 'computational_resource':
instance_list = GCPMeta().get_list_instances(
os.environ['gcp_zone'])
for instance in instance_list.get('items'):
                    if instance.get('labels') is not None:
if instance.get('labels').get('name') == instance_id:
private_list_ip.append(
instance.get('networkInterfaces')[0].get(
'networkIP'))
return private_list_ip
except Exception as err:
logging.info(
"Error getting private ip by conf_type and id: " + str(
err) + "\n Traceback: " + traceback.print_exc(
file=sys.stdout))
append_result(str({"error": "Error getting private ip by conf_type and id",
"error_message": str(
err) + "\n Traceback: " + traceback.print_exc(
file=sys.stdout)}))
traceback.print_exc(file=sys.stdout)
def get_instance_private_ip_address(tag_name, instance_name):
try:
return GCPMeta().get_private_ip_address(instance_name)
except Exception as err:
logging.info(
"Error with getting private ip address by name: " + str(err) + "\n Traceback: " + traceback.print_exc(
file=sys.stdout))
append_result(str({"error": "Error with getting private ip address by name",
"error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
traceback.print_exc(file=sys.stdout)
return ''
def node_count(cluster_name):
try:
list_instances = GCPMeta().get_list_instances(os.environ['gcp_zone'], cluster_name)
if list_instances.get('items') is None:
raise Exception
else:
return len(list_instances.get('items'))
except Exception as err:
logging.info(
"Error with getting node count: " + str(err) + "\n Traceback: " + traceback.print_exc(
file=sys.stdout))
append_result(str({"error": "Error with getting noide count",
"error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
traceback.print_exc(file=sys.stdout)
return ''
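# Hedged usage sketch (not executed on import): how the metadata helpers above might be
# combined to check for an existing VPC. The environment variables that GCPMeta()
# reads ('gcp_project_id', 'conf_resource', application credentials) and the network
# name 'dlab-vpc' are assumptions for illustration only.
def _example_check_vpc(network_name='dlab-vpc'):
    meta = GCPMeta()
    vpc = meta.get_vpc(network_name)
    if vpc:
        print('VPC {} already exists: {}'.format(network_name, vpc.get('selfLink')))
    else:
        print('VPC {} was not found'.format(network_name))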
|
|
# Copyright (c) 2014 Cloudbase Solutions SRL
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
import sys
from oslo_config import cfg
from oslo_utils import units
from cinder import exception
from cinder.i18n import _
from cinder.image import image_utils
from cinder.openstack.common import fileutils
from cinder.openstack.common import log as logging
from cinder import utils
from cinder.volume.drivers import smbfs
from cinder.volume.drivers.windows import remotefs
from cinder.volume.drivers.windows import vhdutils
VERSION = '1.0.0'
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.set_default('smbfs_shares_config', r'C:\OpenStack\smbfs_shares.txt')
CONF.set_default('smbfs_mount_point_base', r'C:\OpenStack\_mnt')
CONF.set_default('smbfs_default_volume_format', 'vhd')
class WindowsSmbfsDriver(smbfs.SmbfsDriver):
VERSION = VERSION
def __init__(self, *args, **kwargs):
super(WindowsSmbfsDriver, self).__init__(*args, **kwargs)
self.base = getattr(self.configuration,
'smbfs_mount_point_base',
CONF.smbfs_mount_point_base)
opts = getattr(self.configuration,
'smbfs_mount_options',
CONF.smbfs_mount_options)
self._remotefsclient = remotefs.WindowsRemoteFsClient(
'cifs', root_helper=None, smbfs_mount_point_base=self.base,
smbfs_mount_options=opts)
self.vhdutils = vhdutils.VHDUtils()
def do_setup(self, context):
self._check_os_platform()
super(WindowsSmbfsDriver, self).do_setup(context)
def _check_os_platform(self):
if sys.platform != 'win32':
_msg = _("This system platform (%s) is not supported. This "
"driver supports only Win32 platforms.") % sys.platform
raise exception.SmbfsException(_msg)
def _do_create_volume(self, volume):
volume_path = self.local_path(volume)
volume_format = self.get_volume_format(volume)
volume_size_bytes = volume['size'] * units.Gi
if os.path.exists(volume_path):
err_msg = _('File already exists at: %s') % volume_path
raise exception.InvalidVolume(err_msg)
if volume_format not in (self._DISK_FORMAT_VHD,
self._DISK_FORMAT_VHDX):
err_msg = _("Unsupported volume format: %s ") % volume_format
raise exception.InvalidVolume(err_msg)
self.vhdutils.create_dynamic_vhd(volume_path, volume_size_bytes)
def _ensure_share_mounted(self, smbfs_share):
mnt_options = {}
if self.shares.get(smbfs_share) is not None:
mnt_flags = self.shares[smbfs_share]
mnt_options = self.parse_options(mnt_flags)[1]
self._remotefsclient.mount(smbfs_share, mnt_options)
def _delete(self, path):
fileutils.delete_if_exists(path)
def _get_capacity_info(self, smbfs_share):
"""Calculate available space on the SMBFS share.
:param smbfs_share: example //172.18.194.100/var/smbfs
"""
total_size, total_available = self._remotefsclient.get_capacity_info(
smbfs_share)
total_allocated = self._get_total_allocated(smbfs_share)
return_value = [total_size, total_available, total_allocated]
LOG.info('Smb share %s Total size %s Total allocated %s'
% (smbfs_share, total_size, total_allocated))
return [float(x) for x in return_value]
def _get_total_allocated(self, smbfs_share):
elements = os.listdir(smbfs_share)
total_allocated = 0
for element in elements:
element_path = os.path.join(smbfs_share, element)
if not self._remotefsclient.is_symlink(element_path):
if "snapshot" in element:
continue
if re.search(r'\.vhdx?$', element):
total_allocated += self.vhdutils.get_vhd_size(
element_path)['VirtualSize']
continue
if os.path.isdir(element_path):
total_allocated += self._get_total_allocated(element_path)
continue
total_allocated += os.path.getsize(element_path)
return total_allocated
def _img_commit(self, snapshot_path):
self.vhdutils.merge_vhd(snapshot_path)
self._delete(snapshot_path)
def _rebase_img(self, image, backing_file, volume_format):
# Relative path names are not supported in this case.
image_dir = os.path.dirname(image)
backing_file_path = os.path.join(image_dir, backing_file)
self.vhdutils.reconnect_parent(image, backing_file_path)
def _qemu_img_info(self, path, volume_name=None):
# This code expects to deal only with relative filenames.
# As this method is needed by the upper class and qemu-img does
# not fully support vhdx images, for the moment we'll use Win32 API
# for retrieving image information.
parent_path = self.vhdutils.get_vhd_parent_path(path)
file_format = os.path.splitext(path)[1][1:].lower()
if parent_path:
backing_file_name = os.path.split(parent_path)[1].lower()
else:
backing_file_name = None
class ImageInfo(object):
def __init__(self, image, backing_file):
self.image = image
self.backing_file = backing_file
self.file_format = file_format
return ImageInfo(os.path.basename(path),
backing_file_name)
def _do_create_snapshot(self, snapshot, backing_file, new_snap_path):
backing_file_full_path = os.path.join(
self._local_volume_dir(snapshot['volume']),
backing_file)
self.vhdutils.create_differencing_vhd(new_snap_path,
backing_file_full_path)
def _do_extend_volume(self, volume_path, size_gb):
self.vhdutils.resize_vhd(volume_path, size_gb * units.Gi)
@utils.synchronized('smbfs', external=False)
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image."""
# If snapshots exist, flatten to a temporary image, and upload it
active_file = self.get_active_image_from_info(volume)
active_file_path = os.path.join(self._local_volume_dir(volume),
active_file)
backing_file = self.vhdutils.get_vhd_parent_path(active_file_path)
root_file_fmt = self.get_volume_format(volume)
temp_path = None
try:
if backing_file or root_file_fmt == self._DISK_FORMAT_VHDX:
temp_file_name = '%s.temp_image.%s.%s' % (
volume['id'],
image_meta['id'],
self._DISK_FORMAT_VHD)
temp_path = os.path.join(self._local_volume_dir(volume),
temp_file_name)
self.vhdutils.convert_vhd(active_file_path, temp_path)
upload_path = temp_path
else:
upload_path = active_file_path
image_utils.upload_volume(context,
image_service,
image_meta,
upload_path,
self._DISK_FORMAT_VHD)
finally:
if temp_path:
self._delete(temp_path)
def copy_image_to_volume(self, context, volume, image_service, image_id):
"""Fetch the image from image_service and write it to the volume."""
volume_format = self.get_volume_format(volume, qemu_format=True)
image_meta = image_service.show(context, image_id)
fetch_format = volume_format
fetch_path = self.local_path(volume)
self._delete(fetch_path)
qemu_version = self.get_qemu_version()
needs_conversion = False
if (qemu_version < [1, 7] and (
volume_format == self._DISK_FORMAT_VHDX and
image_meta['disk_format'] != self._DISK_FORMAT_VHDX)):
needs_conversion = True
fetch_format = 'vpc'
temp_file_name = '%s.temp_image.%s.%s' % (
volume['id'],
image_meta['id'],
self._DISK_FORMAT_VHD)
fetch_path = os.path.join(self._local_volume_dir(volume),
temp_file_name)
image_utils.fetch_to_volume_format(
context, image_service, image_id,
fetch_path, fetch_format,
self.configuration.volume_dd_blocksize)
if needs_conversion:
self.vhdutils.convert_vhd(fetch_path, self.local_path(volume))
self._delete(fetch_path)
self.vhdutils.resize_vhd(self.local_path(volume),
volume['size'] * units.Gi)
def _copy_volume_from_snapshot(self, snapshot, volume, volume_size):
"""Copy data from snapshot to destination volume."""
LOG.debug("snapshot: %(snap)s, volume: %(vol)s, "
"volume_size: %(size)s" %
{'snap': snapshot['id'],
'vol': volume['id'],
'size': snapshot['volume_size']})
info_path = self._local_path_volume_info(snapshot['volume'])
snap_info = self._read_info_file(info_path)
vol_dir = self._local_volume_dir(snapshot['volume'])
forward_file = snap_info[snapshot['id']]
forward_path = os.path.join(vol_dir, forward_file)
# Find the file which backs this file, which represents the point
# when this snapshot was created.
img_info = self._qemu_img_info(forward_path)
snapshot_path = os.path.join(vol_dir, img_info.backing_file)
volume_path = self.local_path(volume)
self._delete(volume_path)
self.vhdutils.convert_vhd(snapshot_path,
volume_path)
self.vhdutils.resize_vhd(volume_path, volume_size * units.Gi)
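# Hedged configuration sketch: how this driver might be enabled in cinder.conf on a
# Windows node. The backend section name and share list path are illustrative; the
# driver path is assumed from this module's location, and the option names mirror the
# defaults overridden above.
#
#   [smbfs_windows]
#   volume_driver = cinder.volume.drivers.windows.smbfs.WindowsSmbfsDriver
#   smbfs_shares_config = C:\OpenStack\smbfs_shares.txt
#   smbfs_mount_point_base = C:\OpenStack\_mnt
#   smbfs_default_volume_format = vhd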
|
|
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by GitHub's download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.20 (https://github.com/python-versioneer/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
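# Illustrative note (hypothetical values): in a git-archive tarball the "$Format:...$"
# placeholders above are expanded by git, so the keywords could look like
#   git_refnames = " (HEAD -> master, tag: v0.1.2)"
#   git_full = "<40-character commit hash>"
#   git_date = "2021-06-01 12:34:56 +0000"
# In a plain checkout they stay unexpanded and git_versions_from_keywords() rejects them.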
class VersioneerConfig: # pylint: disable=too-few-public-methods
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440-post"
cfg.tag_prefix = "v"
cfg.parentdir_prefix = "simple_parsing-"
cfg.versionfile_source = "simple_parsing/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Create decorator to mark a method as the handler of a VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
# pylint:disable=too-many-arguments,consider-using-with # noqa
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
process = None
for command in commands:
try:
dispcmd = str([command] + args)
# remember shell=False, so use git.cmd on windows, not just git
process = subprocess.Popen(
[command] + args,
cwd=cwd,
env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr else None),
)
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = process.communicate()[0].strip().decode()
if process.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, process.returncode
return stdout, process.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for _ in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {
"version": dirname[len(parentdir_prefix) :],
"full-revisionid": None,
"dirty": False,
"error": None,
"date": None,
}
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print(
"Tried directories %s but none started with prefix %s"
% (str(rootdirs), parentdir_prefix)
)
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
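# Worked example (hypothetical directory name): with parentdir_prefix
# "simple_parsing-", a tarball unpacked into "simple_parsing-1.2.3/" makes this
# function return {"version": "1.2.3", "full-revisionid": None, "dirty": False, ...}.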
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
with open(versionfile_abs, "r") as fobj:
for line in fobj:
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if "refnames" not in keywords:
raise NotThisMethod("Short version file found")
date = keywords.get("date")
if date is not None:
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = {r.strip() for r in refnames.strip("()").split(",")}
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = {r[len(TAG) :] for r in refs if r.startswith(TAG)}
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = {r for r in refs if re.search(r"\d", r)}
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix) :]
# Filter out refs that exactly match prefix or that don't start
# with a number once the prefix is stripped (mostly a concern
# when prefix is '')
if not re.match(r"\d", r):
continue
if verbose:
print("picking %s" % r)
return {
"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": None,
"date": date,
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {
"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": "no suitable tags",
"date": None,
}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
_, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = runner(
GITS,
[
"describe",
"--tags",
"--dirty",
"--always",
"--long",
"--match",
"%s*" % tag_prefix,
],
cwd=root,
)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"], cwd=root)
# --abbrev-ref was added in git-1.6.3
if rc != 0 or branch_name is None:
raise NotThisMethod("'git rev-parse --abbrev-ref' returned error")
branch_name = branch_name.strip()
if branch_name == "HEAD":
# If we aren't exactly on a branch, pick a branch which represents
# the current commit. If all else fails, we are on a branchless
# commit.
branches, rc = runner(GITS, ["branch", "--contains"], cwd=root)
# --contains was added in git-1.5.4
if rc != 0 or branches is None:
raise NotThisMethod("'git branch --contains' returned error")
branches = branches.split("\n")
# Remove the first line if we're running detached
if "(" in branches[0]:
branches.pop(0)
# Strip off the leading "* " from the list of branches.
branches = [branch[2:] for branch in branches]
if "master" in branches:
branch_name = "master"
elif not branches:
branch_name = None
else:
# Pick the first branch that is returned. Good or bad.
branch_name = branches[0]
pieces["branch"] = branch_name
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[: git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % (
full_tag,
tag_prefix,
)
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix) :]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = runner(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = runner(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip()
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_branch(pieces):
"""TAG[[.dev0]+DISTANCE.gHEX[.dirty]] .
The ".dev0" means not master branch. Note that .dev0 sorts backwards
(a feature branch will appear "older" than the master branch).
Exceptions:
1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0"
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += "+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post0.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post0.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post0.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post0.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
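# Illustrative example for the "pep440-post" style selected in get_config() above
# (tag and hash are hypothetical): closest-tag "0.1.0", distance 3, short "abc1234"
# renders as "0.1.0.post3+gabc1234"; the same pieces with dirty=True render as
# "0.1.0.post3.dev0+gabc1234".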
def render_pep440_post_branch(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] .
The ".dev0" means not master branch.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {
"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None,
}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-branch":
rendered = render_pep440_branch(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-post-branch":
rendered = render_pep440_post_branch(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {
"version": rendered,
"full-revisionid": pieces["long"],
"dirty": pieces["dirty"],
"error": None,
"date": pieces.get("date"),
}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for _ in cfg.versionfile_source.split("/"):
root = os.path.dirname(root)
except NameError:
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None,
}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to compute version",
"date": None,
}
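# Hedged usage sketch: a package shipping this module typically re-exports the computed
# version from its __init__.py. The package name follows versionfile_source above
# ("simple_parsing/_version.py") and is an assumption here:
#
#   from simple_parsing._version import get_versions
#   __version__ = get_versions()["version"]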
|
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from enum import Enum
from pathlib import Path
import json
import logging
import re
import time
from requests import Response
from clouddq.classes.dataplex_entity import DataplexEntity
from clouddq.integration import USER_AGENT_TAG
from clouddq.integration.dataplex.dataplex_client import DataplexClient
from clouddq.integration.gcp_credentials import GcpCredentials
from clouddq.integration.gcs import upload_blob
from clouddq.utils import exponential_backoff
from clouddq.utils import update_dict
logger = logging.getLogger(__name__)
TARGET_SCOPES = [
"https://www.googleapis.com/auth/cloud-platform",
]
DEFAULT_GCS_BUCKET_NAME = "dataplex-clouddq-artifacts-{gcp_dataplex_region}"
class DATAPLEX_TASK_TRIGGER_TYPE(str, Enum):
ON_DEMAND = "ON_DEMAND"
RECURRING = "RECURRING"
class CloudDqDataplexClient:
_client: DataplexClient
gcs_bucket_name: str
def __init__(
self,
gcp_project_id: str | None = None,
gcp_dataplex_lake_name: str | None = None,
gcp_dataplex_region: str | None = None,
gcs_bucket_name: str | None = None,
gcp_credentials: GcpCredentials | None = None,
dataplex_endpoint: str = "https://dataplex.googleapis.com",
) -> None:
if gcs_bucket_name:
self.gcs_bucket_name = gcs_bucket_name
else:
self.gcs_bucket_name = DEFAULT_GCS_BUCKET_NAME.format(
gcp_dataplex_region=gcp_dataplex_region
)
self._client = DataplexClient(
gcp_credentials=gcp_credentials,
gcp_project_id=gcp_project_id,
gcp_dataplex_lake_name=gcp_dataplex_lake_name,
gcp_dataplex_region=gcp_dataplex_region,
dataplex_endpoint=dataplex_endpoint,
)
def create_clouddq_task( # noqa: C901
self,
task_id: str,
clouddq_yaml_spec_file_path: str,
clouddq_run_project_id: str,
clouddq_run_bq_region: str,
clouddq_run_bq_dataset: str,
task_service_account: str,
target_bq_result_project_name: str,
target_bq_result_dataset_name: str,
target_bq_result_table_name: str,
task_trigger_spec_type: DATAPLEX_TASK_TRIGGER_TYPE = DATAPLEX_TASK_TRIGGER_TYPE.ON_DEMAND, # noqa: E501
task_description: str | None = None,
task_labels: dict | None = None,
clouddq_pyspark_driver_path: str | None = None,
clouddq_executable_path: str | None = None,
clouddq_executable_checksum_path: str | None = None,
validate_only: bool = False,
clouddq_pyspark_driver_filename: str = "clouddq_pyspark_driver.py",
enable_experimental_bigquery_entity_uris: bool = True,
enable_experimental_dataplex_gcs_validation: bool = True,
) -> Response:
# Set default CloudDQ PySpark driver path if not manually overridden
clouddq_pyspark_driver_path = self._validate_clouddq_artifact_path(
clouddq_pyspark_driver_path, clouddq_pyspark_driver_filename
)
# Set default CloudDQ executable path if not manually overridden
clouddq_executable_path = self._validate_clouddq_artifact_path(
clouddq_executable_path, "clouddq-executable.zip"
)
# Set default CloudDQ executable checksum path if not manually overridden
clouddq_executable_checksum_path = self._validate_clouddq_artifact_path(
clouddq_executable_checksum_path, "clouddq-executable.zip.hashsum"
)
# Prepare input CloudDQ YAML specs path
clouddq_yaml_spec_file_path = str(clouddq_yaml_spec_file_path)
if clouddq_yaml_spec_file_path[:5] == "gs://":
clouddq_configs_gcs_path = clouddq_yaml_spec_file_path
else:
clouddq_yaml_spec_file_path = Path(clouddq_yaml_spec_file_path)
if clouddq_yaml_spec_file_path.is_file():
upload_blob(
self.gcs_bucket_name,
clouddq_yaml_spec_file_path.name,
str(clouddq_yaml_spec_file_path.name),
)
gcs_uri = (
f"gs://{self.gcs_bucket_name}/{clouddq_yaml_spec_file_path.name}"
)
clouddq_configs_gcs_path = gcs_uri
else:
raise ValueError(
"'clouddq_yaml_spec_file_path' argument "
f"{clouddq_yaml_spec_file_path} "
"must either be a single file (`.yml` or `.zip`) "
"or a GCS path to the `.yml` or `.zip` configs file."
)
# Add user-agent tag as Task label
allowed_user_agent_label = re.sub("[^0-9a-zA-Z]+", "-", USER_AGENT_TAG.lower())
if task_labels:
task_labels["user-agent"] = allowed_user_agent_label
else:
task_labels = {"user-agent": allowed_user_agent_label}
        # Prepare CloudDQ execution arguments
execution_arguments = (
f"clouddq-executable.zip, "
"ALL, "
f"{clouddq_configs_gcs_path}, "
f'--gcp_project_id="{clouddq_run_project_id}", '
f'--gcp_region_id="{clouddq_run_bq_region}", '
f'--gcp_bq_dataset_id="{clouddq_run_bq_dataset}", '
f"--target_bigquery_summary_table="
f'"{target_bq_result_project_name}.'
f"{target_bq_result_dataset_name}."
f'{target_bq_result_table_name}",'
)
# Set experimental flags
if enable_experimental_bigquery_entity_uris:
execution_arguments += " --enable_experimental_bigquery_entity_uris,"
if enable_experimental_dataplex_gcs_validation:
execution_arguments += "--enable_experimental_dataplex_gcs_validation,"
# Prepare Dataplex Task message body for CloudDQ Job
clouddq_post_body = {
"spark": {
"python_script_file": clouddq_pyspark_driver_path,
"file_uris": [
f"{clouddq_executable_path}",
f"{clouddq_executable_checksum_path}",
f"{clouddq_configs_gcs_path}",
],
},
"execution_spec": {
"args": {"TASK_ARGS": execution_arguments},
"service_account": f"{task_service_account}",
},
"trigger_spec": {"type": task_trigger_spec_type},
"description": task_description,
"labels": task_labels,
}
# Set trigger_spec for RECURRING trigger type
if task_trigger_spec_type == DATAPLEX_TASK_TRIGGER_TYPE.RECURRING:
raise NotImplementedError(
f"task_trigger_spec_type {task_trigger_spec_type} not yet supported."
)
response = self._client.create_dataplex_task(
task_id=task_id,
post_body=clouddq_post_body,
validate_only=validate_only,
)
return response
def get_clouddq_task_status(self, task_id: str) -> str:
"""
Get the dataplex task status
:param task_id: dataplex task id
:return: Task status
"""
res = self._client.get_dataplex_task_jobs(task_id)
logger.info(f"Response status code is {res.status_code}")
logger.info(f"Response text is {res.text}")
resp_obj = json.loads(res.text)
if res.status_code == 200:
if (
"jobs" in resp_obj
and len(resp_obj["jobs"]) > 0 # noqa: W503
and "state" in resp_obj["jobs"][0] # noqa: W503
):
task_status = resp_obj["jobs"][0]["state"]
return task_status
else:
return res
def delete_clouddq_task_if_exists(self, task_id: str) -> Response:
"""
List the dataplex task jobs
:param task_id: task id for dataplex task
:return: Response object
"""
get_task_response = self._client.get_dataplex_task(
task_id=task_id,
)
if get_task_response.status_code == 200:
delete_task_response = self._client.delete_dataplex_task(
task_id=task_id,
)
if delete_task_response.status_code == 200:
retry_iteration = 0
get_task_response = self._client.get_dataplex_task(
task_id=task_id,
)
try:
while get_task_response.status_code != 404:
exponential_backoff(retry_iteration)
retry_iteration += 1
get_task_response = self._client.get_dataplex_task(
task_id=task_id,
)
logger.info(f"Successfully deleted Task ID: {task_id}")
return delete_task_response
except RuntimeError as e:
logger.error(
f"Failed to delete Task ID: {task_id} with error: {e}",
exc_info=True,
)
else:
return delete_task_response
else:
return get_task_response
def get_dataplex_lake(self, lake_name: str) -> Response:
return self._client.get_dataplex_lake(lake_name)
def _validate_clouddq_artifact_path(
self, clouddq_artifact_path: str | None, artifact_name: str
) -> str:
if not clouddq_artifact_path:
clouddq_artifact_gcs_path = f"gs://{self.gcs_bucket_name}/{artifact_name}"
else:
clouddq_artifact_path = str(clouddq_artifact_path)
clouddq_artifact_name = clouddq_artifact_path.split("/")[-1]
if not clouddq_artifact_path[:5] == "gs://":
raise ValueError(
f"Artifact path argument for {artifact_name}: "
f"{clouddq_artifact_path} must be a GCS path."
)
elif clouddq_artifact_name != artifact_name:
raise ValueError(
f"Artifact path argument for {artifact_name}: "
f"{clouddq_artifact_path} must end with '{artifact_name}'."
)
else:
clouddq_artifact_gcs_path = clouddq_artifact_path
return clouddq_artifact_gcs_path
def get_dataplex_entity(
self,
zone_id: str,
entity_id: str,
gcp_project_id: str = None,
location_id: str = None,
lake_name: str = None,
) -> DataplexEntity:
logger.debug(f"CloudDqDataplex.get_dataplex_entity() arguments: {locals()}")
params = {"view": "FULL"}
response = self._client.get_entity(
zone_id=zone_id,
entity_id=entity_id,
gcp_project_id=gcp_project_id,
location_id=location_id,
lake_name=lake_name,
params=params,
)
if response.status_code == 200:
return DataplexEntity.from_dict(
entity_id=entity_id, kwargs=json.loads(response.text)
)
else:
raise RuntimeError(
f"Failed to retrieve Dataplex entity: "
f"'/projects/{gcp_project_id}/locations/{location_id}"
f"/lakes/{lake_name}/zones/{zone_id}/entities/{entity_id}':\n {response.text}"
)
def list_dataplex_entities(
self,
zone_id: str,
prefix: str = None,
data_path: str = None,
gcp_project_id: str = None,
location_id: str = None,
lake_name: str = None,
) -> list[DataplexEntity]:
params = {"page_size": 10}
if prefix and data_path:
            raise ValueError("Either prefix or data_path should be passed, but not both")
if prefix:
params.update({"filter": f"id=starts_with({prefix})"})
if data_path:
params.update({"filter": f"data_path=starts_with({data_path})"})
response_dict = {}
response = self._client.list_entities(
zone_id=zone_id,
params=params,
gcp_project_id=gcp_project_id,
location_id=location_id,
lake_name=lake_name,
)
response_dict.update(response.json())
while "nextPageToken" in response_dict:
            time.sleep(3)  # stay under the API rate limit of 4 calls per 10 seconds
next_page_token = response_dict["nextPageToken"]
logger.debug("Getting next page...")
page_token = {"page_token": f"{next_page_token}"}
params.update(page_token)
next_page_response = self._client.list_entities(
zone_id=zone_id,
params=params,
gcp_project_id=gcp_project_id,
location_id=location_id,
lake_name=lake_name,
).json()
logger.debug(f"Next page response {next_page_response}")
if "nextPageToken" not in next_page_response:
del response_dict["nextPageToken"]
response_dict = update_dict(response_dict, next_page_response)
else:
response_dict = update_dict(response_dict, next_page_response)
response_dict["nextPageToken"] = next_page_response["nextPageToken"]
dataplex_entities = []
if "entities" in response_dict:
for entity in response_dict["entities"]:
entity_with_schema = self.get_dataplex_entity(
entity_id=entity["id"],
zone_id=zone_id,
gcp_project_id=gcp_project_id,
location_id=location_id,
lake_name=lake_name,
)
dataplex_entities.append(entity_with_schema)
return dataplex_entities
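# --- Illustrative sketch (not part of the original client) -------------------
# list_dataplex_entities() above pages through the Dataplex list API by
# following "nextPageToken" and merging each page into one dict. The same
# pattern, written against a hypothetical `list_page(params) -> dict`
# callable, looks like this:
def _paginate_next_page_token(list_page, page_size=10, pause_seconds=3):
    """Collect every page of a nextPageToken-style list API into one dict."""
    import time

    params = {"page_size": page_size}
    merged = dict(list_page(params))
    while "nextPageToken" in merged:
        time.sleep(pause_seconds)  # stay under the per-project API rate limit
        params["page_token"] = merged.pop("nextPageToken")
        page = list_page(params)
        for key, value in page.items():
            if key == "nextPageToken":
                merged["nextPageToken"] = value
            elif isinstance(value, list):
                merged.setdefault(key, []).extend(value)
            else:
                merged[key] = value
    return merged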
|
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import itertools
from weakref import ref
from matplotlib.externals import six
from datetime import datetime
import numpy as np
from numpy.testing.utils import (assert_array_equal, assert_approx_equal,
assert_array_almost_equal)
from nose.tools import (assert_equal, assert_not_equal, raises, assert_true,
assert_raises)
import matplotlib.cbook as cbook
import matplotlib.colors as mcolors
from matplotlib.cbook import delete_masked_points as dmp
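# Note: delete_masked_points (dmp) keeps only the positions that are unmasked
# and finite in every numeric/masked argument and indexes all arguments alike;
# string and datetime sequences pass through by position (see the tests in
# Test_delete_masked_points below).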
def test_is_string_like():
y = np.arange(10)
assert_equal(cbook.is_string_like(y), False)
y.shape = 10, 1
assert_equal(cbook.is_string_like(y), False)
y.shape = 1, 10
assert_equal(cbook.is_string_like(y), False)
assert cbook.is_string_like("hello world")
assert_equal(cbook.is_string_like(10), False)
y = ['a', 'b', 'c']
assert_equal(cbook.is_string_like(y), False)
y = np.array(y)
assert_equal(cbook.is_string_like(y), False)
y = np.array(y, dtype=object)
assert cbook.is_string_like(y)
def test_is_sequence_of_strings():
y = ['a', 'b', 'c']
assert cbook.is_sequence_of_strings(y)
y = np.array(y, dtype=object)
assert cbook.is_sequence_of_strings(y)
def test_restrict_dict():
d = {'foo': 'bar', 1: 2}
d1 = cbook.restrict_dict(d, ['foo', 1])
assert_equal(d1, d)
d2 = cbook.restrict_dict(d, ['bar', 2])
assert_equal(d2, {})
d3 = cbook.restrict_dict(d, {'foo': 1})
assert_equal(d3, {'foo': 'bar'})
d4 = cbook.restrict_dict(d, {})
assert_equal(d4, {})
d5 = cbook.restrict_dict(d, set(['foo', 2]))
assert_equal(d5, {'foo': 'bar'})
# check that d was not modified
assert_equal(d, {'foo': 'bar', 1: 2})
class Test_delete_masked_points(object):
def setUp(self):
self.mask1 = [False, False, True, True, False, False]
self.arr0 = np.arange(1.0, 7.0)
self.arr1 = [1, 2, 3, np.nan, np.nan, 6]
self.arr2 = np.array(self.arr1)
self.arr3 = np.ma.array(self.arr2, mask=self.mask1)
self.arr_s = ['a', 'b', 'c', 'd', 'e', 'f']
self.arr_s2 = np.array(self.arr_s)
self.arr_dt = [datetime(2008, 1, 1), datetime(2008, 1, 2),
datetime(2008, 1, 3), datetime(2008, 1, 4),
datetime(2008, 1, 5), datetime(2008, 1, 6)]
self.arr_dt2 = np.array(self.arr_dt)
self.arr_colors = ['r', 'g', 'b', 'c', 'm', 'y']
self.arr_rgba = mcolors.colorConverter.to_rgba_array(self.arr_colors)
@raises(ValueError)
def test_bad_first_arg(self):
dmp('a string', self.arr0)
def test_string_seq(self):
actual = dmp(self.arr_s, self.arr1)
ind = [0, 1, 2, 5]
expected = (self.arr_s2.take(ind), self.arr2.take(ind))
assert_array_equal(actual[0], expected[0])
assert_array_equal(actual[1], expected[1])
def test_datetime(self):
actual = dmp(self.arr_dt, self.arr3)
ind = [0, 1, 5]
expected = (self.arr_dt2.take(ind),
self.arr3.take(ind).compressed())
assert_array_equal(actual[0], expected[0])
assert_array_equal(actual[1], expected[1])
def test_rgba(self):
actual = dmp(self.arr3, self.arr_rgba)
ind = [0, 1, 5]
expected = (self.arr3.take(ind).compressed(),
self.arr_rgba.take(ind, axis=0))
assert_array_equal(actual[0], expected[0])
assert_array_equal(actual[1], expected[1])
def test_allequal():
assert(cbook.allequal([1, 1, 1]))
assert(not cbook.allequal([1, 1, 0]))
assert(cbook.allequal([]))
assert(cbook.allequal(('a', 'a')))
assert(not cbook.allequal(('a', 'b')))
class Test_boxplot_stats(object):
def setup(self):
np.random.seed(937)
self.nrows = 37
self.ncols = 4
self.data = np.random.lognormal(size=(self.nrows, self.ncols),
mean=1.5, sigma=1.75)
self.known_keys = sorted([
'mean', 'med', 'q1', 'q3', 'iqr',
'cilo', 'cihi', 'whislo', 'whishi',
'fliers', 'label'
])
self.std_results = cbook.boxplot_stats(self.data)
self.known_nonbootstrapped_res = {
'cihi': 6.8161283264444847,
'cilo': -0.1489815330368689,
'iqr': 13.492709959447094,
'mean': 13.00447442387868,
'med': 3.3335733967038079,
'fliers': np.array([
92.55467075, 87.03819018, 42.23204914, 39.29390996
]),
'q1': 1.3597529879465153,
'q3': 14.85246294739361,
'whishi': 27.899688243699629,
'whislo': 0.042143774965502923
}
self.known_bootstrapped_ci = {
'cihi': 8.939577523357828,
'cilo': 1.8692703958676578,
}
self.known_whis3_res = {
'whishi': 42.232049135969874,
'whislo': 0.042143774965502923,
'fliers': np.array([92.55467075, 87.03819018]),
}
self.known_res_percentiles = {
'whislo': 0.1933685896907924,
'whishi': 42.232049135969874
}
self.known_res_range = {
'whislo': 0.042143774965502923,
'whishi': 92.554670752188699
}
def test_form_main_list(self):
assert_true(isinstance(self.std_results, list))
def test_form_each_dict(self):
for res in self.std_results:
assert_true(isinstance(res, dict))
def test_form_dict_keys(self):
for res in self.std_results:
keys = sorted(list(res.keys()))
for key in keys:
assert_true(key in self.known_keys)
def test_results_baseline(self):
res = self.std_results[0]
for key in list(self.known_nonbootstrapped_res.keys()):
if key != 'fliers':
assert_statement = assert_approx_equal
else:
assert_statement = assert_array_almost_equal
assert_statement(
res[key],
self.known_nonbootstrapped_res[key]
)
def test_results_bootstrapped(self):
results = cbook.boxplot_stats(self.data, bootstrap=10000)
res = results[0]
for key in list(self.known_bootstrapped_ci.keys()):
assert_approx_equal(
res[key],
self.known_bootstrapped_ci[key]
)
def test_results_whiskers_float(self):
results = cbook.boxplot_stats(self.data, whis=3)
res = results[0]
for key in list(self.known_whis3_res.keys()):
if key != 'fliers':
assert_statement = assert_approx_equal
else:
assert_statement = assert_array_almost_equal
assert_statement(
res[key],
self.known_whis3_res[key]
)
def test_results_whiskers_range(self):
results = cbook.boxplot_stats(self.data, whis='range')
res = results[0]
for key in list(self.known_res_range.keys()):
if key != 'fliers':
assert_statement = assert_approx_equal
else:
assert_statement = assert_array_almost_equal
assert_statement(
res[key],
self.known_res_range[key]
)
def test_results_whiskers_percentiles(self):
results = cbook.boxplot_stats(self.data, whis=[5, 95])
res = results[0]
for key in list(self.known_res_percentiles.keys()):
if key != 'fliers':
assert_statement = assert_approx_equal
else:
assert_statement = assert_array_almost_equal
assert_statement(
res[key],
self.known_res_percentiles[key]
)
def test_results_withlabels(self):
labels = ['Test1', 2, 'ardvark', 4]
results = cbook.boxplot_stats(self.data, labels=labels)
res = results[0]
for lab, res in zip(labels, results):
assert_equal(res['label'], lab)
results = cbook.boxplot_stats(self.data)
for res in results:
assert('label' not in res)
@raises(ValueError)
def test_label_error(self):
labels = [1, 2]
results = cbook.boxplot_stats(self.data, labels=labels)
@raises(ValueError)
def test_bad_dims(self):
data = np.random.normal(size=(34, 34, 34))
results = cbook.boxplot_stats(data)
class Test_callback_registry(object):
def setup(self):
self.signal = 'test'
self.callbacks = cbook.CallbackRegistry()
def connect(self, s, func):
return self.callbacks.connect(s, func)
def is_empty(self):
assert_equal(self.callbacks._func_cid_map, {})
assert_equal(self.callbacks.callbacks, {})
def is_not_empty(self):
assert_not_equal(self.callbacks._func_cid_map, {})
assert_not_equal(self.callbacks.callbacks, {})
def test_callback_complete(self):
# ensure we start with an empty registry
self.is_empty()
# create a class for testing
mini_me = Test_callback_registry()
# test that we can add a callback
cid1 = self.connect(self.signal, mini_me.dummy)
assert_equal(type(cid1), int)
self.is_not_empty()
# test that we don't add a second callback
cid2 = self.connect(self.signal, mini_me.dummy)
assert_equal(cid1, cid2)
self.is_not_empty()
assert_equal(len(self.callbacks._func_cid_map), 1)
assert_equal(len(self.callbacks.callbacks), 1)
del mini_me
# check we now have no callbacks registered
self.is_empty()
def dummy(self):
pass
def test_to_prestep():
x = np.arange(4)
y1 = np.arange(4)
y2 = np.arange(4)[::-1]
xs, y1s, y2s = cbook.pts_to_prestep(x, y1, y2)
x_target = np.asarray([0, 0, 1, 1, 2, 2, 3], dtype='float')
y1_target = np.asarray([0, 1, 1, 2, 2, 3, 3], dtype='float')
y2_target = np.asarray([3, 2, 2, 1, 1, 0, 0], dtype='float')
assert_array_equal(x_target, xs)
assert_array_equal(y1_target, y1s)
assert_array_equal(y2_target, y2s)
xs, y1s = cbook.pts_to_prestep(x, y1)
assert_array_equal(x_target, xs)
assert_array_equal(y1_target, y1s)
def test_to_poststep():
x = np.arange(4)
y1 = np.arange(4)
y2 = np.arange(4)[::-1]
xs, y1s, y2s = cbook.pts_to_poststep(x, y1, y2)
x_target = np.asarray([0, 1, 1, 2, 2, 3, 3], dtype='float')
y1_target = np.asarray([0, 0, 1, 1, 2, 2, 3], dtype='float')
y2_target = np.asarray([3, 3, 2, 2, 1, 1, 0], dtype='float')
assert_array_equal(x_target, xs)
assert_array_equal(y1_target, y1s)
assert_array_equal(y2_target, y2s)
xs, y1s = cbook.pts_to_poststep(x, y1)
assert_array_equal(x_target, xs)
assert_array_equal(y1_target, y1s)
def test_to_midstep():
x = np.arange(4)
y1 = np.arange(4)
y2 = np.arange(4)[::-1]
xs, y1s, y2s = cbook.pts_to_midstep(x, y1, y2)
x_target = np.asarray([0, .5, .5, 1.5, 1.5, 2.5, 2.5, 3], dtype='float')
y1_target = np.asarray([0, 0, 1, 1, 2, 2, 3, 3], dtype='float')
y2_target = np.asarray([3, 3, 2, 2, 1, 1, 0, 0], dtype='float')
assert_array_equal(x_target, xs)
assert_array_equal(y1_target, y1s)
assert_array_equal(y2_target, y2s)
xs, y1s = cbook.pts_to_midstep(x, y1)
assert_array_equal(x_target, xs)
assert_array_equal(y1_target, y1s)
def test_step_fails():
assert_raises(ValueError, cbook._step_validation,
np.arange(12).reshape(3, 4), 'a')
assert_raises(ValueError, cbook._step_validation,
np.arange(12), 'a')
assert_raises(ValueError, cbook._step_validation,
np.arange(12))
assert_raises(ValueError, cbook._step_validation,
np.arange(12), np.arange(3))
def test_grouper():
class dummy():
pass
a, b, c, d, e = objs = [dummy() for j in range(5)]
g = cbook.Grouper()
g.join(*objs)
assert set(list(g)[0]) == set(objs)
assert set(g.get_siblings(a)) == set(objs)
for other in objs[1:]:
assert g.joined(a, other)
g.remove(a)
for other in objs[1:]:
assert not g.joined(a, other)
for A, B in itertools.product(objs[1:], objs[1:]):
assert g.joined(A, B)
def test_grouper_private():
class dummy():
pass
objs = [dummy() for j in range(5)]
g = cbook.Grouper()
g.join(*objs)
# reach in and touch the internals !
mapping = g._mapping
for o in objs:
assert ref(o) in mapping
base_set = mapping[ref(objs[0])]
for o in objs[1:]:
assert mapping[ref(o)] is base_set
|
|
#!/usr/bin/env python2
'''
description: Convert directory with downloaded zipped KNMI ascii files to
netCDF format. Station information is obtained from a csv file.
Creation of the csv file and downloading of the KNMI data is
performed in another script (knmi_getdata.py).
author: Ronald van Haren, NLeSC ([email protected])
licence: Apache 2.0
'''
from numpy import concatenate as npconcatenate
import csv
import os
def read_knmi_data(reference_station):
'''
Calculate or load KNMI reference data:
pickled file exists -> load
pickled file doesn't exist -> calculate
'''
from load_knmi_data import load_knmi_data
import glob
from numpy import sort
from numpy import concatenate
import collections
# generate filename of KNMI station
filenames = sort(glob.glob('KNMI/uurgeg_' + str(reference_station) + '*.zip' ))
# load all csv files in list of dictionaries
dicts = [load_knmi_data(filename).csvdata for filename in filenames]
# merge all dictionaries in a super dictionary
knmi_data = collections.defaultdict(list)
for idx in range(0,len(dicts)):
try:
knmi_data = dict((k, npconcatenate((knmi_data.get(k), dicts[idx].get(k)))) for k in set(knmi_data.keys() + dicts[idx].keys()))
except ValueError:
# cannot concatenate empty arrays
knmi_data = dict((k, dicts[idx].get(k)) for k in dicts[idx].keys())
# return dictionary with all variables/time steps
return knmi_data
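# Illustrative sketch (not called by this script): read_knmi_data() above
# merges the per-file dictionaries by concatenating the arrays stored under
# each key; the same idea for plain Python lists looks like this.
def merge_dicts_of_lists(dicts):
    '''merge_dicts_of_lists([{'T': [1, 2]}, {'T': [3]}]) -> {'T': [1, 2, 3]}'''
    merged = {}
    for single_dict in dicts:
        for key, values in single_dict.items():
            merged.setdefault(key, []).extend(values)
    return merged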
def write_combined_data_netcdf(data, stationid, lon, lat, elevation):
    '''
    Write the combined KNMI data for one station to a netCDF4 file
    ('output<stationid>.nc') with time, longitude, latitude and elevation
    axes and one variable per KNMI column.
    '''
from netCDF4 import Dataset as ncdf
import netcdftime
from datetime import datetime
from dateutil import tz
from numpy import zeros
from numpy import nan as npnan
from numpy import dtype
import time
ncfile = ncdf('output'+str(stationid)+'.nc', 'w', format='NETCDF4')
# description of the file
ncfile.description = 'KNMI ' + str(stationid)
ncfile.history = 'Created ' + time.ctime(time.time())
# create time dimension
timevar = ncfile.createDimension('time', None)
# create lon/lat dimensions
lonvar = ncfile.createDimension('longitude', 1)
latvar = ncfile.createDimension('latitude', 1)
# elevation
elvar = ncfile.createDimension('elevation', 1)
    # initialize time axis
timeaxis = [int(round(netcdftime.date2num(data['datetime'][idx], units='minutes since 2010-01-01 00:00:00',
calendar='gregorian'))) for idx in range(0,len(data['datetime']))]
# netcdf time variable UTC
timevar = ncfile.createVariable('time', 'i4', ('time',),
zlib=True)
timevar[:] = timeaxis
timevar.units = 'minutes since 2010-01-01 00:00:00'
timevar.calendar = 'gregorian'
timevar.standard_name = 'time'
timevar.long_name = 'time in UTC'
# lon/lat variables
lonvar = ncfile.createVariable('longitude',dtype('float32').char,('longitude',))
lonvar.units = 'degrees_east'
lonvar.axis = 'X'
lonvar.standard_name = 'longitude'
lonvar[:] = lon
latvar = ncfile.createVariable('latitude',dtype('float32').char,('latitude',))
latvar.units = 'degrees_north'
latvar.axis = 'Y'
latvar.standard_name = 'latitude'
latvar[:] = lat
# elevation variable
elvar = ncfile.createVariable('elevation', dtype('float32').char, ('elevation',))
elvar.units = 'meter'
elvar.axis = 'Z'
elvar.standard_name = 'elevation'
elvar[:] = elevation
# create other variables in netcdf file
for variable in data.keys():
        if variable not in ['YYYYMMDD', 'Time', '<br>', 'datetime', '# STN', None]:
# add variables in netcdf file
# convert strings to npnan if array contains numbers
if True in [is_number(c)
for c in data[variable]]:
data[variable] = [npnan if isinstance(
fitem(c), str) else fitem(c) for c in data[
variable]]
# check if variable is a string
if not isinstance(data[variable][1], str):
# fill variable
variableName = variable
values = ncfile.createVariable(
variableName, type(data[variable][1]),
('time',), zlib=True, fill_value=-999)
else:
# string variables cannot have fill_value
values = ncfile.createVariable(
variable, type(data[variable][1]),
('time',), zlib=True)
try: # fill variable
values[:] = data[variable][:]
except IndexError:
# for strings the syntax is slightly different
values = data[variable][:]
#self.fill_attribute_data()
def fill_attribute_data():
'''
Function that fills the attribute data of the netcdf file
'''
if variable == 'DD':
values.units = 'degrees'
values.standard_name = 'wind direction'
values.long_name = 'mean wind direction during the 10-minute period preceding the time of observation (990=variable)'
elif variable == 'TemperatureF':
values.units = 'F'
values.standard_name = 'air_temperature'
values.long_name = 'air temperature'
else:
pass
def fitem(item):
try:
item = item.strip()
except AttributeError:
pass
try:
item = float(item)
except ValueError:
pass
return item
def is_number(s):
'''
check if the value in the string is a number and return True or False
'''
try:
float(s)
return True
except ValueError:
pass
return False
def load_csv_data(csvfile):
'''
    Load the station information csv file (csvfile) into a dict of lists
'''
with open(csvfile, 'r') as csvin:
reader = csv.DictReader(csvin, delimiter=',')
try:
csvdata
except UnboundLocalError:
reader.next()
try:
csvdata = {k.strip(): [fitem(v)] for k, v in
reader.next().items()}
except StopIteration:
pass
current_row = 0
for line in reader:
current_row += 1
if current_row == 0: # header
# skip the header
continue
for k, v in line.items():
if k is not None: # skip over empty fields
k = k.strip()
csvdata[k].append(fitem(v))
return csvdata
if __name__=="__main__":
knmi_csv_info = load_csv_data('knmi_reference_data.csv')
station_ids = [int(x) for x in knmi_csv_info['station_id']]
for station in station_ids:
if os.path.isfile('output' + str(station) + '.nc'):
continue
print (station)
lat = knmi_csv_info['latitude'][station_ids.index(station)]
lon = knmi_csv_info['longitude'][station_ids.index(station)]
elevation = knmi_csv_info['elevation'][station_ids.index(station)]
data = read_knmi_data(station)
write_combined_data_netcdf(data, station, lon, lat, elevation)
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for the setup module."""
import logging
import os
import shutil
import tempfile
import unittest
from apache_beam.io.filesystems import FileSystems
from apache_beam.runners.dataflow.internal import dependency
from apache_beam.runners.dataflow.internal import names
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
# Protect against environments where GCS library is not available.
# pylint: disable=wrong-import-order, wrong-import-position
try:
from apitools.base.py.exceptions import HttpError
except ImportError:
HttpError = None
# pylint: enable=wrong-import-order, wrong-import-position
@unittest.skipIf(HttpError is None, 'GCP dependencies are not installed')
class SetupTest(unittest.TestCase):
def update_options(self, options):
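    # Shared test fixture: clear sdk_location and default temp_location to the
    # staging_location so dependency.stage_job_resources() can run without a
    # real GCP configuration.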
setup_options = options.view_as(SetupOptions)
setup_options.sdk_location = ''
google_cloud_options = options.view_as(GoogleCloudOptions)
if google_cloud_options.temp_location is None:
google_cloud_options.temp_location = google_cloud_options.staging_location
def create_temp_file(self, path, contents):
with open(path, 'w') as f:
f.write(contents)
return f.name
def populate_requirements_cache(self, requirements_file, cache_dir):
_ = requirements_file
self.create_temp_file(os.path.join(cache_dir, 'abc.txt'), 'nothing')
self.create_temp_file(os.path.join(cache_dir, 'def.txt'), 'nothing')
def test_no_staging_location(self):
with self.assertRaises(RuntimeError) as cm:
dependency.stage_job_resources(PipelineOptions())
self.assertEqual('The --staging_location option must be specified.',
cm.exception.message)
def test_no_temp_location(self):
staging_dir = tempfile.mkdtemp()
options = PipelineOptions()
google_cloud_options = options.view_as(GoogleCloudOptions)
google_cloud_options.staging_location = staging_dir
self.update_options(options)
google_cloud_options.temp_location = None
with self.assertRaises(RuntimeError) as cm:
dependency.stage_job_resources(options)
self.assertEqual('The --temp_location option must be specified.',
cm.exception.message)
def test_no_main_session(self):
staging_dir = tempfile.mkdtemp()
options = PipelineOptions()
options.view_as(GoogleCloudOptions).staging_location = staging_dir
options.view_as(SetupOptions).save_main_session = False
self.update_options(options)
self.assertEqual(
[],
dependency.stage_job_resources(options))
def test_with_main_session(self):
staging_dir = tempfile.mkdtemp()
options = PipelineOptions()
options.view_as(GoogleCloudOptions).staging_location = staging_dir
options.view_as(SetupOptions).save_main_session = True
self.update_options(options)
self.assertEqual(
[names.PICKLED_MAIN_SESSION_FILE],
dependency.stage_job_resources(options))
self.assertTrue(
os.path.isfile(
os.path.join(staging_dir, names.PICKLED_MAIN_SESSION_FILE)))
def test_default_resources(self):
staging_dir = tempfile.mkdtemp()
options = PipelineOptions()
options.view_as(GoogleCloudOptions).staging_location = staging_dir
self.update_options(options)
self.assertEqual(
[],
dependency.stage_job_resources(options))
def test_with_requirements_file(self):
try:
staging_dir = tempfile.mkdtemp()
requirements_cache_dir = tempfile.mkdtemp()
source_dir = tempfile.mkdtemp()
options = PipelineOptions()
options.view_as(GoogleCloudOptions).staging_location = staging_dir
self.update_options(options)
options.view_as(SetupOptions).requirements_cache = requirements_cache_dir
options.view_as(SetupOptions).requirements_file = os.path.join(
source_dir, dependency.REQUIREMENTS_FILE)
self.create_temp_file(
os.path.join(source_dir, dependency.REQUIREMENTS_FILE), 'nothing')
self.assertEqual(
sorted([dependency.REQUIREMENTS_FILE,
'abc.txt', 'def.txt']),
sorted(dependency.stage_job_resources(
options,
populate_requirements_cache=self.populate_requirements_cache)))
self.assertTrue(
os.path.isfile(
os.path.join(staging_dir, dependency.REQUIREMENTS_FILE)))
self.assertTrue(os.path.isfile(os.path.join(staging_dir, 'abc.txt')))
self.assertTrue(os.path.isfile(os.path.join(staging_dir, 'def.txt')))
finally:
shutil.rmtree(staging_dir)
shutil.rmtree(requirements_cache_dir)
shutil.rmtree(source_dir)
def test_requirements_file_not_present(self):
staging_dir = tempfile.mkdtemp()
with self.assertRaises(RuntimeError) as cm:
options = PipelineOptions()
options.view_as(GoogleCloudOptions).staging_location = staging_dir
self.update_options(options)
options.view_as(SetupOptions).requirements_file = 'nosuchfile'
dependency.stage_job_resources(
options, populate_requirements_cache=self.populate_requirements_cache)
self.assertEqual(
cm.exception.message,
'The file %s cannot be found. It was specified in the '
'--requirements_file command line option.' % 'nosuchfile')
def test_with_requirements_file_and_cache(self):
staging_dir = tempfile.mkdtemp()
source_dir = tempfile.mkdtemp()
options = PipelineOptions()
options.view_as(GoogleCloudOptions).staging_location = staging_dir
self.update_options(options)
options.view_as(SetupOptions).requirements_file = os.path.join(
source_dir, dependency.REQUIREMENTS_FILE)
options.view_as(SetupOptions).requirements_cache = os.path.join(
tempfile.gettempdir(), 'alternative-cache-dir')
self.create_temp_file(
os.path.join(source_dir, dependency.REQUIREMENTS_FILE), 'nothing')
self.assertEqual(
sorted([dependency.REQUIREMENTS_FILE,
'abc.txt', 'def.txt']),
sorted(dependency.stage_job_resources(
options,
populate_requirements_cache=self.populate_requirements_cache)))
self.assertTrue(
os.path.isfile(
os.path.join(staging_dir, dependency.REQUIREMENTS_FILE)))
self.assertTrue(os.path.isfile(os.path.join(staging_dir, 'abc.txt')))
self.assertTrue(os.path.isfile(os.path.join(staging_dir, 'def.txt')))
def test_with_setup_file(self):
staging_dir = tempfile.mkdtemp()
source_dir = tempfile.mkdtemp()
self.create_temp_file(
os.path.join(source_dir, 'setup.py'), 'notused')
options = PipelineOptions()
options.view_as(GoogleCloudOptions).staging_location = staging_dir
self.update_options(options)
options.view_as(SetupOptions).setup_file = os.path.join(
source_dir, 'setup.py')
self.assertEqual(
[dependency.WORKFLOW_TARBALL_FILE],
dependency.stage_job_resources(
options,
# We replace the build setup command because a realistic one would
# require the setuptools package to be installed. Note that we can't
# use "touch" here to create the expected output tarball file, since
# touch is not available on Windows, so we invoke python to produce
# equivalent behavior.
build_setup_args=[
'python', '-c', 'open(__import__("sys").argv[1], "a")',
os.path.join(source_dir, dependency.WORKFLOW_TARBALL_FILE)],
temp_dir=source_dir))
self.assertTrue(
os.path.isfile(
os.path.join(staging_dir, dependency.WORKFLOW_TARBALL_FILE)))
def test_setup_file_not_present(self):
staging_dir = tempfile.mkdtemp()
options = PipelineOptions()
options.view_as(GoogleCloudOptions).staging_location = staging_dir
self.update_options(options)
options.view_as(SetupOptions).setup_file = 'nosuchfile'
with self.assertRaises(RuntimeError) as cm:
dependency.stage_job_resources(options)
self.assertEqual(
cm.exception.message,
'The file %s cannot be found. It was specified in the '
'--setup_file command line option.' % 'nosuchfile')
def test_setup_file_not_named_setup_dot_py(self):
staging_dir = tempfile.mkdtemp()
source_dir = tempfile.mkdtemp()
options = PipelineOptions()
options.view_as(GoogleCloudOptions).staging_location = staging_dir
self.update_options(options)
options.view_as(SetupOptions).setup_file = (
os.path.join(source_dir, 'xyz-setup.py'))
self.create_temp_file(
os.path.join(source_dir, 'xyz-setup.py'), 'notused')
with self.assertRaises(RuntimeError) as cm:
dependency.stage_job_resources(options)
self.assertTrue(
cm.exception.message.startswith(
'The --setup_file option expects the full path to a file named '
'setup.py instead of '))
def override_file_copy(self, expected_from_path, expected_to_dir):
def file_copy(from_path, to_path):
if not from_path.endswith(names.PICKLED_MAIN_SESSION_FILE):
self.assertEqual(expected_from_path, from_path)
self.assertEqual(FileSystems.join(expected_to_dir,
names.DATAFLOW_SDK_TARBALL_FILE),
to_path)
if from_path.startswith('gs://') or to_path.startswith('gs://'):
logging.info('Faking file_copy(%s, %s)', from_path, to_path)
else:
shutil.copyfile(from_path, to_path)
dependency._dependency_file_copy = file_copy
def override_file_download(self, expected_from_url, expected_to_folder):
def file_download(from_url, _):
self.assertEqual(expected_from_url, from_url)
tarball_path = os.path.join(expected_to_folder, 'sdk-tarball')
with open(tarball_path, 'w') as f:
f.write('Some contents.')
return tarball_path
dependency._dependency_file_download = file_download
return os.path.join(expected_to_folder, 'sdk-tarball')
def override_pypi_download(self, expected_from_url, expected_to_folder):
def pypi_download(_):
tarball_path = os.path.join(expected_to_folder, 'sdk-tarball')
with open(tarball_path, 'w') as f:
f.write('Some contents.')
return tarball_path
dependency._download_pypi_sdk_package = pypi_download
return os.path.join(expected_to_folder, 'sdk-tarball')
def test_sdk_location_default(self):
staging_dir = tempfile.mkdtemp()
expected_from_url = 'pypi'
expected_from_path = self.override_pypi_download(
expected_from_url, staging_dir)
self.override_file_copy(expected_from_path, staging_dir)
options = PipelineOptions()
options.view_as(GoogleCloudOptions).staging_location = staging_dir
self.update_options(options)
options.view_as(SetupOptions).sdk_location = 'default'
self.assertEqual(
[names.DATAFLOW_SDK_TARBALL_FILE],
dependency.stage_job_resources(
options,
file_copy=dependency._dependency_file_copy))
def test_sdk_location_local(self):
staging_dir = tempfile.mkdtemp()
sdk_location = tempfile.mkdtemp()
self.create_temp_file(
os.path.join(
sdk_location,
names.DATAFLOW_SDK_TARBALL_FILE),
'contents')
options = PipelineOptions()
options.view_as(GoogleCloudOptions).staging_location = staging_dir
self.update_options(options)
options.view_as(SetupOptions).sdk_location = sdk_location
self.assertEqual(
[names.DATAFLOW_SDK_TARBALL_FILE],
dependency.stage_job_resources(options))
tarball_path = os.path.join(
staging_dir, names.DATAFLOW_SDK_TARBALL_FILE)
with open(tarball_path) as f:
self.assertEqual(f.read(), 'contents')
def test_sdk_location_local_not_present(self):
staging_dir = tempfile.mkdtemp()
sdk_location = 'nosuchdir'
with self.assertRaises(RuntimeError) as cm:
options = PipelineOptions()
options.view_as(GoogleCloudOptions).staging_location = staging_dir
self.update_options(options)
options.view_as(SetupOptions).sdk_location = sdk_location
dependency.stage_job_resources(options)
self.assertEqual(
'The file "%s" cannot be found. Its '
'location was specified by the --sdk_location command-line option.' %
sdk_location,
cm.exception.message)
def test_sdk_location_gcs(self):
staging_dir = tempfile.mkdtemp()
sdk_location = 'gs://my-gcs-bucket/tarball.tar.gz'
self.override_file_copy(sdk_location, staging_dir)
options = PipelineOptions()
options.view_as(GoogleCloudOptions).staging_location = staging_dir
self.update_options(options)
options.view_as(SetupOptions).sdk_location = sdk_location
self.assertEqual(
[names.DATAFLOW_SDK_TARBALL_FILE],
dependency.stage_job_resources(options))
def test_with_extra_packages(self):
staging_dir = tempfile.mkdtemp()
source_dir = tempfile.mkdtemp()
self.create_temp_file(
os.path.join(source_dir, 'abc.tar.gz'), 'nothing')
self.create_temp_file(
os.path.join(source_dir, 'xyz.tar.gz'), 'nothing')
self.create_temp_file(
os.path.join(source_dir, 'xyz2.tar'), 'nothing')
self.create_temp_file(
os.path.join(source_dir, 'whl.whl'), 'nothing')
self.create_temp_file(
os.path.join(source_dir, dependency.EXTRA_PACKAGES_FILE), 'nothing')
options = PipelineOptions()
options.view_as(GoogleCloudOptions).staging_location = staging_dir
self.update_options(options)
options.view_as(SetupOptions).extra_packages = [
os.path.join(source_dir, 'abc.tar.gz'),
os.path.join(source_dir, 'xyz.tar.gz'),
os.path.join(source_dir, 'xyz2.tar'),
os.path.join(source_dir, 'whl.whl'),
'gs://my-gcs-bucket/gcs.tar.gz']
gcs_copied_files = []
def file_copy(from_path, to_path):
if from_path.startswith('gs://'):
gcs_copied_files.append(from_path)
_, from_name = os.path.split(from_path)
if os.path.isdir(to_path):
to_path = os.path.join(to_path, from_name)
self.create_temp_file(to_path, 'nothing')
logging.info('Fake copied GCS file: %s to %s', from_path, to_path)
elif to_path.startswith('gs://'):
logging.info('Faking file_copy(%s, %s)', from_path, to_path)
else:
shutil.copyfile(from_path, to_path)
dependency._dependency_file_copy = file_copy
self.assertEqual(
['abc.tar.gz', 'xyz.tar.gz', 'xyz2.tar', 'whl.whl', 'gcs.tar.gz',
dependency.EXTRA_PACKAGES_FILE],
dependency.stage_job_resources(options))
with open(os.path.join(staging_dir, dependency.EXTRA_PACKAGES_FILE)) as f:
self.assertEqual(['abc.tar.gz\n', 'xyz.tar.gz\n', 'xyz2.tar\n',
'whl.whl\n', 'gcs.tar.gz\n'], f.readlines())
self.assertEqual(['gs://my-gcs-bucket/gcs.tar.gz'], gcs_copied_files)
def test_with_extra_packages_missing_files(self):
staging_dir = tempfile.mkdtemp()
with self.assertRaises(RuntimeError) as cm:
options = PipelineOptions()
options.view_as(GoogleCloudOptions).staging_location = staging_dir
self.update_options(options)
options.view_as(SetupOptions).extra_packages = ['nosuchfile.tar.gz']
dependency.stage_job_resources(options)
self.assertEqual(
cm.exception.message,
'The file %s cannot be found. It was specified in the '
'--extra_packages command line option.' % 'nosuchfile.tar.gz')
def test_with_extra_packages_invalid_file_name(self):
staging_dir = tempfile.mkdtemp()
source_dir = tempfile.mkdtemp()
self.create_temp_file(
os.path.join(source_dir, 'abc.tgz'), 'nothing')
with self.assertRaises(RuntimeError) as cm:
options = PipelineOptions()
options.view_as(GoogleCloudOptions).staging_location = staging_dir
self.update_options(options)
options.view_as(SetupOptions).extra_packages = [
os.path.join(source_dir, 'abc.tgz')]
dependency.stage_job_resources(options)
self.assertEqual(
cm.exception.message,
'The --extra_package option expects a full path ending with ".tar" or '
'".tar.gz" instead of %s' % os.path.join(source_dir, 'abc.tgz'))
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
|
|
import sublime
from os.path import dirname, basename
from .core import packages_with_overrides, log
from .core import PackageListCollectionThread
###---------------------------------------------------------------------------
# When annotation is turned on in the resource browser, this is the annotation
# that is appended to the end of the resources that are currently overridden.
_annotation = " [*Override*]"
###---------------------------------------------------------------------------
class ResourceType():
"""
This class acts as a simple enumeration of the different styles of
resources that you can browse using the other classes in this module.
"""
OVERRIDE = 1
NONOVERRIDE = 2
ALL = 3
###---------------------------------------------------------------------------
class PackageBrowser():
"""
Provide the ability to browse for a package among all known packages. The
browse can be filtered to show all packages (ALL) or only those packages
that contain at least one override (OVERRIDE) as desired. The NONOVERRIDE
option is treated as ALL for the purposes of this class.
If a filter is provided, it will be invoked once for each package in the
list to see if the package should be presented; this trumps the file_type
argument.
"""
def __init__(self, window=None, file_type=ResourceType.ALL, p_filter=None):
self.window = window or sublime.active_window()
self.file_type = file_type
self.p_filter = p_filter
def _get_contents(self, pkg_list):
if self.p_filter:
return [name for name, pkg in pkg_list if self.p_filter(pkg)]
if self.file_type == ResourceType.OVERRIDE:
return packages_with_overrides(pkg_list)
else:
return [name for name, pkg in pkg_list]
def browse(self, pkg_list, on_done):
"""
Allows the user to select a package from the package list provided,
filtering the list to the criteria set in the file_type attribute.
on_done will be called with the name of the package selected, which can
be None if the panel was cancelled or no packages were found.
"""
items = self._get_contents(pkg_list)
if not items:
log("No packages contain resources of the selected type",
status=True, dialog=True)
return on_done(None)
self.window.show_quick_panel(
items=items,
on_select=lambda idx: on_done(pkg_list[items[idx]] if idx >= 0 else None))
###---------------------------------------------------------------------------
class ResourceBrowser():
"""
Provide the ability to browse for a package file among the list of files
    contained in a package. The browse can be filtered to allow for the selection
of any resource (ALL), only overrides (OVERRIDE) or only files that are not
overrides (NONOVERRIDE).
The value of the unknown argument indicates if package files whose status
is unknown should appear in the list or not. An unknown file is one which
appears in the unpacked content of the package but doesn't correspond to a
packed file.
Specifying true for annotate_overrides will make overrides have extra data
appended to the end of their names in the quick panel so that the user can
determine that they're overrides. This is only active when the resource
browsing type is ALL.
    The browse is hierarchical when the package content has a folder structure.
"""
# These keys in the result dictionary tell us what files and folders exist
# at this particular nesting level in the resulting output dictionary.
FILES='files'
FOLDERS='folders'
# In addition to the above, while browsing package content this key is
# added to stack entries to indicate what the current item is at the
# stack entry chosen.
CURRENT='current'
def __init__(self, window=None, file_type=ResourceType.ALL, unknown=True,
annotate_overrides=False):
self.window = window or sublime.active_window()
self.file_type = file_type
self.unknown = unknown
self.cache = {}
self.annotate = annotate_overrides
def _explode_files(self, files):
"""
        Explode a list of files that represent package file contents into a dict
which describes their logical path layout.
The dict has keys that indicate the files and folders stored at this
level, with each folder also being represented as a key that points to
another similar dict for the content at that level.
"""
def handle_file(file_spec, branch):
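            # Split "dir/sub/file" on the first '/' and recurse, so that, for
            # example (illustrative), ['a.txt', 'sub/b.txt'] explodes to
            #   {'files': ['a.txt'], 'folders': ['sub/'],
            #    'sub/': {'files': ['b.txt'], 'folders': []}}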
parts = file_spec.split('/', 1)
if len(parts) == 1:
return branch[self.FILES].append(parts[0])
subdir, remaining_path = parts[0] + '/', parts[1]
if subdir not in branch:
branch[self.FOLDERS].append(subdir)
branch[subdir] = {self.FILES: [], self.FOLDERS: []}
handle_file(remaining_path, branch[subdir])
retVal = {self.FILES: [], self.FOLDERS: []}
for file in sorted(files, key=lambda fn: (dirname(fn), basename(fn))):
handle_file(file, retVal)
return retVal
def _get_pkg_content(self, pkg_info):
"""
        Get the browseable contents of the package given, based on the browse
type. This caches the result in case the user goes out of a package and
then comes back in as part of a larger overall browse operation.
"""
if pkg_info.name not in self.cache:
# Collect all of the contents of the package; if there is a package
            # file, that is the canonical content; otherwise get the list of
# unpacked files.
if pkg_info.package_file():
contents = pkg_info.package_contents()
else:
contents = pkg_info.unpacked_contents()
overrides = pkg_info.override_files(simple=True)
if self.file_type == ResourceType.ALL:
if not self.annotate:
res_list = contents
else:
res_list = contents - overrides
res_list |= {res + _annotation for res in overrides }
else:
if self.file_type == ResourceType.OVERRIDE:
res_list = overrides
else:
res_list = contents - overrides
# Include files of unknown status if required.
if self.unknown:
res_list |= pkg_info.unknown_override_files()
self.cache[pkg_info.name] = self._explode_files(res_list)
return self.cache[pkg_info.name]
def select_item(self, captions, items, prior_text, stack, index):
if index >= 0:
if index == 0 and len(stack) > 0:
items = stack.pop()
return self._display_panel(items, prior_text, stack)
selected = captions[index]
children = items.get(selected, None)
if children is not None:
items[self.CURRENT] = selected
stack.append(items)
return self._display_panel(children, prior_text, stack)
if selected.endswith(_annotation):
selected = selected[:-len(_annotation)]
resource = [entry[self.CURRENT] for entry in stack]
resource.append(selected)
return self.on_done(''.join(resource))
return self.on_done(None)
def _display_panel(self, items, prior_text, stack):
captions = items[self.FOLDERS] + items[self.FILES]
if len(stack) > 0 or self.return_to_pkg:
captions.insert(0, prior_text)
self.window.show_quick_panel(
items=captions,
on_select=lambda index: self.select_item(captions, items, prior_text, stack, index))
def browse(self, pkg_info, return_to_pkg, on_done):
"""
Allows the user to select a resource from the contents of the package
provided, filtering the list to the criteria set in the file_type
attribute.
If return_to_pkg is True, the first item in the list will be an item
that indicates that the user can go up a level; it is up to the caller
        to handle what happens when this item is selected, however.
        on_done will be called with the name of the resource selected, which
        can be None if the panel was cancelled or the package had no resources.
"""
self.on_done = on_done
self.return_to_pkg = return_to_pkg
items = self._get_pkg_content(pkg_info)
if not items:
            log("Package '%s' has no resources of the selected type" % pkg_info.name,
status=True, dialog=True)
return on_done(None)
self._display_panel(items, "..", [])
###---------------------------------------------------------------------------
class PackageResourceBrowser():
"""
Opens a quick panel in the provided window to allow the user to browse for
a package resource of a given type.
Depending on the options provided, the user will be able to browse for a
package or just files within a given package. The list of resources is
filtered by the resource type provided and may annotate existing overrides
depending on the options provided.
on_done is invoked when the user makes a selection and given the package
and resource selected; both will be None if the browse was canceled by the
user.
"""
def __init__(self, pkg_name=None, resource=None, window=None,
file_type=ResourceType.ALL, pkg_list=None, unknown=True,
annotate_overrides=False, p_filter=None, on_done=None):
self.pkg_name = pkg_name
self.resource = resource
self.window = window or sublime.active_window()
self.file_type = file_type
self.pkg_list = pkg_list
self.on_done = on_done
self.cache = {}
self.pkg_browser = PackageBrowser(self.window, self.file_type, p_filter)
self.res_browser = ResourceBrowser(self.window, self.file_type, unknown, annotate_overrides)
def _on_done(self, pkg_info, resource_name):
if self.on_done is not None:
sublime.set_timeout(lambda: self.on_done(pkg_info, resource_name))
def _res_select(self, pkg_info, file):
if file == "..":
return self._start_browse(thread=None)
pkg_info = pkg_info if file is not None else None
self._on_done(pkg_info, file)
def _pkg_select(self, pkg_info, return_to_pkg):
if pkg_info is not None:
if self.resource is not None:
return self._on_done(pkg_info, self.resource)
self.res_browser.browse(pkg_info,
return_to_pkg,
lambda name: self._res_select(pkg_info, name))
else:
self._on_done(None, None)
def _start_browse(self, thread):
if thread is not None:
self.pkg_list = thread.pkg_list
if self.pkg_name is None:
return self.pkg_browser.browse(self.pkg_list, lambda pkg_info: self._pkg_select(pkg_info, True))
if self.pkg_name in self.pkg_list:
return self._pkg_select(self.pkg_list[self.pkg_name], False)
log("Package '%s' does not exist" % self.pkg_name,
status=True, dialog=True)
def browse(self):
"""
Start a browse operation based on the properties given at construction
time or set before the call.
If a package list was pre-supplied, the browse starts immediately;
otherwise a background thread captures the package information first
and then implicitly starts the browse.
The on_done callback will be invoked with the name of the package and
resource selected; both are None if the browse was cancelled.
"""
if self.pkg_list is not None:
return self._start_browse(None)
PackageListCollectionThread(self.window, "Collecting Package List",
lambda thr: self._start_browse(thr),
name_list=self.pkg_name,
get_overrides=True).start()
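# Usage sketch (illustrative only; the callback and package name below are
# assumptions, not part of this module):
#
#   def _selected(pkg_info, resource):
#       if pkg_info is not None:
#           print(pkg_info.name, resource)
#
#   PackageResourceBrowser(pkg_name="Python",
#                          file_type=ResourceType.OVERRIDE,
#                          on_done=_selected).browse()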
###---------------------------------------------------------------------------
|
|
from unittest import TestCase
from mock import ANY, Mock, call, mock_open, patch
from ceres import *
def fetch_mock_open_writes(open_mock):
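  # Join everything written through the mocked file handle so a test can assert
  # on the full serialized payload (used by test_write_metadata below).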
handle = open_mock()
return ''.join([ c[0][0] for c in handle.write.call_args_list])
class ModuleFunctionsTest(TestCase):
@patch('ceres.isdir', new=Mock(return_value=False))
@patch('ceres.CeresTree', new=Mock(spec=CeresTree))
def test_get_tree_with_no_tree(self):
tree = getTree('/graphite/storage/ceres/foo/bar')
self.assertEqual(None, tree)
@patch('ceres.CeresTree', spec=CeresTree)
@patch('ceres.isdir')
def test_get_tree_with_tree_samedir(self, isdir_mock, ceres_tree_mock):
isdir_mock.return_value = True
tree = getTree('/graphite/storage/ceres')
self.assertNotEqual(None, tree)
isdir_mock.assert_called_once_with('/graphite/storage/ceres/.ceres-tree')
ceres_tree_mock.assert_called_once_with('/graphite/storage/ceres')
class TimeSeriesDataTest(TestCase):
def setUp(self):
self.time_series = TimeSeriesData(0, 50, 5, [float(x) for x in xrange(0, 10)])
def test_timestamps_property(self):
self.assertEqual(10, len(self.time_series.timestamps))
self.assertEqual(0, self.time_series.timestamps[0])
self.assertEqual(45, self.time_series.timestamps[-1])
def test_iter_values(self):
values = list(self.time_series)
self.assertEqual(10, len(values))
self.assertEqual((0, 0.0), values[0])
self.assertEqual((45, 9.0), values[-1])
def test_merge_no_missing(self):
# merge only has effect if time series has no gaps
other_series = TimeSeriesData(0, 25, 5, [float(x * x) for x in xrange(1, 6)])
original_values = list(self.time_series)
self.time_series.merge(other_series)
self.assertEqual(original_values, list(self.time_series))
def test_merge_with_empty(self):
new_series = TimeSeriesData(0, 50, 5, [None] * 10)
new_series.merge(self.time_series)
self.assertEqual(list(self.time_series), list(new_series))
def test_merge_with_holes(self):
values = []
for x in xrange(0, 10):
if x % 2 == 0:
values.append(x)
else:
values.append(None)
new_series = TimeSeriesData(0, 50, 5, values)
new_series.merge(self.time_series)
self.assertEqual(list(self.time_series), list(new_series))
class CeresTreeTest(TestCase):
def setUp(self):
with patch('ceres.isdir', new=Mock(return_value=True)):
self.ceres_tree = CeresTree('/graphite/storage/ceres')
@patch('ceres.isdir', new=Mock(return_value=False))
def test_init_invalid(self):
self.assertRaises(ValueError, CeresTree, '/nonexistent_path')
@patch('ceres.isdir', new=Mock(return_value=True))
@patch('ceres.abspath')
def test_init_valid(self, abspath_mock):
abspath_mock.return_value = '/var/graphite/storage/ceres'
tree = CeresTree('/graphite/storage/ceres')
abspath_mock.assert_called_once_with('/graphite/storage/ceres')
self.assertEqual('/var/graphite/storage/ceres', tree.root)
@patch('ceres.isdir', new=Mock(return_value=False))
@patch.object(CeresTree, '__init__')
@patch('os.makedirs')
def test_create_tree_new_dir(self, makedirs_mock, ceres_tree_init_mock):
ceres_tree_init_mock.return_value = None
with patch('__builtin__.open', mock_open()) as open_mock:
CeresTree.createTree('/graphite/storage/ceres')
makedirs_mock.assert_called_once_with('/graphite/storage/ceres/.ceres-tree', DIR_PERMS)
self.assertFalse(open_mock.called)
ceres_tree_init_mock.assert_called_once_with('/graphite/storage/ceres')
@patch('ceres.isdir', new=Mock(return_value=True))
@patch.object(CeresTree, '__init__')
@patch('os.makedirs')
def test_create_tree_existing_dir(self, makedirs_mock, ceres_tree_init_mock):
ceres_tree_init_mock.return_value = None
with patch('__builtin__.open', mock_open()) as open_mock:
CeresTree.createTree('/graphite/storage/ceres')
self.assertFalse(makedirs_mock.called)
self.assertFalse(open_mock.called)
ceres_tree_init_mock.assert_called_once_with('/graphite/storage/ceres')
@patch('ceres.isdir', new=Mock(return_value=True))
@patch.object(CeresTree, '__init__', new=Mock(return_value=None))
@patch('os.makedirs', new=Mock())
def test_create_tree_write_props(self):
props = {
"foo_prop": "foo_value",
"bar_prop": "bar_value"}
with patch('__builtin__.open', mock_open()) as open_mock:
CeresTree.createTree('/graphite/storage/ceres', **props)
for (prop,value) in props.items():
open_mock.assert_any_call(join('/graphite/storage/ceres', '.ceres-tree', prop), 'w')
open_mock.return_value.write.assert_any_call(value)
@patch('ceres.abspath', new=Mock(side_effect=lambda x: x))
def test_get_node_path_clean(self):
result = self.ceres_tree.getNodePath('/graphite/storage/ceres/metric/foo')
self.assertEqual('metric.foo', result)
@patch('ceres.abspath', new=Mock(side_effect=lambda x: x))
def test_get_node_path_trailing_slash(self):
result = self.ceres_tree.getNodePath('/graphite/storage/ceres/metric/foo/')
self.assertEqual('metric.foo', result)
@patch('ceres.abspath', new=Mock(side_effect=lambda x: x))
def test_get_node_path_outside_tree(self):
self.assertRaises(ValueError, self.ceres_tree.getNodePath, '/metric/foo')
@patch('ceres.CeresNode', spec=CeresNode)
def test_get_node_uncached(self, ceres_node_mock):
ceres_node_mock.isNodeDir.return_value = True
result = self.ceres_tree.getNode('metrics.foo')
ceres_node_mock.assert_called_once_with(
self.ceres_tree,
'metrics.foo',
'/graphite/storage/ceres/metrics/foo')
self.assertEqual(result, ceres_node_mock())
@patch('ceres.CeresNode', spec=CeresNode)
@patch('ceres.abspath', new=Mock(side_effect=lambda x: x))
@patch('ceres.glob', new=Mock(side_effect=lambda x: [x]))
def test_find_explicit_metric(self, ceres_node_mock):
ceres_node_mock.isNodeDir.return_value = True
result = list(self.ceres_tree.find('metrics.foo'))
self.assertEqual(1, len(result))
self.assertEqual(result[0], ceres_node_mock())
@patch('ceres.CeresNode', spec=CeresNode)
@patch('ceres.abspath', new=Mock(side_effect=lambda x: x))
@patch('ceres.glob')
def test_find_wildcard(self, glob_mock, ceres_node_mock):
matches = ['foo', 'bar', 'baz']
glob_mock.side_effect = lambda x: [x.replace('*', m) for m in matches]
ceres_node_mock.isNodeDir.return_value = True
result = list(self.ceres_tree.find('metrics.*'))
self.assertEqual(3, len(result))
ceres_node_mock.assert_any_call(self.ceres_tree, 'metrics.foo', ANY)
ceres_node_mock.assert_any_call(self.ceres_tree, 'metrics.bar', ANY)
ceres_node_mock.assert_any_call(self.ceres_tree, 'metrics.baz', ANY)
@patch('ceres.CeresNode', spec=CeresNode)
@patch('ceres.abspath', new=Mock(side_effect=lambda x: x))
@patch('ceres.glob', new=Mock(return_value=[]))
def test_find_wildcard_no_matches(self, ceres_node_mock):
ceres_node_mock.isNodeDir.return_value = False
result = list(self.ceres_tree.find('metrics.*'))
self.assertEqual(0, len(result))
self.assertFalse(ceres_node_mock.called)
@patch('ceres.CeresNode', spec=CeresNode)
@patch('ceres.abspath', new=Mock(side_effect=lambda x: x))
@patch('ceres.glob', new=Mock(side_effect=lambda x: [x]))
def test_find_metric_with_interval(self, ceres_node_mock):
ceres_node_mock.isNodeDir.return_value = True
ceres_node_mock.return_value.hasDataForInterval.return_value = False
result = list(self.ceres_tree.find('metrics.foo', 0, 1000))
self.assertEqual(0, len(result))
ceres_node_mock.return_value.hasDataForInterval.assert_called_once_with(0, 1000)
@patch('ceres.CeresNode', spec=CeresNode)
@patch('ceres.abspath', new=Mock(side_effect=lambda x: x))
@patch('ceres.glob', new=Mock(side_effect=lambda x: [x]))
def test_find_metric_with_interval_not_found(self, ceres_node_mock):
ceres_node_mock.isNodeDir.return_value = True
ceres_node_mock.return_value.hasDataForInterval.return_value = True
result = list(self.ceres_tree.find('metrics.foo', 0, 1000))
self.assertEqual(result[0], ceres_node_mock())
ceres_node_mock.return_value.hasDataForInterval.assert_called_once_with(0, 1000)
def test_store_invalid_node(self):
with patch.object(self.ceres_tree, 'getNode', new=Mock(return_value=None)):
datapoints = [(100, 1.0)]
self.assertRaises(NodeNotFound, self.ceres_tree.store, 'metrics.foo', datapoints)
@patch('ceres.CeresNode', spec=CeresNode)
def test_store_valid_node(self, ceres_node_mock):
datapoints = [(100, 1.0)]
self.ceres_tree.store('metrics.foo', datapoints)
ceres_node_mock.assert_called_once_with(self.ceres_tree, 'metrics.foo', ANY)
ceres_node_mock.return_value.write.assert_called_once_with(datapoints)
def fetch_invalid_node(self):
with patch.object(self.ceres_tree, 'getNode', new=Mock(return_value=None)):
self.assertRaises(NodeNotFound, self.ceres_tree.fetch, 'metrics.foo')
@patch('ceres.CeresNode', spec=CeresNode)
def fetch_metric(self, ceres_node_mock):
read_mock = ceres_node_mock.return_value.read
read_mock.return_value = Mock(spec=TimeSeriesData)
result = self.ceres_tree.fetch('metrics.foo', 0, 1000)
ceres_node_mock.assert_called_once_with(self.ceres_tree, 'metrics.foo', ANY)
read_mock.assert_called_once_with(0, 1000)
self.assertEqual(Mock(spec=TimeSeriesData), result)
class CeresNodeTest(TestCase):
def setUp(self):
with patch('ceres.isdir', new=Mock(return_value=True)):
with patch('ceres.exists', new=Mock(return_value=True)):
self.ceres_tree = CeresTree('/graphite/storage/ceres')
self.ceres_node = CeresNode(self.ceres_tree, 'sample_metric', '/graphite/storage/ceres/sample_metric')
self.ceres_node.timeStep = 60
slice_configs = [
( 1200, 1800, 60 ),
( 600, 1200, 60 )]
self.ceres_slices = []
for start, end, step in slice_configs:
slice_mock = Mock(spec=CeresSlice)
slice_mock.startTime = start
slice_mock.endTime = end
slice_mock.timeStep = step
self.ceres_slices.append(slice_mock)
def test_init_sets_default_cache_behavior(self):
ceres_node = CeresNode(self.ceres_tree, 'sample_metric', '/graphite/storage/ceres/sample_metric')
self.assertEqual(DEFAULT_SLICE_CACHING_BEHAVIOR, ceres_node.sliceCachingBehavior)
@patch('ceres.os.makedirs', new=Mock())
@patch('ceres.CeresNode.writeMetadata')
def test_create_sets_a_default_timestep(self, write_metadata_mock):
ceres_node = CeresNode.create(self.ceres_tree, 'sample_metric')
write_metadata_mock.assert_called_with(dict(timeStep=DEFAULT_TIMESTEP))
@patch('ceres.os.makedirs', new=Mock())
@patch('ceres.CeresNode.writeMetadata', new=Mock())
def test_create_returns_new_ceres_node(self):
ceres_node = CeresNode.create(self.ceres_tree, 'sample_metric')
self.assertTrue(isinstance(ceres_node, CeresNode))
def test_write_metadata(self):
import json
open_mock = mock_open()
metadata = dict(timeStep=60, aggregationMethod='avg')
with patch('__builtin__.open', open_mock):
self.ceres_node.writeMetadata(metadata)
self.assertEquals(json.dumps(metadata), fetch_mock_open_writes(open_mock))
def test_read_metadata_sets_timestep(self):
import json
metadata = dict(timeStep=60, aggregationMethod='avg')
json_metadata = json.dumps(metadata)
open_mock = mock_open(read_data=json_metadata)
with patch('__builtin__.open', open_mock):
self.ceres_node.readMetadata()
open_mock().read.assert_called_once()
self.assertEqual(60, self.ceres_node.timeStep)
def test_set_slice_caching_behavior_validates_names(self):
self.ceres_node.setSliceCachingBehavior('none')
self.assertEquals('none', self.ceres_node.sliceCachingBehavior)
self.ceres_node.setSliceCachingBehavior('all')
self.assertEquals('all', self.ceres_node.sliceCachingBehavior)
self.ceres_node.setSliceCachingBehavior('latest')
self.assertEquals('latest', self.ceres_node.sliceCachingBehavior)
self.assertRaises(ValueError, self.ceres_node.setSliceCachingBehavior, 'foo')
# Assert unchanged
self.assertEquals('latest', self.ceres_node.sliceCachingBehavior)
def test_slices_is_a_generator(self):
from types import GeneratorType
self.assertTrue(isinstance(self.ceres_node.slices, GeneratorType))
def test_slices_returns_cached_set_when_behavior_is_all(self):
def mock_slice():
return Mock(spec=CeresSlice)
self.ceres_node.setSliceCachingBehavior('all')
    cached_contents = [ mock_slice() for c in range(4) ]
self.ceres_node.sliceCache = cached_contents
with patch('ceres.CeresNode.readSlices') as read_slices_mock:
slice_list = list(self.ceres_node.slices)
self.assertFalse(read_slices_mock.called)
self.assertEquals(cached_contents, slice_list)
def test_slices_returns_first_cached_when_behavior_is_latest(self):
self.ceres_node.setSliceCachingBehavior('latest')
cached_contents = Mock(spec=CeresSlice)
self.ceres_node.sliceCache = cached_contents
read_slices_mock = Mock(return_value=[])
with patch('ceres.CeresNode.readSlices', new=read_slices_mock):
slice_iter = self.ceres_node.slices
self.assertEquals(cached_contents, slice_iter.next())
# We should be yielding cached before trying to read
self.assertFalse(read_slices_mock.called)
def test_slices_reads_remaining_when_behavior_is_latest(self):
self.ceres_node.setSliceCachingBehavior('latest')
cached_contents = Mock(spec=CeresSlice)
self.ceres_node.sliceCache = cached_contents
read_slices_mock = Mock(return_value=[(0,60)])
with patch('ceres.CeresNode.readSlices', new=read_slices_mock):
slice_iter = self.ceres_node.slices
slice_iter.next()
# *now* we expect to read from disk
try:
while True:
slice_iter.next()
except StopIteration:
pass
read_slices_mock.assert_called_once_with()
def test_slices_reads_from_disk_when_behavior_is_none(self):
self.ceres_node.setSliceCachingBehavior('none')
read_slices_mock = Mock(return_value=[(0,60)])
with patch('ceres.CeresNode.readSlices', new=read_slices_mock):
slice_iter = self.ceres_node.slices
slice_iter.next()
read_slices_mock.assert_called_once_with()
def test_slices_reads_from_disk_when_cache_empty_and_behavior_all(self):
self.ceres_node.setSliceCachingBehavior('all')
read_slices_mock = Mock(return_value=[(0,60)])
with patch('ceres.CeresNode.readSlices', new=read_slices_mock):
slice_iter = self.ceres_node.slices
slice_iter.next()
read_slices_mock.assert_called_once_with()
def test_slices_reads_from_disk_when_cache_empty_and_behavior_latest(self):
self.ceres_node.setSliceCachingBehavior('latest')
read_slices_mock = Mock(return_value=[(0,60)])
with patch('ceres.CeresNode.readSlices', new=read_slices_mock):
slice_iter = self.ceres_node.slices
slice_iter.next()
read_slices_mock.assert_called_once_with()
@patch('ceres.exists', new=Mock(return_value=False))
def test_read_slices_raises_when_node_doesnt_exist(self):
self.assertRaises(NodeDeleted, self.ceres_node.readSlices)
@patch('ceres.exists', new=Mock(return_value=True))
def test_read_slices_ignores_not_slices(self):
listdir_mock = Mock(return_value=['[email protected]', '[email protected]', 'foo'])
with patch('ceres.os.listdir', new=listdir_mock):
self.assertEquals(2, len(self.ceres_node.readSlices()))
@patch('ceres.exists', new=Mock(return_value=True))
def test_read_slices_parses_slice_filenames(self):
listdir_mock = Mock(return_value=['[email protected]', '[email protected]'])
with patch('ceres.os.listdir', new=listdir_mock):
slice_infos = self.ceres_node.readSlices()
self.assertTrue((0,60) in slice_infos)
self.assertTrue((0,300) in slice_infos)
@patch('ceres.exists', new=Mock(return_value=True))
def test_read_slices_reverse_sorts_by_time(self):
listdir_mock = Mock(return_value=[
'[email protected]',
'[email protected]',
'[email protected]',
'[email protected]',
'[email protected]'])
with patch('ceres.os.listdir', new=listdir_mock):
slice_infos = self.ceres_node.readSlices()
slice_timestamps = [ s[0] for s in slice_infos ]
self.assertEqual([600,320,120,0,0], slice_timestamps)
def test_no_data_exists_if_no_slices_exist(self):
with patch('ceres.CeresNode.readSlices', new=Mock(return_value=[])):
self.assertFalse(self.ceres_node.hasDataForInterval(0,60))
def test_no_data_exists_if_no_slices_exist_and_no_time_specified(self):
with patch('ceres.CeresNode.readSlices', new=Mock(return_value=[])):
self.assertFalse(self.ceres_node.hasDataForInterval(None,None))
def test_data_exists_if_slices_exist_and_no_time_specified(self):
with patch('ceres.CeresNode.slices', new=self.ceres_slices):
self.assertTrue(self.ceres_node.hasDataForInterval(None,None))
def test_data_exists_if_slice_covers_interval_completely(self):
with patch('ceres.CeresNode.slices', new=[self.ceres_slices[0]]):
self.assertTrue(self.ceres_node.hasDataForInterval(1200,1800))
def test_data_exists_if_slice_covers_interval_end(self):
with patch('ceres.CeresNode.slices', new=[self.ceres_slices[0]]):
self.assertTrue(self.ceres_node.hasDataForInterval(600, 1260))
def test_data_exists_if_slice_covers_interval_start(self):
with patch('ceres.CeresNode.slices', new=[self.ceres_slices[0]]):
self.assertTrue(self.ceres_node.hasDataForInterval(1740, 2100))
def test_no_data_exists_if_slice_touches_interval_end(self):
with patch('ceres.CeresNode.slices', new=[self.ceres_slices[0]]):
self.assertFalse(self.ceres_node.hasDataForInterval(600, 1200))
def test_no_data_exists_if_slice_touches_interval_start(self):
with patch('ceres.CeresNode.slices', new=[self.ceres_slices[0]]):
self.assertFalse(self.ceres_node.hasDataForInterval(1800, 2100))
def test_compact_returns_empty_if_passed_empty(self):
self.assertEqual([], self.ceres_node.compact([]))
def test_compact_filters_null_values(self):
self.assertEqual([], self.ceres_node.compact([(60,None)]))
def test_compact_rounds_timestamps_down_to_step(self):
self.assertEqual([[(600,0)]], self.ceres_node.compact([(605,0)]))
def test_compact_drops_duplicate_timestamps(self):
datapoints = [ (600, 0), (600, 0) ]
compacted = self.ceres_node.compact(datapoints)
self.assertEqual([[(600, 0)]], compacted)
def test_compact_groups_contiguous_points(self):
datapoints = [ (600, 0), (660, 0), (840,0) ]
compacted = self.ceres_node.compact(datapoints)
self.assertEqual([[(600, 0), (660,0)], [(840,0)]], compacted)
def test_write_noops_if_no_datapoints(self):
with patch('ceres.CeresNode.slices', new=self.ceres_slices):
self.ceres_node.write([])
self.assertFalse(self.ceres_slices[0].write.called)
def test_write_within_first_slice(self):
datapoints = [(1200, 0.0), (1260, 1.0), (1320, 2.0)]
with patch('ceres.CeresNode.slices', new=self.ceres_slices):
self.ceres_node.write(datapoints)
self.ceres_slices[0].write.assert_called_once_with(datapoints)
@patch('ceres.CeresSlice.create')
def test_write_within_first_slice_doesnt_create(self, slice_create_mock):
datapoints = [(1200, 0.0), (1260, 1.0), (1320, 2.0)]
with patch('ceres.CeresNode.slices', new=self.ceres_slices):
self.ceres_node.write(datapoints)
self.assertFalse(slice_create_mock.called)
@patch('ceres.CeresSlice.create', new=Mock())
def test_write_within_first_slice_with_gaps(self):
datapoints = [ (1200,0.0), (1320,2.0) ]
with patch('ceres.CeresNode.slices', new=self.ceres_slices):
self.ceres_node.write(datapoints)
# sorted most recent first
calls = [call.write([datapoints[1]]), call.write([datapoints[0]])]
self.ceres_slices[0].assert_has_calls(calls)
@patch('ceres.CeresSlice.create', new=Mock())
def test_write_within_previous_slice(self):
datapoints = [ (720,0.0), (780,2.0) ]
with patch('ceres.CeresNode.slices', new=self.ceres_slices):
self.ceres_node.write(datapoints)
# 2nd slice has this range
self.ceres_slices[1].write.assert_called_once_with(datapoints)
@patch('ceres.CeresSlice.create')
def test_write_within_previous_slice_doesnt_create(self, slice_create_mock):
datapoints = [ (720,0.0), (780,2.0) ]
with patch('ceres.CeresNode.slices', new=self.ceres_slices):
self.ceres_node.write(datapoints)
self.assertFalse(slice_create_mock.called)
@patch('ceres.CeresSlice.create', new=Mock())
def test_write_within_previous_slice_with_gaps(self):
datapoints = [ (720,0.0), (840,2.0) ]
with patch('ceres.CeresNode.slices', new=self.ceres_slices):
self.ceres_node.write(datapoints)
calls = [call.write([datapoints[1]]), call.write([datapoints[0]])]
self.ceres_slices[1].assert_has_calls(calls)
@patch('ceres.CeresSlice.create', new=Mock())
def test_write_across_slice_boundaries(self):
datapoints = [ (1080,0.0), (1140,1.0), (1200, 2.0), (1260, 3.0) ]
with patch('ceres.CeresNode.slices', new=self.ceres_slices):
self.ceres_node.write(datapoints)
self.ceres_slices[0].write.assert_called_once_with(datapoints[2:4])
self.ceres_slices[1].write.assert_called_once_with(datapoints[0:2])
@patch('ceres.CeresSlice.create')
def test_write_before_earliest_slice_creates_new(self, slice_create_mock):
datapoints = [ (300, 0.0) ]
with patch('ceres.CeresNode.slices', new=self.ceres_slices):
self.ceres_node.write(datapoints)
slice_create_mock.assert_called_once_with(self.ceres_node, 300, 60)
@patch('ceres.CeresSlice.create')
def test_write_before_earliest_slice_writes_to_new_one(self, slice_create_mock):
datapoints = [ (300, 0.0) ]
with patch('ceres.CeresNode.slices', new=self.ceres_slices):
self.ceres_node.write(datapoints)
slice_create_mock.return_value.write.assert_called_once_with(datapoints)
@patch('ceres.CeresSlice.create')
def test_create_during_write_clears_slice_cache(self, slice_create_mock):
self.ceres_node.setSliceCachingBehavior('all')
self.ceres_node.sliceCache = self.ceres_slices
datapoints = [ (300, 0.0) ]
with patch('ceres.CeresNode.slices', new=self.ceres_slices):
self.ceres_node.write(datapoints)
self.assertEquals(None, self.ceres_node.sliceCache)
class CeresSliceTest(TestCase):
def setUp(self):
with patch('ceres.isdir', new=Mock(return_value=True)):
with patch('ceres.exists', new=Mock(return_value=True)):
self.ceres_tree = CeresTree('/graphite/storage/ceres')
self.ceres_node = CeresNode(self.ceres_tree, 'sample_metric', '/graphite/storage/ceres/sample_metric')
def test_init_sets_fspath_name(self):
ceres_slice = CeresSlice(self.ceres_node, 0, 60)
self.assertTrue(ceres_slice.fsPath.endswith('[email protected]'))
|
|
#!/usr/bin/env python
"""Test basic features of the core web.claims application.
Create Segments. Create Loops with Segments. Create Messages with Loops and
Segments.
"""
from __future__ import print_function
from django.test import TestCase
import os.path
from X12.parse import SegmentToken
from web.claims.models import Factory, X12Segment, X12Loop, X12Message
from web.claims.models import ClaimGroup, GroupStatus
import extras.standardSegment
from web.claims.parse import parser
import json
class SegmentTest(TestCase):
"""Builds Segment and assures that various getters and setters work."""
def setUp( self ):
segText= "ISA*03*gjohnson2 *01*0000000000*ZZ*0000000Eliginet*ZZ*BLUECROSS BLUES*071015*0903*U*00401*000242835*0*P*:"
segToken= SegmentToken( segText.split("*") )
self.isa= Factory.makeSegment( segToken )
self.isa.save()
self.id= self.isa.id
def tearDown( self ):
X12Segment.objects.all().delete()
X12Loop.objects.all().delete()
X12Message.objects.all().delete()
def testPositionalContent( self ):
#print( "testPositionalContent" )
#print( self.isa )
self.assertEquals( "ISA", self.isa.getByPos(0) )
self.assertEquals( "03", self.isa.getByPos(1) )
self.assertEquals( ":", self.isa.getByPos(16) )
def testNamedContent( self ):
self.isa.bind( extras.standardSegment.isa )
self.assertEquals( "03", self.isa.getByName( "ISA01" ))
self.assertEquals( "000242835", self.isa.getByName( "ISA13"))
def testSetter( self ):
isa= X12Segment.objects.get( pk=self.id )
isa.setByPos( 1, "ZZ" )
isa.save()
reFetch= X12Segment.objects.get( pk=self.id )
self.assertEquals( "ZZ", reFetch.getByPos(1) )
class LoopTest( TestCase ):
"""Builds Loop and Segment. Loop consists of mixed Segments and Loops."""
def setUp( self ):
isaText= "ISA*03*gjohnson2 *01*0000000000*ZZ*0000000Eliginet*ZZ*BLUECROSS BLUES*071015*0903*U*00401*000242835*0*P*:"
isaToken= SegmentToken( isaText.split("*") )
ieaText= "IEA*1*000242835"
ieaToken= SegmentToken( ieaText.split("*") )
self.isa= Factory.makeSegment( isaToken )
self.iea= Factory.makeSegment( ieaToken )
self.gs_loop= Factory.makeLoop( "GS_LOOP" )
self.isa_loop= Factory.makeLoop( "ISA_LOOP", self.isa, self.gs_loop, self.iea)
self.id= self.isa_loop.id
def tearDown( self ):
X12Segment.objects.all().delete()
X12Loop.objects.all().delete()
X12Message.objects.all().delete()
def testLoop( self ):
#print( "testLoop" )
# Get the test loop from the DB
isa_loop= X12Loop.objects.get(pk=self.id)
#print( isa_loop )
for loop in isa_loop.subloop_set.all():
#print( loop.name, loop.kind, loop.thePosition, loop.theParent.name )
pass
children= isa_loop.children()
self.assertEquals( 3, len(children) )
self.assertEquals( self.isa, children[0] )
self.assertEquals( self.gs_loop, children[1] )
self.assertEquals( self.iea, children[2] )
class MessageTest( TestCase ):
"""Builds Message of Loops and Segments."""
def setUp( self ):
self.sample_278="ISA*03*gjohnson2 *01*0000000000*ZZ*0000000Eliginet*ZZ*BLUECROSS BLUES*071015*0903*U*00401*000242835*0*P*:~GS*HI*0000000Eliginet*BLUECROSS BLUES*20071015*0903*241935*X*004010X094A1~ST*278*242835~BHT*0078*13*GXEDWLXQYKII*20071015*0903~HL*1**20*1~NM1*X3*2*BLUECROSS BLUESHIELD OF WESTERN NEW*****PI*55204~HL*2*1*21*1~NM1*1P*1*SHEIKH*ZIA****24*161590688~REF*ZH*000524454008~N3*4039 ROUTE 219*SUITE 102~N4*SALAMANCA*NY*14779~HL*3*2*22*1~HI*BF:706.1~NM1*IL*1*burton*amanda****MI*yjw88034076701~DMG*D8*19900815*U~HL*4*3*19*1~NM1*SJ*1*JAREMKO*WILLIAM****24*161482964~REF*ZH*000511127003~N3*2646 WEST STATE STREET*SUITE 405~N4*OLEAN*NY*147600000~HL*5*4*SS*0~TRN*1*1*9999955204~UM*SC*I*******Y~DTP*472*RD8*20071015-20080415~HSD*VS*30~SE*24*242835~GE*1*241935~IEA*1*000242835~"
segments= [ SegmentToken(s.split('*')) for s in self.sample_278.split("~") ]
self.msg= Factory.makeMessage( "278" )
self.loop_isa= Factory.makeLoop( "LOOP_ISA" )
self.loop_isa.addChild( Factory.makeSegment( segments[0] ) )
self.loop_gs= Factory.makeLoop( "LOOP_GS" )
self.loop_gs.addChild( Factory.makeSegment( segments[1] ) )
self.loop_st= Factory.makeLoop( "LOOP_ST" )
self.loop_st.addChild( Factory.makeSegment( segments[2] ) )
self.loop_st.addChild( Factory.makeSegment( segments[3] ) )
self.loop_st.addChild( Factory.makeSegment( segments[-4] ) )
self.loop_gs.addChild( self.loop_st )
self.loop_gs.addChild( Factory.makeSegment( segments[-3] ) )
self.loop_isa.addChild( self.loop_gs )
self.loop_isa.addChild( Factory.makeSegment( segments[-2] ) )
self.msg.addChild( self.loop_isa )
def tearDown( self ):
X12Segment.objects.all().delete()
X12Loop.objects.all().delete()
X12Message.objects.all().delete()
def testMessage( self ):
#print( self.msg )
self.assertEquals( "278", self.msg.name )
self.assertEquals( 1, self.msg.x12loop_set.count() )
def testSegments( self ):
segs= self.msg.segs()
self.assertEquals( 7, len(segs) )
self.assertEquals( "ISA", segs[0].getByPos(0) )
self.assertEquals( "GS", segs[1].getByPos(0) )
self.assertEquals( "ST", segs[2].getByPos(0) )
self.assertEquals( "BHT", segs[3].getByPos(0) )
self.assertEquals( "SE", segs[4].getByPos(0) )
self.assertEquals( "GE", segs[5].getByPos(0) )
self.assertEquals( "IEA", segs[6].getByPos(0) )
def testMarshall( self ):
txt= self.msg.marshall()
start, mid, end = txt.partition( "~SE" )
self.assertTrue( self.sample_278.startswith( start+"~" ) )
self.assertTrue( self.sample_278.endswith( "SE"+end ) )
class ParseTest( TestCase ):
"""Uses the manually-build :samp:`278` parser to parse and persist a complete message."""
def setUp( self ):
import os
#print( "Working Dir", os.getcwd() )
# The example_278.py builds parse_278
# execfile("../tests/example_278.py")
from test.example_278 import parse_278
self.parser= parse_278
self.mssgs= []
def tearDown( self ):
self.gs.delete()
self.group.delete()
X12Segment.objects.all().delete()
X12Loop.objects.all().delete()
X12Message.objects.all().delete()
def testParseToSave( self ):
self.gs, _ = GroupStatus.objects.get_or_create( name='Base' )
self.group, status = ClaimGroup.objects.get_or_create(
name= "Test Group",
description= "Sample Claims",
status= GroupStatus.objects.get( name='Base' ),
owner= "me" )
count= 0
source= {}
with open( os.path.join("test","TEST 278_13 TXNS.txt"),"r") as example:
for msg in example:
msg= msg.strip()
if len(msg) == 0: continue
x12msg= self.parser.unmarshall( msg, Factory )
x12msg.name= "MSG %d" % ( count, )
x12msg.group= self.group
x12msg.save()
source[ x12msg.id ]= msg
count += 1
self.mssgs.append( x12msg )
for msgId, msgTxt in source.items():
dbMsg= X12Message.objects.get( pk=msgId )
self.assertEquals( msgTxt, dbMsg.marshall() )
# Locate unique id's (ISA13)
isaLoop= dbMsg.x12loop_set.get( name="ISA" )
isaSeg= isaLoop.child("segment","ISA")[0]
self.assertTrue( isaSeg.getByPos(13) in ( '000032679','000242835','000242836','000032674','000242839' ), "Not %r" % ( isaSeg.getByPos(13) , ) )
# XXX - should look at structure more closely, but source definition
# for the 278 parser is not structured well.
x12msg= X12Message.objects.get( name="MSG 0" )
print( x12msg )
class TestWS( TestCase ):
def setUp( self ):
sample= os.path.join("test","837-example.txt")
with open( sample, "rU" ) as claims:
self.claimText= "".join(x.strip() for x in claims)
msg= parser.unmarshall( self.claimText, Factory )
msg.name= '837_example'
msg.save()
def test_load( self ):
properties = {
'TYPE': '', # a :class:`ClaimType`
'SECONDARY': '', # a :class:`SecondaryCoverage`
'LOCATION': '', # a :class:`Location`
'BENEFIT': '', # a :class:`Benefit`
'TYPE-OF-SERVICE': '', # a :class:`TypeOfService`
}
prop_json= json.dumps( properties )
constraints = {
'GENDER': '', #
'AGE-FROM': 0, #
'AGE-TO': 199, #
}
cons_json= json.dumps( constraints )
params= {'claim':self.claimText, 'claim_id':'test_load',
'properties':prop_json, 'constraints':cons_json}
response= self.client.post( "/claim/load/", params )
#print( response.content )
self.assertEquals( 201, response.status_code )
object= json.loads( response.content )
self.assertEqual( "test_load", object['claim_id'] )
self.assertEqual( self.claimText, object['claim'] )
def test_fetch( self ):
response= self.client.get( "/claim/837_example/" )
self.assertEquals( 200, response.status_code )
object= json.loads( response.content )
self.assertEqual( "837_example", object['claim_id'] )
self.assertIsNone( object['message'] )
self.assertEqual( self.claimText, object['claim'] )
from test.test_navigation import TestNavigationX12
import web.claims.models
class TestNavigationClaims( TestNavigationX12 ):
"""Test message structure navigation using :mod:`web.claims` class definitions
instead of the X12 package class definitions.
This plugs in the :class:`web.claims.models.Factory`.
"""
factory= web.claims.models.Factory
|
|
#!/usr/bin/env python
"""ug2html.py -- Creates HTML version of Robot Framework User Guide
Usage: ug2html.py [ cr(eate) | dist | zip ]
create .. Creates the user guide so that it has relative links to images,
library docs, etc. Mainly used to test how changes look in HTML.
dist .... Creates the user guide under 'robotframework-userguide-<version>'
directory and also copies all needed images and other link targets
there. Also compiles library docs to ensure latest versions are
included. The created output directory can thus be distributed
independently.
zip ..... Uses 'dist' to create a stand-alone distribution and then packages
it into 'robotframework-userguide-<version>.zip'
The version number to use is read automatically from the 'src/robot/version.py'
file created by 'package.py'.
"""
import os
import sys
import shutil
# First part of this file is Pygments configuration and actual
# documentation generation follows it.
#
#
# Pygments configuration
# ----------------------
#
# This code is from 'external/rst-directive.py' file included in Pygments 0.9
# distribution. For more details see http://pygments.org/docs/rstdirective/
#
"""
The Pygments MoinMoin Parser
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This fragment is a Docutils_ 0.4 directive that renders source code
(to HTML only, currently) via Pygments.
To use it, adjust the options below and copy the code into a module
that you import on initialization. The code then automatically
registers a ``sourcecode`` directive that you can use instead of
normal code blocks like this::
.. sourcecode:: python
My code goes here.
If you want to have different code styles, e.g. one with line numbers
and one without, add formatters with their names in the VARIANTS dict
below. You can invoke them instead of the DEFAULT one by using a
directive option::
.. sourcecode:: python
:linenos:
My code goes here.
Look at the `directive documentation`_ to get all the gory details.
.. _Docutils: http://docutils.sf.net/
.. _directive documentation:
http://docutils.sourceforge.net/docs/howto/rst-directives.html
:copyright: 2007 by Georg Brandl.
:license: BSD, see LICENSE for more details.
"""
# Options
# ~~~~~~~
# Set to True if you want inline CSS styles instead of classes
INLINESTYLES = False
from pygments.formatters import HtmlFormatter
# The default formatter
DEFAULT = HtmlFormatter(noclasses=INLINESTYLES)
# Add name -> formatter pairs for every variant you want to use
VARIANTS = {
# 'linenos': HtmlFormatter(noclasses=INLINESTYLES, linenos=True),
}
from docutils import nodes
from docutils.parsers.rst import directives
from pygments import highlight
from pygments.lexers import get_lexer_by_name, TextLexer
def pygments_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
try:
if arguments[0] == 'robotframework':
try:
from robotframeworklexer import RobotFrameworkLexer
lexer = RobotFrameworkLexer()
except ImportError:
sys.exit('RobotFrameworkLexer needed for syntax highlighting '
'until Pygments version with RF 2.9 syntax is released.\n\n'
'\tpip install -U robotframeworklexer')
else:
lexer = get_lexer_by_name(arguments[0])
except ValueError:
# no lexer found - use the text one instead of an exception
lexer = TextLexer()
# take an arbitrary option if more than one is given
formatter = options and VARIANTS[options.keys()[0]] or DEFAULT
# possibility to read the content from an external file
filtered = [ line for line in content if line.strip() ]
if len(filtered) == 1:
path = filtered[0].replace('/', os.sep)
if os.path.isfile(path):
content = open(path).read().splitlines()
parsed = highlight(u'\n'.join(content), lexer, formatter)
return [nodes.raw('', parsed, format='html')]
pygments_directive.arguments = (1, 0, 1)
pygments_directive.content = 1
pygments_directive.options = dict([(key, directives.flag) for key in VARIANTS])
directives.register_directive('sourcecode', pygments_directive)
#
# Create the user guide using docutils
#
# This code is based on rst2html.py distributed with docutils
#
CURDIR = os.path.dirname(os.path.abspath(__file__))
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
def create_userguide():
from docutils.core import publish_cmdline
print 'Creating user guide ...'
version, version_file = _update_version()
install_file = _copy_installation_instructions()
description = 'HTML generator for Robot Framework User Guide.'
arguments = ['--time',
'--stylesheet-path', 'src/userguide.css',
'src/RobotFrameworkUserGuide.rst',
'RobotFrameworkUserGuide.html']
os.chdir(CURDIR)
publish_cmdline(writer_name='html', description=description, argv=arguments)
os.unlink(version_file)
os.unlink(install_file)
ugpath = os.path.abspath(arguments[-1])
print ugpath
return ugpath, version
def _update_version():
version = _get_version()
print 'Version:', version
with open(os.path.join(CURDIR, 'src', 'version.rst'), 'w') as vfile:
vfile.write('.. |version| replace:: %s\n' % version)
return version, vfile.name
def _get_version():
namespace = {}
execfile(os.path.join(CURDIR, '..', '..', 'src', 'robot', 'version.py'),
namespace)
return namespace['get_version']()
def _copy_installation_instructions():
source = os.path.join(CURDIR, '..', '..', 'INSTALL.rst')
target = os.path.join(CURDIR, 'src', 'GettingStarted', 'INSTALL.rst')
include = True
with open(source) as source_file:
with open(target, 'w') as target_file:
for line in source_file:
if 'START USER GUIDE IGNORE' in line:
include = False
if include:
target_file.write(line)
if 'END USER GUIDE IGNORE' in line:
include = True
return target
#
# Create user guide distribution directory
#
def create_distribution():
import re
from urlparse import urlparse
dist = os.path.normpath(os.path.join(CURDIR, '..', '..', 'dist'))
ugpath, version = create_userguide() # we are in doc/userguide after this
outdir = os.path.join(dist, 'robotframework-userguide-%s' % version)
templates = os.path.join(outdir, 'templates')
libraries = os.path.join(outdir, 'libraries')
images = os.path.join(outdir, 'images')
print 'Creating distribution directory ...'
if os.path.exists(outdir):
print 'Removing previous user guide distribution'
shutil.rmtree(outdir)
elif not os.path.exists(dist):
os.mkdir(dist)
print 'Recompiling library docs'
sys.path.insert(0, os.path.join(CURDIR, '..', 'libraries'))
import lib2html
lib2html.create_all()
for dirname in [outdir, templates, libraries, images]:
print "Creating output directory '%s'" % dirname
os.mkdir(dirname)
def replace_links(res):
if not res.group(5):
return res.group(0)
scheme, _, path, _, _, fragment = urlparse(res.group(5))
if scheme or (fragment and not path):
return res.group(0)
replaced_link = '%s %s="%%s/%s"' % (res.group(1), res.group(4),
os.path.basename(path))
if path.startswith('../../templates'):
copy(path, templates)
replaced_link = replaced_link % 'templates'
elif path.startswith('../libraries'):
copy(path, libraries)
replaced_link = replaced_link % 'libraries'
elif path.startswith('src/'):
copy(path, images)
replaced_link = replaced_link % 'images'
else:
raise ValueError('Invalid link target: %s (context: %s)'
% (path, res.group(0)))
print "Modified link '%s' -> '%s'" % (res.group(0), replaced_link)
return replaced_link
def copy(source, dest):
print "Copying '%s' -> '%s'" % (source, dest)
shutil.copy(source, dest)
link_regexp = re.compile('''
(<(a|img)\s+.*?)
(\s+(href|src)="(.*?)"|>)
''', re.VERBOSE | re.DOTALL | re.IGNORECASE)
with open(ugpath) as infile:
content = link_regexp.sub(replace_links, infile.read())
with open(os.path.join(outdir, os.path.basename(ugpath)), 'wb') as outfile:
outfile.write(content)
print os.path.abspath(outfile.name)
return outdir
#
# Create a zip distribution package
#
def create_zip():
ugdir = create_distribution()
print 'Creating zip package ...'
zip_path = zip_distribution(ugdir)
print 'Removing distribution directory', ugdir
shutil.rmtree(ugdir)
print zip_path
def zip_distribution(dirpath):
from zipfile import ZipFile, ZIP_DEFLATED
zippath = os.path.normpath(dirpath) + '.zip'
arcroot = os.path.dirname(dirpath)
with ZipFile(zippath, 'w', compression=ZIP_DEFLATED) as zipfile:
for root, _, files in os.walk(dirpath):
for name in files:
path = os.path.join(root, name)
arcpath = os.path.relpath(path, arcroot)
print "Adding '%s'" % arcpath
zipfile.write(path, arcpath)
return os.path.abspath(zippath)
if __name__ == '__main__':
actions = { 'create': create_userguide, 'cr': create_userguide,
'dist': create_distribution, 'zip': create_zip }
try:
actions[sys.argv[1]](*sys.argv[2:])
except (KeyError, IndexError, TypeError):
print __doc__
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import py4j
class CapturedException(Exception):
def __init__(self, desc, stackTrace, cause=None):
self.desc = desc
self.stackTrace = stackTrace
self.cause = convert_exception(cause) if cause is not None else None
def __str__(self):
return repr(self.desc)
class AnalysisException(CapturedException):
"""
Failed to analyze a SQL query plan.
"""
class ParseException(CapturedException):
"""
Failed to parse a SQL command.
"""
class IllegalArgumentException(CapturedException):
"""
Passed an illegal or inappropriate argument.
"""
class StreamingQueryException(CapturedException):
"""
Exception that stopped a :class:`StreamingQuery`.
"""
class QueryExecutionException(CapturedException):
"""
Failed to execute a query.
"""
class UnknownException(CapturedException):
"""
None of the above exceptions.
"""
def convert_exception(e):
"""Map a captured JVM exception (a Py4J Java object) to the matching CapturedException subclass."""
s = e.toString()
stackTrace = '\n\t at '.join(map(lambda x: x.toString(), e.getStackTrace()))
c = e.getCause()
if s.startswith('org.apache.spark.sql.AnalysisException: '):
return AnalysisException(s.split(': ', 1)[1], stackTrace, c)
if s.startswith('org.apache.spark.sql.catalyst.analysis'):
return AnalysisException(s.split(': ', 1)[1], stackTrace, c)
if s.startswith('org.apache.spark.sql.catalyst.parser.ParseException: '):
return ParseException(s.split(': ', 1)[1], stackTrace, c)
if s.startswith('org.apache.spark.sql.streaming.StreamingQueryException: '):
return StreamingQueryException(s.split(': ', 1)[1], stackTrace, c)
if s.startswith('org.apache.spark.sql.execution.QueryExecutionException: '):
return QueryExecutionException(s.split(': ', 1)[1], stackTrace, c)
if s.startswith('java.lang.IllegalArgumentException: '):
return IllegalArgumentException(s.split(': ', 1)[1], stackTrace, c)
return UnknownException(s, stackTrace, c)
def capture_sql_exception(f):
def deco(*a, **kw):
try:
return f(*a, **kw)
except py4j.protocol.Py4JJavaError as e:
converted = convert_exception(e.java_exception)
if not isinstance(converted, UnknownException):
raise converted
else:
raise
return deco
def install_exception_handler():
"""
Hook an exception handler into Py4j, which could capture some SQL exceptions in Java.
When calling Java API, it will call `get_return_value` to parse the returned object.
If any exception happened in JVM, the result will be Java exception object, it raise
py4j.protocol.Py4JJavaError. We replace the original `get_return_value` with one that
could capture the Java exception and throw a Python one (with the same error message).
It's idempotent, could be called multiple times.
"""
original = py4j.protocol.get_return_value
# Capture the original, unpatched `get_return_value` so that repeated calls stay idempotent.
patched = capture_sql_exception(original)
# only patch the one used in py4j.java_gateway (call Java API)
py4j.java_gateway.get_return_value = patched
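# Usage sketch (illustrative only, not executed on import): once
# install_exception_handler() has been called -- PySpark is expected to do this while
# the SparkSession/SQLContext is initialised -- a failing SQL statement surfaces as one
# of the Python exception classes above instead of a raw py4j.protocol.Py4JJavaError:
#
#     try:
#         spark.sql("SELECT * FROM table_that_does_not_exist")
#     except AnalysisException as e:
#         print(e.desc)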
def toJArray(gateway, jtype, arr):
"""
Convert python list to java type array
:param gateway: Py4j Gateway
:param jtype: java type of element in array
:param arr: python type list
"""
jarr = gateway.new_array(jtype, len(arr))
for i in range(0, len(arr)):
jarr[i] = arr[i]
return jarr
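# Usage sketch for toJArray (illustrative only; `sc._gateway` is an internal
# SparkContext attribute used here merely as a convenient handle to the Py4J gateway):
#
#     gateway = sc._gateway
#     jarr = toJArray(gateway, gateway.jvm.double, [1.0, 2.0, 3.0])
#
# `gateway.jvm.double` names the primitive double type, so `jarr` is a Java double[]
# holding the same three elements.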
def require_minimum_pandas_version():
""" Raise ImportError if minimum version of Pandas is not installed
"""
# TODO(HyukjinKwon): Relocate and deduplicate the version specification.
minimum_pandas_version = "0.23.2"
from distutils.version import LooseVersion
try:
import pandas
have_pandas = True
except ImportError:
have_pandas = False
if not have_pandas:
raise ImportError("Pandas >= %s must be installed; however, "
"it was not found." % minimum_pandas_version)
if LooseVersion(pandas.__version__) < LooseVersion(minimum_pandas_version):
raise ImportError("Pandas >= %s must be installed; however, "
"your version was %s." % (minimum_pandas_version, pandas.__version__))
def require_minimum_pyarrow_version():
""" Raise ImportError if minimum version of pyarrow is not installed
"""
# TODO(HyukjinKwon): Relocate and deduplicate the version specification.
minimum_pyarrow_version = "0.12.1"
from distutils.version import LooseVersion
try:
import pyarrow
have_arrow = True
except ImportError:
have_arrow = False
if not have_arrow:
raise ImportError("PyArrow >= %s must be installed; however, "
"it was not found." % minimum_pyarrow_version)
if LooseVersion(pyarrow.__version__) < LooseVersion(minimum_pyarrow_version):
raise ImportError("PyArrow >= %s must be installed; however, "
"your version was %s." % (minimum_pyarrow_version, pyarrow.__version__))
def require_test_compiled():
""" Raise Exception if test classes are not compiled
"""
import os
import glob
try:
spark_home = os.environ['SPARK_HOME']
except KeyError:
raise RuntimeError('SPARK_HOME is not defined in environment')
test_class_path = os.path.join(
spark_home, 'sql', 'core', 'target', '*', 'test-classes')
paths = glob.glob(test_class_path)
if len(paths) == 0:
raise RuntimeError(
"%s doesn't exist. Spark sql test classes are not compiled." % test_class_path)
class ForeachBatchFunction(object):
"""
This is the Python implementation of Java interface 'ForeachBatchFunction'. This wraps
the user-defined 'foreachBatch' function such that it can be called from the JVM when
the query is active.
"""
def __init__(self, sql_ctx, func):
self.sql_ctx = sql_ctx
self.func = func
def call(self, jdf, batch_id):
from pyspark.sql.dataframe import DataFrame
try:
self.func(DataFrame(jdf, self.sql_ctx), batch_id)
except Exception as e:
self.error = e
raise e
class Java:
implements = ['org.apache.spark.sql.execution.streaming.sources.PythonForeachBatchFunction']
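# Usage sketch (illustrative only): on the user side this wrapper is driven by
# DataStreamWriter.foreachBatch; the sink table name below is a made-up example:
#
#     def handle_batch(batch_df, batch_id):
#         batch_df.write.mode("append").saveAsTable("events")
#
#     query = df.writeStream.foreachBatch(handle_batch).start()
#
# Each micro-batch reaches handle_batch as a regular DataFrame together with its batch
# id; an exception raised there is stored on self.error above and re-raised into the
# JVM, failing the streaming query.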
|
|
import time
import unittest
import uuid
import datetime
import cassandra
import numpy as np
from hecuba import config
from hecuba.tools import discrete_token_ranges
from hecuba.storageobj import StorageObj
from storage.api import getByID
from hecuba.IStorage import build_remotely
from ..app.words import Words
class Test2StorageObj(StorageObj):
'''
@ClassField name str
@ClassField age int
'''
pass
class Result(StorageObj):
'''
@ClassField instances dict<<word:str>, numinstances:int>
'''
pass
class TestStorageObj(StorageObj):
'''
@ClassField test dict<<position:int>, text:str>
'''
pass
class TestStorageIndexedArgsObj(StorageObj):
'''
@ClassField test dict<<position:int>, x:float, y:float, z:float>
@Index_on test x,y,z
'''
pass
class Test2StorageObjFloat(StorageObj):
'''
@ClassField name str
@ClassField age float
'''
pass
class Test3StorageObj(StorageObj):
'''
@ClassField myso tests.withcassandra.storageobj_tests.Test2StorageObj
@ClassField myso2 tests.withcassandra.storageobj_tests.TestStorageObj
@ClassField myint int
@ClassField mystr str
'''
pass
class Test4StorageObj(StorageObj):
'''
@ClassField myotherso tests.withcassandra.storageobj_tests.Test2StorageObj
'''
pass
class Test4bStorageObj(StorageObj):
'''
@ClassField myotherso tests.withcassandra.test2storageobj.Test2StorageObj
'''
pass
class Test5StorageObj(StorageObj):
'''
@ClassField test2 dict<<position:int>, myso:tests.withcassandra.storageobj_tests.Test2StorageObj>
'''
pass
class Test6StorageObj(StorageObj):
'''
@ClassField test3 dict<<key0:int>, val0:str, val1:str>
'''
pass
class Test7StorageObj(StorageObj):
'''
@ClassField test2 dict<<key0:int>, val0:tests.withcassandra.storageobj_tests.Test2StorageObj>
'''
pass
class TestStorageObjNumpy(StorageObj):
'''
@ClassField mynumpy numpy.ndarray
'''
pass
class TestStorageObjNumpyDict(StorageObj):
'''
@ClassField mynumpydict dict<<key:int>, val:numpy.ndarray>
'''
pass
class TestStorageObjDict(StorageObj):
'''
@ClassField MyAttribute_1 int
@ClassField MyAttribute_2 dict <<int>, str>
@ClassField MyAttribute_3 dict <<int, str>, int>
'''
class TestAttributes(StorageObj):
'''
@ClassField key int
'''
value = None
def do_nothing_at_all(self):
pass
def setvalue(self, v):
self.value = v
def getvalue(self):
return self.value
class mixObj(StorageObj):
'''
@ClassField floatfield float
@ClassField intField int
@ClassField strField str
@ClassField intlistField list<int>
@ClassField floatlistField list<float>
@ClassField strlistField list<str>
@ClassField dictField dict<<key0:int>, val0:str>
@ClassField inttupleField tuple<int,int>
'''
class TestDate(StorageObj):
'''
@ClassField attr date
'''
class TestTime(StorageObj):
'''
@ClassField attr time
'''
class TestDateTime(StorageObj):
'''
@ClassField attr datetime
'''
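# A minimal sketch of how these @ClassField declarations are exercised by the tests
# below: a subclass is instantiated volatile, its typed attributes are assigned, and
# make_persistent() maps the object onto a Cassandra table (the table name here is
# just an example):
#
#     so = Test2StorageObj()
#     so.name = 'caio'
#     so.age = 1000
#     so.make_persistent("some_table")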
class StorageObjTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.old = config.execution_name
config.execution_name = "StorageObjTest".lower()
@classmethod
def tearDownClass(cls):
config.session.execute("DROP KEYSPACE IF EXISTS {}".format(config.execution_name), timeout=60)
config.execution_name = cls.old
def setUp(self):
self.current_ksp = config.execution_name
pass
def tearDown(self):
pass
def test_build_remotely(self):
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".TestStorageObj")
time.sleep(1)
tablename="test_build_remotely"
obj = TestStorageObj(config.execution_name + "." + tablename)
r = {"built_remotely": False, "storage_id": uuid.uuid3(uuid.NAMESPACE_DNS, config.execution_name + '.' + tablename),
"ksp": config.execution_name,
"class_name": str(TestStorageObj.__module__) + "." + TestStorageObj.__name__, "name": tablename,
"columns": [('val1', 'str')], "entry_point": 'localhost', "primary_keys": [('pk1', 'int')],
"istorage_props": {},
"tokens": discrete_token_ranges([token.value for token in config.cluster.metadata.token_map.ring])}
nopars = build_remotely(r)
self.assertEqual('TestStorageObj'.lower(), nopars._table)
self.assertEqual(config.execution_name, nopars._ksp)
self.assertEqual(uuid.uuid3(uuid.NAMESPACE_DNS, config.execution_name + '.' + tablename), nopars.storage_id)
name, tkns = \
config.session.execute("SELECT name, tokens FROM hecuba.istorage WHERE storage_id = %s",
[nopars.storage_id])[
0]
self.assertEqual(name, config.execution_name + '.' + tablename)
self.assertEqual(tkns, r['tokens'])
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".TestStorageObj")
def test_init_create_pdict(self):
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".TestStorageObj")
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".Result")
time.sleep(1)
tablename = 'test_init_create_pdict'
r = {"built_remotely": False, "storage_id": uuid.uuid3(uuid.NAMESPACE_DNS, config.execution_name + '.' + tablename),
"ksp": config.execution_name,
"class_name": str(TestStorageObj.__module__) + "." + TestStorageObj.__name__, "name": tablename,
"columns": [('val1', 'str')], "entry_point": 'localhost', "primary_keys": [('pk1', 'int')],
"istorage_props": {},
"tokens": discrete_token_ranges([token.value for token in config.cluster.metadata.token_map.ring])}
nopars = build_remotely(r)
self.assertEqual(nopars._built_remotely, False)
self.assertEqual('TestStorageObj'.lower(), nopars._table)
self.assertEqual(config.execution_name, nopars._ksp)
self.assertEqual(uuid.uuid3(uuid.NAMESPACE_DNS, config.execution_name + '.' + tablename), nopars.storage_id)
name, tkns = \
config.session.execute("SELECT name,tokens FROM hecuba.istorage WHERE storage_id = %s",
[nopars.storage_id])[0]
self.assertEqual(name, config.execution_name + '.' + r['name'])
self.assertEqual(tkns, r['tokens'])
tkns = discrete_token_ranges(
[8508619251581300691, 8514581128764531689, 8577968535836399533, 8596162846302799189,
8603491526474728284, 8628291680139169981, 8687301163739303017, 9111581078517061776])
tablename2 = tablename+'2'
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + '.' + tablename2)
nopars = Result(name=tablename2,
tokens=tkns)
self.assertEqual('Result'.lower(), nopars._table)
self.assertEqual(config.execution_name, nopars._ksp)
self.assertEqual(uuid.uuid3(uuid.NAMESPACE_DNS, config.execution_name + '.' + tablename2), nopars.storage_id)
self.assertEqual(True, nopars._is_persistent)
self.assertTrue(hasattr(nopars, 'instances'))
name, read_tkns = config.session.execute("SELECT name,tokens FROM hecuba.istorage WHERE storage_id = %s",
[nopars.storage_id])[0]
self.assertEqual(name, config.execution_name + '.' + tablename2)
self.assertEqual(tkns, read_tkns)
def test_mixed_class(self):
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".mixObj")
time.sleep(1)
myObj = mixObj()
myObj.make_persistent("test_mixed_class")
myObj.floatfield = 5.0
myObj.intField = 5
myObj.strField = "6"
myObj.intlistField = [7, 8, 9]
myObj.floatlistField = [10.0, 11.0, 12.0]
myObj.strlistField = ["13.0", "14.0", "15.0"]
myObj.inttupleField = (1, 2)
floatfield, intField, strField, intlistField, floatlistField, strlistField, inttupleField = \
config.session.execute("SELECT floatField, "
"intField, "
"strField, "
"intlistField, "
"floatlistField, "
"strlistField, "
"inttupleField "
"FROM " + self.current_ksp + ".mixObj WHERE storage_id =" + str(myObj.storage_id))[0]
self.assertEquals(floatfield, myObj.floatfield)
self.assertEquals(intField, myObj.intField)
self.assertEquals(strField, myObj.strField)
self.assertEquals(intlistField, myObj.intlistField)
self.assertEquals(floatlistField, myObj.floatlistField)
self.assertEquals(strlistField, myObj.strlistField)
self.assertEquals(inttupleField, myObj.inttupleField)
def test_init_empty(self):
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".TestStorageObj")
time.sleep(1)
nopars = TestStorageObj('{}.test_init_empty'.format(self.current_ksp))
self.assertEqual('TestStorageObj'.lower(), nopars._table)
self.assertEqual(self.current_ksp, nopars._ksp)
res = config.session.execute(
'SELECT storage_id, class_name, name, tokens, istorage_props FROM hecuba.istorage WHERE storage_id = %s',
[nopars.storage_id])[0]
storage_id, storageobj_classname, name, tokens, istorage_props = res
self.assertEqual(storage_id, nopars.storage_id)
self.assertEqual(storageobj_classname, TestStorageObj.__module__ + "." + TestStorageObj.__name__)
self.assertEqual(name, '{}.test_init_empty'.format(self.current_ksp))
rebuild = build_remotely(res._asdict())
self.assertEqual(rebuild._built_remotely, True)
self.assertEqual('TestStorageObj'.lower(), rebuild._table)
self.assertEqual(self.current_ksp.lower(), rebuild._ksp)
self.assertEqual(storage_id, rebuild.storage_id)
self.assertEqual(nopars._is_persistent, rebuild._is_persistent)
# self.assertEqual(vars(nopars), vars(rebuild))
def test_make_persistent(self):
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".Words")
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".Test6StorageObj")
time.sleep(1)
nopars = Words()
self.assertFalse(nopars._is_persistent)
nopars.ciao = 1
nopars.ciao2 = "1"
nopars.ciao3 = [1, 2, 3]
nopars.ciao4 = (1, 2, 3)
for i in range(10):
nopars.words[i] = 'ciao' + str(i)
#count, = config.session.execute(
# "SELECT count(*) FROM system_schema.tables WHERE keyspace_name = '"+self.current_ksp+"' and table_name = 'words'")[0]
#self.assertEqual(0, count)
nopars.make_persistent(self.current_ksp+".test_make_persistentsso")
tablename = nopars.words._table
del nopars
count, = config.session.execute('SELECT count(*) FROM '+self.current_ksp +"."+ tablename)[0]
self.assertEqual(10, count)
nopars2 = Test6StorageObj(self.current_ksp+".test_make_persistentnonames")
nopars2.test3[0] = ['1', '2']
tablename = nopars2.test3._table
time.sleep(2)
result = config.session.execute("SELECT val0, val1 FROM "+self.current_ksp +"."+ tablename+" WHERE key0 = 0")
rval0 = None
rval1 = None
for row in result:
rval0 = row.val0
rval1 = row.val1
self.assertEqual('1', rval0)
self.assertEqual('2', rval1)
def test_empty_persistent(self):
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".Words")
time.sleep(1)
so = Words()
so.make_persistent("test_empty_persistent")
so.ciao = "an attribute"
so.another = 123
config.batch_size = 1
config.cache_activated = False
for i in range(10):
so.words[i] = str.join(',', map(lambda a: "ciao", range(i)))
table_name = so.words._name
del so
import gc
gc.collect()
# The attribute words has been persisted...
count, = config.session.execute("SELECT COUNT(*) FROM "+self.current_ksp+".Words")[0]
self.assertEqual(count, 1)
# The 10 dict items have been persisted...
count, = config.session.execute('SELECT count(*) FROM '+table_name)[0]
self.assertEqual(10, count)
#so = Words() FIXME: This is not possible anymore as the table_name for the attribute is randomly generated
#so.make_persistent(self.current_ksp+".test_empty_persistent")
#so.delete_persistent()
#count, = config.session.execute('SELECT count(*) FROM '+self.current_ksp+'.test_empty_persistent_words')[0]
#self.assertEqual(0, count)
def test_simple_stores_after_make_persistent(self):
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".Test2StorageObj")
time.sleep(1)
so = Test2StorageObj()
so.name = 'caio'
so.age = 1000
so.make_persistent("test_simple_stores_after_make_persistent")
count, = config.session.execute("SELECT COUNT(*) FROM "+self.current_ksp+".Test2StorageObj")[0]
self.assertEqual(count, 1)
self.assertEqual(so.name, 'caio')
self.assertEqual(so.age, 1000)
def test_simple_attributes(self):
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".Test2StorageObj")
time.sleep(1)
so = Test2StorageObj()
so.make_persistent("test_simple_attributes")
so.name = 'caio'
so.age = 1000
count, = config.session.execute("SELECT COUNT(*) FROM "+self.current_ksp+".Test2StorageObj")[0]
self.assertEqual(count, 1)
self.assertEqual(so.name, 'caio')
self.assertEqual(so.age, 1000)
def test_modify_simple_attributes(self):
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".Test2StorageObj")
time.sleep(1)
so = Test2StorageObj()
so.make_persistent("test_modify_simple_attributes")
so.name = 'caio'
so.age = 1000
count, = config.session.execute("SELECT COUNT(*) FROM "+self.current_ksp+".Test2StorageObj")[0]
self.assertEqual(count, 1)
self.assertEqual(so.name, 'caio')
self.assertEqual(so.age, 1000)
so.name = 'addio'
so.age = 2000
self.assertEqual(so.name, 'addio')
self.assertEqual(so.age, 2000)
def test_delattr_nonpersistent(self):
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".Test2StorageObj")
time.sleep(1)
so = Test2StorageObj()
so.name = 'caio'
del so.name
def del_attr():
my_val = so.name
self.assertRaises(AttributeError, del_attr)
def test_delattr_persistent(self):
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".Test2StorageObj")
time.sleep(1)
so = Test2StorageObj("test_delattr_persistent")
so.name = 'caio'
del so.name
def del_attr1():
my_val = so.name
self.assertRaises(AttributeError, del_attr1)
def del_attr2():
my_val = so.random_val
self.assertRaises(AttributeError, del_attr2)
def test_delattr_persistent_nested(self):
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".Test3StorageObj")
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".Test2StorageObj")
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".TestStorageObj")
time.sleep(1)
so = Test3StorageObj("test_delattr_persistent_nested")
nestedSo = Test2StorageObj()
nestedSo.name = 'caio'
so.myint = 123
so.myso = nestedSo
# Make sure the inner object has been made persistent
self.assertTrue(nestedSo._is_persistent)
# Delete the attribute
del so.myint
def del_attr1():
my_val = so.myint
# Accessing deleted attr of type StorageOb should raise AttrErr
self.assertRaises(AttributeError, del_attr1)
# We assign again, nestedSo still existed (no one called delete on it)
so.myso = nestedSo
# Delete a nested attribute of the shared StorageObj
del so.myso.name
# Make sure that the nested attribute deleted has been successfully deleted from both objects
def del_attr2():
my_val = nestedSo.name
def del_attr3():
my_val = so.myso.name
self.assertRaises(AttributeError, del_attr2)
self.assertRaises(AttributeError, del_attr3)
def test_modify_simple_before_mkp_attributes(self):
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".Test2StorageObj")
time.sleep(1)
so = Test2StorageObj()
so.name = 'caio'
so.age = 1000
so.make_persistent("test_modify_simple_before_mkp_attributes")
count, = config.session.execute("SELECT COUNT(*) FROM "+self.current_ksp+".Test2StorageObj")[0]
self.assertEqual(count, 1)
self.assertEqual(so.name, 'caio')
self.assertEqual(so.age, 1000)
so.name = 'addio'
so.age = 2000
self.assertEqual(so.name, 'addio')
self.assertEqual(so.age, 2000)
def test_paranoid_setattr_nonpersistent(self):
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".Test2StorageObj")
time.sleep(1)
so = Test2StorageObj("test_paranoid_setattr_nonpersistent")
so.name = 'my_name'
self.assertEquals(so.name, 'my_name')
def set_name_test():
so.name = 1
self.assertRaises(cassandra.InvalidRequest, set_name_test)
so.age = 1
self.assertEquals(so.age, 1)
def set_age_test():
so.age = 'my_name'
self.assertRaises(cassandra.InvalidRequest, set_age_test)
def test_paranoid_setattr_persistent(self):
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".Test2StorageObj")
time.sleep(1)
so = Test2StorageObj("t2")
so.name = 'my_name'
result = config.session.execute("SELECT name FROM "+self.current_ksp+".Test2StorageObj")
for row in result:
cass_name = row.name
self.assertEquals(cass_name, 'my_name')
def setNameTest():
so.name = 1
self.assertRaises(cassandra.InvalidRequest, setNameTest)
so.age = 1
result = config.session.execute("SELECT age FROM "+self.current_ksp+".Test2StorageObj")
for row in result:
cass_age = row.age
self.assertEquals(cass_age, 1)
def setAgeTest():
so.age = 'my_name'
self.assertRaises(cassandra.InvalidRequest, setAgeTest)
def test_paranoid_setattr_float(self):
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".Test2StorageObjFloat")
time.sleep(1)
so = Test2StorageObjFloat("test_paranoid_setattr_float")
so.age = 2.0
def test_nestedso_notpersistent(self):
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".Test3StorageObj")
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".TestStorageObj")
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".Test2StorageObj")
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".Test4StorageObj")
time.sleep(1)
my_nested_so = Test3StorageObj()
my_nested_so.myso.name = 'Link'
self.assertEquals('Link', my_nested_so.myso.name)
my_nested_so.myso.age = 10
self.assertEquals(10, my_nested_so.myso.age)
error = False
try:
config.session.execute('SELECT * FROM '+self.current_ksp+'.Test3StorageObj')
except cassandra.InvalidRequest:
error = True
self.assertEquals(True, error)
my_nested_so.myso2.test[0] = 'position0'
self.assertEquals('position0', my_nested_so.myso2.test[0])
my_nested_so2 = Test4StorageObj()
my_nested_so2.myotherso.name = 'Link'
self.assertEquals('Link', my_nested_so2.myotherso.name)
my_nested_so2.myotherso.age = 10
self.assertEquals(10, my_nested_so2.myotherso.age)
error = False
try:
config.session.execute('SELECT * FROM '+self.current_ksp+'.myso')
except cassandra.InvalidRequest:
error = True
self.assertEquals(True, error)
my_nested_so3 = Test4bStorageObj('mynested')
my_nested_subso = my_nested_so3.myotherso
my_other_nested = getByID(my_nested_subso.storage_id)
my_other_nested.name = 'bla'
my_other_nested.age = 5
error = False
try:
result = config.session.execute('SELECT * FROM '+self.current_ksp+'.Test2StorageObj')
except cassandra.InvalidRequest:
error = True
self.assertEquals(False, error)
for row in result:
query_res = row
self.assertEquals(5, query_res.age)
self.assertEquals('bla', query_res.name)
def test_nestedso_persistent(self):
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".Test3StorageObj")
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".Test2StorageObj")
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".TestStorageObj")
time.sleep(1)
my_nested_so = Test3StorageObj('test_nestedso_persistent')
self.assertEquals(True, my_nested_so._is_persistent)
self.assertEquals(True, my_nested_so.myso._is_persistent)
self.assertEquals(True, my_nested_so.myso2._is_persistent)
my_nested_so.myso.name = 'Link'
my_nested_so.myso.age = 10
error = False
try:
result = config.session.execute('SELECT * FROM '+self.current_ksp+'.test2storageobj')
except cassandra.InvalidRequest:
error = True
self.assertEquals(False, error)
for row in result:
query_res = row
self.assertEquals(10, query_res.age)
self.assertEquals('Link', query_res.name)
my_nested_so.myso2.name = 'position0'
self.assertEquals('position0', my_nested_so.myso2.name)
def test_nestedso_topersistent(self):
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".Test3StorageObj")
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".TestStorageObj")
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".Test2StorageObj")
time.sleep(1)
my_nested_so = Test3StorageObj()
my_nested_so.myso.name = 'Link'
self.assertEquals('Link', my_nested_so.myso.name)
my_nested_so.myso.age = 10
self.assertEquals(10, my_nested_so.myso.age)
error = False
try:
result = config.session.execute('SELECT * FROM '+self.current_ksp+'.test2storageobj')
except cassandra.InvalidRequest:
error = True
self.assertEquals(True, error)
my_nested_so.make_persistent('mynewso')
error = False
try:
result = config.session.execute('SELECT * FROM '+self.current_ksp+'.test2storageobj')
except cassandra.InvalidRequest:
error = True
self.assertEquals(False, error)
for row in result:
query_res = row
self.assertEquals(10, query_res.age)
self.assertEquals('Link', query_res.name)
def test_nestedso_sets_gets(self):
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".Test3StorageObj")
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".TestStorageObj")
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".Test2StorageObj")
time.sleep(1)
my_nested_so = Test3StorageObj()
my_nested_so.myso.name = 'Link'
self.assertEquals('Link', my_nested_so.myso.name)
my_nested_so.myso.age = 10
self.assertEquals(10, my_nested_so.myso.age)
my_nested_so.myso.weight = 70
self.assertEquals(70, my_nested_so.myso.weight)
#error = False
#try:
# result = config.session.execute('SELECT * FROM '+self.current_ksp+'.test_nestedso_sets_gets_myso')
#except cassandra.InvalidRequest:
# error = True
#self.assertEquals(True, error)
my_nested_so.make_persistent('test_nestedso_sets_gets_myso')
error = False
try:
result = config.session.execute('SELECT * FROM '+self.current_ksp+'.Test2StorageObj')
except cassandra.InvalidRequest:
error = True
self.assertEquals(False, error)
for row in result:
query_res = row
self.assertEquals(10, query_res.age)
self.assertEquals('Link', query_res.name)
error = False
try:
_ = query_res.weight
except:
error = True
self.assertEquals(True, error)
my_nested_so.myso.weight = 50
self.assertEquals(50, my_nested_so.myso.weight)
result = config.session.execute('SELECT * FROM '+self.current_ksp+'.Test2StorageObj')
for row in result:
query_res = row
error = False
try:
_ = query_res.weight
except:
error = True
self.assertEquals(True, error)
def test_nestedso_sets_gets_complex(self):
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".Test3StorageObj")
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".TestStorageObj")
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".Test2StorageObj")
time.sleep(1)
my_nested_so = Test3StorageObj()
error = False
try:
_ = config.session.execute('SELECT * FROM '+self.current_ksp+'.TestStorageObj')
except cassandra.InvalidRequest:
error = True
self.assertEquals(True, error)
my_nested_so.make_persistent('tnsgc')
# We create the nested persistent objects only after they are accessed by the first time
error = False
try:
_ = config.session.execute('SELECT * FROM '+self.current_ksp+'.TestStorageObj')
except cassandra.InvalidRequest:
error = True
self.assertEquals(True, error)
for i in range(0, 100):
my_nested_so.myso2.test[i] = 'position' + str(i)
time.sleep(5)
table_name = my_nested_so.myso2.test._table
count, = config.session.execute("SELECT COUNT(*) FROM "+self.current_ksp+"." + table_name)[0]
self.assertEquals(100, count)
def test_nestedso_deletepersistent(self):
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".Test3StorageObj")
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".TestStorageObj")
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".Test2StorageObj")
time.sleep(1)
my_nested_so = Test3StorageObj('tndp')
self.assertEquals(True, my_nested_so._is_persistent)
my_nested_so.myso.name = 'Link'
self.assertEquals('Link', my_nested_so.myso.name)
my_nested_so.myso.age = 10
self.assertEquals(10, my_nested_so.myso.age)
my_nested_so.delete_persistent()
self.assertEquals(False, my_nested_so._is_persistent)
entries = 0
try:
_ = config.session.execute('SELECT * FROM '+self.current_ksp+'.test2storageobj')
except cassandra.InvalidRequest:
entries += 1
self.assertEquals(0, entries)
def test_delete_persistent_obj(self):
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".Test2StorageObj")
time.sleep(1)
sobj = Test2StorageObj()
sobj.name = "nom1"
sobj.age = 42
sobj.make_persistent("testpobj1")
sobj2 = Test2StorageObj()
sobj2.name = "nom2"
sobj2.age = 666
sobj2.make_persistent("testpobj2")
sobj2.delete_persistent()
del sobj2
del sobj
sobj3 = Test2StorageObj("testpobj1")
self.assertEquals(sobj3.name, "nom1")
sobj4 = Test2StorageObj("testpobj2")
with self.assertRaises(AttributeError): # The object should be EMPTY
name = sobj4.name
def test_nestedso_dictofsos(self):
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".Test5StorageObj")
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".Test2StorageObj")
time.sleep(1)
my_nested_so = Test5StorageObj()
my_nested_so.test2[0] = Test2StorageObj()
my_nested_so.make_persistent('test_nestedso_dictofsos')
self.assertEquals(True, my_nested_so._is_persistent)
self.assertEquals(True, my_nested_so.test2._is_persistent)
self.assertEquals(True, my_nested_so.test2[0]._is_persistent)
my_nested_so.test2[0].name = 'Link'
self.assertEquals('Link', my_nested_so.test2[0].name)
my_nested_so.test2[0].age = 10
self.assertEquals(10, my_nested_so.test2[0].age)
def test_nestedso_dictofsos_noname(self):
#'''
#This test is similar to test_nestedso_dictofsos, with the difference that the StorageDict
#used as an attribute in Test7StorageObj has the form <int,StorageObj>, where no name has been given for
#the StorageObj or the integer. In this case, the default names are used (key0, val0).
#'''
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".Test2StorageObj")
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".Test7StorageObj")
time.sleep(1)
my_nested_so = Test7StorageObj()
my_nested_so.test2[0] = Test2StorageObj()
my_nested_so.make_persistent('test_nestedso_dictofsos_noname')
self.assertEquals(True, my_nested_so._is_persistent)
self.assertEquals(True, my_nested_so.test2._is_persistent)
self.assertEquals(True, my_nested_so.test2[0]._is_persistent)
my_nested_so.test2[0].name = 'Link'
self.assertEquals('Link', my_nested_so.test2[0].name)
my_nested_so.test2[0].age = 10
self.assertEquals(10, my_nested_so.test2[0].age)
def test_nestedso_retrievedata(self):
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".Test5StorageObj")
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".Test2StorageObj")
time.sleep(1)
my_nested_so = Test5StorageObj('tnr')
my_nested_so.test2[0] = Test2StorageObj('tnr_something')
self.assertEquals(True, my_nested_so._is_persistent)
self.assertEquals(True, my_nested_so.test2._is_persistent)
self.assertEquals(True, my_nested_so.test2[0]._is_persistent)
my_nested_so.test2[0].name = 'Link'
self.assertEquals('Link', my_nested_so.test2[0].name)
my_nested_so.test2[0].age = 10
self.assertEquals(10, my_nested_so.test2[0].age)
del my_nested_so
my_nested_so2 = Test5StorageObj('tnr')
self.assertEquals('Link', my_nested_so2.test2[0].name)
self.assertEquals(10, my_nested_so2.test2[0].age)
def test_numpy_persistent(self):
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".TestStorageObjNumpy")
time.sleep(1)
my_so = TestStorageObjNumpy('tnp')
def test_numpy_set(self):
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".TestStorageObjNumpy")
time.sleep(1)
my_so = TestStorageObjNumpy()
my_so.mynumpy = np.random.rand(3, 2)
my_so.make_persistent('test_numpy_set')
def test_numpy_get(self):
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".TestStorageObjNumpy")
time.sleep(1)
my_so = TestStorageObjNumpy('test_numpy_get')
mynumpy = np.random.rand(3, 2)
my_so.mynumpy = mynumpy
time.sleep(2)
self.assertTrue(np.array_equal(mynumpy, my_so.mynumpy))
def test_numpy_topersistent(self):
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".TestStorageObjNumpy")
time.sleep(1)
my_so = TestStorageObjNumpy()
my_so.mynumpy = np.random.rand(3, 2)
my_so.make_persistent('test_numpy_topersistent')
def test_numpydict_persistent(self):
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".TestStorageObjNumpyDict")
time.sleep(1)
my_so = TestStorageObjNumpyDict('test_numpydict_persistent')
def test_numpydict_set(self):
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".TestStorageObjNumpyDict")
time.sleep(1)
my_so = TestStorageObjNumpyDict('test_numpydict_set')
my_so.mynumpydict[0] = np.random.rand(3, 2)
def test_numpydict_to_persistent(self):
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".TestStorageObjNumpyDict")
time.sleep(1)
my_so = TestStorageObjNumpyDict()
my_so.mynumpydict[0] = np.random.rand(3, 2)
my_so.make_persistent('test_numpydict_to_persistent')
def test_numpydict_get(self):
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".TestStorageObjNumpyDict")
time.sleep(1)
my_so = TestStorageObjNumpyDict()
mynumpydict = np.random.rand(3, 2)
my_so.mynumpydict[0] = mynumpydict
my_so.make_persistent('test_numpydict_get')
time.sleep(2)
self.assertTrue(np.allclose(mynumpydict, my_so.mynumpydict[0]))
def test_numpy_operations(self):
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".TestStorageObjNumpy")
time.sleep(1)
my_so = TestStorageObjNumpy()
base_numpy = np.arange(2048)
my_so.mynumpy = np.arange(2048)
my_so.make_persistent('test_numpy_operations')
time.sleep(2)
self.assertTrue(np.array_equal(base_numpy, my_so.mynumpy))
base_numpy += 1
my_so.mynumpy += 1
self.assertTrue(np.array_equal(base_numpy, my_so.mynumpy))
self.assertEqual(np.average(base_numpy), np.average(my_so.mynumpy))
self.assertEqual(np.mean(base_numpy), np.mean(my_so.mynumpy))
def test_numpy_ops_persistent(self):
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".TestStorageObjNumpy")
time.sleep(1)
my_so = TestStorageObjNumpy()
base_numpy = np.arange(2048)
my_so.mynumpy = np.arange(2048)
my_so.make_persistent(self.current_ksp+'.test_numpy_ops_persistent')
self.assertTrue(np.array_equal(base_numpy, my_so.mynumpy))
base_numpy += 1
my_so.mynumpy += 1
self.assertTrue(np.array_equal(base_numpy, my_so.mynumpy))
my_so.sync()
reloaded_so = TestStorageObjNumpy(self.current_ksp+'.test_numpy_ops_persistent')
self.assertTrue(np.allclose(reloaded_so.mynumpy, base_numpy))
self.assertEqual(np.average(base_numpy), np.average(reloaded_so.mynumpy))
self.assertEqual(np.mean(base_numpy), np.mean(reloaded_so.mynumpy))
def test_numpy_reloading(self):
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".TestStorageObjNumpy")
time.sleep(1)
sizea, sizeb = (1000, 1000)
no = TestStorageObjNumpy(self.current_ksp +".test_numpy_reloading_%d_%d" % (sizea, sizeb))
a = np.ones((sizea, sizeb))
no.mynumpy = a
del no
import gc
gc.collect()
no = TestStorageObjNumpy(self.current_ksp +".test_numpy_reloading_%d_%d" % (sizea, sizeb))
a = no.mynumpy
self.assertEqual(np.shape(a), (sizea, sizeb))
self.assertEqual(np.sum(a), sizea * sizeb)
def test_numpy_reloading_internals(self):
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".TestStorageObjNumpy")
time.sleep(1)
sizea, sizeb = (1000, 1000)
no = TestStorageObjNumpy(self.current_ksp +".test_numpy_reloading_internals_%d_%d" % (sizea, sizeb))
a = np.ones((sizea, sizeb))
no.mynumpy = a
initial_name_so = no._ksp + '.' + no._table
initial_name_np = no.mynumpy._ksp + '.' + no.mynumpy._table
del no
import gc
gc.collect()
no = TestStorageObjNumpy(self.current_ksp +".test_numpy_reloading_internals_%d_%d" % (sizea, sizeb))
a = no.mynumpy
final_name_so = no._ksp + '.' + no._table
final_name_np = no.mynumpy._ksp + '.' + no.mynumpy._table
self.assertEqual(initial_name_so, final_name_so)
self.assertEqual(initial_name_np, final_name_np)
def test_storagedict_assign(self):
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".TestStorageObj")
time.sleep(1)
so = TestStorageObj("t2_1")
table_name = so.test._table
so.test = {}
self.assertNotEquals(table_name, so.test._table)
so.test = {1: 'a', 2: 'b'}
self.assertNotEquals(table_name, so.test._table)
so.test = {3: 'c', 4: 'd'}
self.assertNotEquals(table_name, so.test._table)
def test_storageobj_coherence_basic(self):
#'''
#test that two StorageObjs pointing to the same table work correctly.
#Changing data on one StorageObj is reflected on the other StorageObj.
#'''
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".Test2StorageObj")
time.sleep(1)
so = Test2StorageObj('test_storageobj_coherence_basic')
so.name = 'Oliver'
so.age = 21
so2 = Test2StorageObj('test_storageobj_coherence_basic')
self.assertEqual(so.name, so2.name)
self.assertEqual(so.age, so2.age)
so.name = 'Benji'
so2 = Test2StorageObj('test_storageobj_coherence_basic')
self.assertEqual(so.name, so2.name)
self.assertEqual(so.age, so2.age)
def test_storageobj_coherence_complex1(self):
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".Test2StorageObj")
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".TestStorageObj")
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".Test3StorageObj")
time.sleep(1)
so = Test3StorageObj('test_storageobj_coherence_complex1')
myso_attr = Test2StorageObj()
myso_attr.name = 'Oliver'
myso_attr.age = 21
so.myso = myso_attr # creates my_app.test_myso_0, the original attribute pointed to test_myso
self.assertEqual(myso_attr.name, so.myso.name)
del myso_attr
self.assertEqual(so.myso.age, 21)
def test_storageobj_coherence_complex2(self):
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".Test2StorageObj")
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".TestStorageObj")
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".Test3StorageObj")
time.sleep(1)
so = Test3StorageObj('test_storageobj_coherence_complex2')
myso_attr = Test2StorageObj()
myso_attr.name = 'Oliver'
myso_attr.age = 21
so.myso = myso_attr # creates my_app.test_myso_0, the original attribute pointed to test_myso
# now myso_attr is persistent too, because it has been assigned to a persistent object
# Python behaviour, now the attribute points to the object, no copy made
self.assertTrue(so.myso is myso_attr)
# any change on the nested attribute should change the original and backwards
attr_value = 123
myso_attr.some_attribute = attr_value
myso_attr.name = 'Benji'
self.assertTrue(hasattr(so.myso, 'some_attribute'))
self.assertEqual(so.myso.some_attribute, attr_value)
self.assertEqual(so.myso.name, 'Benji')
# now we unreference the top persistent object called 'so', which was made persistent as 'test_storageobj_coherence_complex2'
del so
# The object pointed by 'so.myso' should still exist because we still have one reference called 'myso_attr'
self.assertTrue(myso_attr is not None)
self.assertTrue(isinstance(myso_attr, Test2StorageObj))
self.assertEqual(myso_attr.name, 'Benji')
def test_get_attr_1(self):
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".TestAttributes")
time.sleep(1)
storage_obj = TestAttributes()
storage_obj.do_nothing_at_all()
value = 123
storage_obj.setvalue(value)
returned = storage_obj.getvalue()
self.assertEqual(value, returned)
def test_get_attr_2(self):
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".TestAttributes")
time.sleep(1)
storage_obj = TestAttributes()
storage_obj.do_nothing_at_all()
value = 123
storage_obj.setvalue(value)
storage_obj.make_persistent("test_get_attr_2")
# check that the in memory attribute is kept
returned = storage_obj.getvalue()
self.assertEqual(value, returned)
# check that the method added by inheritance is correctly called
storage_obj.do_nothing_at_all()
def method_nonexistent():
storage_obj.i_dont_exist()
# check that an attribute method which doesn't exist is detected
self.assertRaises(AttributeError, method_nonexistent)
# check for private methods too (starting with underscore)
def method_nonexistent_pvt():
storage_obj._pvt_dont_exist()
self.assertRaises(AttributeError, method_nonexistent_pvt)
def test_get_attr_3(self):
# the same as test_get_attr_2 but the object is persistent from the beginning
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".TestAttributes")
time.sleep(1)
storage_obj = TestAttributes("test_get_attr_3")
storage_obj.do_nothing_at_all()
value = 123
storage_obj.setvalue(value)
# check that the in memory attribute is kept
returned = storage_obj.getvalue()
self.assertEqual(value, returned)
# check that the method added by inheritance is correctly called
storage_obj.do_nothing_at_all()
def method_nonexistent():
storage_obj.i_dont_exist()
# check that an attribute method which doesn't exist is detected
self.assertRaises(AttributeError, method_nonexistent)
# check for private methods too (starting with underscore)
def method_nonexistent_pvt():
storage_obj._pvt_dont_exist()
self.assertRaises(AttributeError, method_nonexistent_pvt)
storage_obj.key = 123
returned = storage_obj.key
self.assertEqual(storage_obj.key, returned)
def test_recreation_init(self):
#"""
#New StorageObj
#Persistent attributes
#Made persistent on the constructor.
#"""
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".Test2StorageObj")
time.sleep(1)
sobj_name = self.current_ksp+".test_recreation_init"
attr1 = 'Test1'
attr2 = 23
storage_obj = Test2StorageObj(sobj_name)
storage_obj.name = attr1
storage_obj.age = attr2
uuid_sobj = storage_obj.storage_id
storage_obj = None
result_set = iter(config.session.execute("SELECT * FROM hecuba.istorage WHERE storage_id={}".format(uuid_sobj)))
try:
result = next(result_set)
except StopIteration as ex:
self.fail("StorageObj istorage data was not saved")
self.assertEqual(result.name, sobj_name)
storage_obj = Test2StorageObj(sobj_name)
self.assertEqual(storage_obj.name, attr1)
self.assertEqual(storage_obj.age, attr2)
def test_recreation_init2(self):
#"""
#New StorageObj
#Has persistent and volatile attributes
#Made persistent on the constructor.
#"""
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".Test2StorageObj")
time.sleep(1)
sobj_name = self.current_ksp +".test_recreation_init2"
attr1 = 'Test1'
attr2 = 23
storage_obj = Test2StorageObj(sobj_name)
storage_obj.name = attr1
storage_obj.nonpersistent = attr2
uuid_sobj = storage_obj.storage_id
storage_obj = None
result_set = iter(config.session.execute("SELECT * FROM hecuba.istorage WHERE storage_id={}".format(uuid_sobj)))
try:
result = next(result_set)
except StopIteration as ex:
self.fail("StorageObj istorage data was not saved")
self.assertEqual(result.name, sobj_name)
storage_obj = Test2StorageObj(sobj_name)
self.assertEqual(storage_obj.name, attr1)
with self.assertRaises(AttributeError):
attr = storage_obj.age
with self.assertRaises(AttributeError):
attr = storage_obj.nonpersistent
def test_recreation_make_pers(self):
#"""
#New StorageObj
#Persistent attributes
#Made persistent with make_persistent.
#"""
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".Test2StorageObj")
time.sleep(1)
sobj_name = self.current_ksp +".test_recreation_make_pers"
attr1 = 'Test1'
attr2 = 23
storage_obj = Test2StorageObj()
storage_obj.make_persistent(sobj_name)
uuid_sobj = storage_obj.storage_id
storage_obj = None
result_set = iter(config.session.execute("SELECT * FROM hecuba.istorage WHERE storage_id={}".format(uuid_sobj)))
try:
result = next(result_set)
except StopIteration as ex:
self.fail("StorageObj istorage data was not saved")
self.assertEqual(result.name, sobj_name)
storage_obj = Test2StorageObj()
storage_obj.name = attr1
storage_obj.volatile = attr2
storage_obj.make_persistent(sobj_name)
self.assertEqual(storage_obj.name, attr1)
self.assertEqual(storage_obj.volatile, attr2)
with self.assertRaises(AttributeError):
attr = storage_obj.age
def test_recreation_make_pers2(self):
#"""
#New StorageObj
#Persistent attributes
#Made persistent with make_persistent.
#"""
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".Test2StorageObj")
time.sleep(1)
sobj_name = self.current_ksp+".test_recreation_make_pers2"
attr1 = 'Test1'
attr2 = 23
storage_obj = Test2StorageObj()
storage_obj.name = attr1
storage_obj.volatile = 'Ofcourse'
storage_obj.make_persistent(sobj_name)
uuid_sobj = storage_obj.storage_id
storage_obj = None
result_set = iter(config.session.execute("SELECT * FROM hecuba.istorage WHERE storage_id={}".format(uuid_sobj)))
try:
result = next(result_set)
except StopIteration as ex:
self.fail("StorageObj istorage data was not saved")
self.assertEqual(result.name, sobj_name)
storage_obj = Test2StorageObj()
storage_obj.age = attr2
storage_obj.make_persistent(sobj_name)
self.assertEqual(storage_obj.name, attr1)
self.assertEqual(storage_obj.age, attr2)
with self.assertRaises(AttributeError):
attr = storage_obj.volatile
def test_nested_recreation(self):
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".Test2StorageObj")
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".Test4StorageObj")
time.sleep(1)
sobj_name = self.current_ksp +".test_nested_recreation"
storage_obj = Test2StorageObj()
name_attr = 'Test1'
age_attr = 23
storage_obj.name = name_attr
storage_obj.age = age_attr
external_sobj = Test4StorageObj(sobj_name)
external_sobj.myotherso = storage_obj
uuid_sobj_internal = storage_obj.storage_id
uuid_sobj_external = external_sobj.storage_id
internal_name = external_sobj.myotherso._get_name()
storage_obj = None
external_sobj = None
# Check that they have been correctly stored into hecuba.istorage
result_set = iter(
config.session.execute("SELECT * FROM hecuba.istorage WHERE storage_id={}".format(uuid_sobj_external)))
try:
result = next(result_set)
except StopIteration as exc:
self.fail("StorageObj istorage data was not saved")
self.assertEqual(result.name, sobj_name)
result_set = iter(
config.session.execute("SELECT * FROM hecuba.istorage WHERE storage_id={}".format(uuid_sobj_internal)))
try:
result = next(result_set)
except StopIteration as exc:
self.fail("StorageObj istorage data was not saved")
self.assertEqual(result.name, internal_name)
# They are both present in hecuba.istorage
result_set = iter(
config.session.execute(
"SELECT * FROM {} WHERE storage_id={}".format(self.current_ksp+".Test4StorageObj", uuid_sobj_external)))
try:
result = next(result_set)
except StopIteration as exc:
self.fail("StorageObj istorage data was not saved")
self.assertEqual(result.myotherso, uuid_sobj_internal)
# They have been saved with the expected istorage ids
external_sobj = Test4StorageObj(sobj_name)
# Check internal configuration is correct
self.assertEqual(external_sobj.storage_id, uuid_sobj_external)
self.assertEqual(external_sobj.myotherso.storage_id, uuid_sobj_internal)
self.assertEqual(external_sobj._get_name(), sobj_name)
self.assertEqual(external_sobj.myotherso._get_name(), internal_name)
# Check data is correct
self.assertEqual(external_sobj.myotherso.name, name_attr)
self.assertEqual(external_sobj.myotherso.age, age_attr)
def test_single_table(self):
config.session.execute("DROP TABLE IF EXISTS " + self.current_ksp+".Test2StorageObj")
time.sleep(1)
my_obj1 = Test2StorageObj(self.current_ksp+".test_single_tablemy_obj1")
my_obj2 = Test2StorageObj(self.current_ksp+".test_single_tablemy_obj2")
my_obj1.name, my_obj2.name = "Adrian", "Adri"
my_obj1.age, my_obj2.age = 21, 23
self.assertEqual(my_obj1._ksp, my_obj2._ksp)
self.assertEqual(my_obj1._table, my_obj2._table)
res = config.session.execute("SELECT * FROM "+self.current_ksp+".Test2StorageObj WHERE storage_id = %s" % my_obj1.storage_id)
res2 = config.session.execute(
"SELECT * FROM "+self.current_ksp+".Test2StorageObj WHERE storage_id = %s" % my_obj2.storage_id)
self.assertEqual(res.one().name, "Adrian")
self.assertEqual(res2.one().name, "Adri")
self.assertEqual(res.one().age, 21)
self.assertEqual(res2.one().age, 23)
def test_dict_single_table(self):
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".Test5StorageObj")
time.sleep(1)
my_dict = Test5StorageObj(self.current_ksp+".my_dict4")
for i in range(0, 20):
aux = Test2StorageObj(self.current_ksp+".test_dict_single_table" + str(i))
aux.name, aux.age = "RandomName" + str(i), 18 + i
my_dict.test2[i] = aux
for i in range(0, 20):
self.assertEqual(my_dict.test2[i]._ksp, self.current_ksp)
self.assertEqual(my_dict.test2[i]._table, "Test2StorageObj".lower())
res = config.session.execute(
"SELECT * FROM "+self.current_ksp+".Test2StorageObj WHERE storage_id = %s" % my_dict.test2[i].storage_id)
self.assertEqual(res.one().name, "RandomName" + str(i))
self.assertEqual(res.one().age, 18 + i)
def test_time(self):
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".TestTime")
time.sleep(1)
d = TestTime(self.current_ksp+".test_time")
mytime =datetime.time(hour=11, minute=43, second=2, microsecond=90)
d.attr = mytime
del d
mynew_d = TestTime(self.current_ksp+".test_time")
self.assertEqual(mynew_d.attr, mytime)
def test_date(self):
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".TestDate")
time.sleep(1)
d = TestDate(self.current_ksp +".test_date")
mydate = datetime.date(year=1992, month=7, day=25)
d.attr = mydate
del d
mynew_d = TestDate(self.current_ksp+".test_date")
self.assertEqual(mynew_d.attr, mydate)
def test_datetime(self):
config.session.execute("DROP TABLE IF EXISTS " + config.execution_name + ".TestDateTime")
time.sleep(1)
d = TestDateTime(self.current_ksp+".test_datetime")
dtime = datetime.datetime(year=1940, month=10, day=16,
hour=23, minute=59, second=59)
d.attr = dtime
del d
mynew_d = TestDateTime(self.current_ksp+".test_datetime")
self.assertEqual(mynew_d.attr, dtime)
def test_storageobjdict_unnamed(self):
d = TestStorageObjDict("test_sobjdict_unnamed")
d.MyAttribute_2[1]="hola"
d.MyAttribute_3[[42,"hola"]]=666
d.sync()
d = TestStorageObjDict("test_sobjdict_unnamed")
self.assertEqual(d.MyAttribute_2[1], "hola")
self.assertEqual(d.MyAttribute_3[[42,"hola"]], 666)
if __name__ == '__main__':
unittest.main()
|
|
""" Function to convert raw modbus value """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import json
from . import time_delta_json
def unit(raw_table, base_index):
""" Direct word value """
raw_value = raw_table[base_index]
sign = 1
if raw_value & 0x8000:
sign = -1
return sign * (raw_value & 0x7FFF)
def tenth(raw_table, base_index):
""" Word value divide by ten """
raw_value = raw_table[base_index]
if raw_value == 0xFFFF:
return None
sign = 1
if raw_value & 0x8000:
sign = -1
return sign * (raw_value & 0x7FFF) / 10
def unit_and_ten(raw_table, base_index):
""" Two word values, 0000x and xxxx0 """
return raw_table[base_index] + 10 * raw_table[base_index + 1]
def anticipation(raw_table, base_index):
""" 101 for None or value divide by ten """
raw_value = raw_table[base_index]
if raw_value == 101:
return None
return tenth(raw_table, base_index)
def footprint(raw_table, base_index):
""" 150 for None or value divide by ten """
raw_value = raw_table[base_index]
if raw_value == 150:
return None
return tenth(raw_table, base_index)
def power(raw_table, base_index):
""" Value of MWh, KWh, Wh or None if 65535 """
if (raw_table[base_index] == 0xFFFF
or raw_table[base_index + 1] == 0xFFFF
or raw_table[base_index + 2] == 0xFFFF):
return None
return (raw_table[base_index] * 1000 + raw_table[base_index + 1]) * 1000 + raw_table[base_index + 2]
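# Illustrative example (made-up register values, not from a real device):
# a raw word of 0x8023 has the sign bit set and a magnitude of 0x23 = 35,
# so unit() returns -35 and tenth() returns -3.5; for power(), the words
# [1, 250, 300] combine to (1 * 1000 + 250) * 1000 + 300 = 1250300 Wh.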
BIT_ANTIFREEZE = 1
BIT_NIGHT = 2
BIT_DAY = 4
BIT_AUTO = 8
BIT_DHW = 16
BIT_END_OF_PROGRAM = 32
BIT_DHW_END_OF_PROGRAM = 64
BIT_ALL_ZONE = 128
def derog_bit_french(raw_table, base_index):
""" Convert derog bit flag to french """
value = raw_table[base_index]
stringvalue = ""
if value & BIT_ANTIFREEZE:
stringvalue += "Antigel "
if value & BIT_NIGHT:
stringvalue += "Nuit "
if value & BIT_DAY:
stringvalue += "Jour "
if value & BIT_AUTO:
stringvalue += "Automatique "
if value & BIT_DHW:
stringvalue += "Eau "
if value & BIT_END_OF_PROGRAM:
stringvalue += "jusqu'a la fin du programme "
if value & BIT_DHW_END_OF_PROGRAM:
stringvalue += "jusqu'a la fin du programme (eau) "
if value & BIT_ALL_ZONE:
stringvalue += "toutes les zones"
return stringvalue
def derog_bit_english(raw_table, base_index):
""" Convert derog bit flag to English """
value = raw_table[base_index]
stringvalue = ""
if value & BIT_ANTIFREEZE:
stringvalue += "Antifreeze/vacation "
if value & BIT_NIGHT:
stringvalue += "Night "
if value & BIT_DAY:
stringvalue += "Day "
if value & BIT_AUTO:
stringvalue += "Automatic "
if value & BIT_DHW:
stringvalue += "Water "
if value & BIT_END_OF_PROGRAM:
stringvalue += "until the end of program "
if value & BIT_DHW_END_OF_PROGRAM:
stringvalue += "until the end of program (warm water) "
if value & BIT_ALL_ZONE:
stringvalue += "all zones"
return stringvalue
def derog_bit_simple_french(raw_table, base_index):
""" Convert derog bit flag to french do not handle all case """
value = raw_table[base_index]
stringvalue = ""
if value & BIT_ANTIFREEZE:
stringvalue = "Vacances"
if value & BIT_NIGHT:
stringvalue = "Nuit"
if value & BIT_DAY:
stringvalue = "Jour"
if value & BIT_AUTO:
stringvalue = "Automatique"
return stringvalue
def derog_bit_simple_english(raw_table, base_index):
""" Convert derog bit flag to English do not handle all case """
value = raw_table[base_index]
stringvalue = ""
if value & BIT_ANTIFREEZE:
stringvalue = "Vacation"
if value & BIT_NIGHT:
stringvalue = "Night"
if value & BIT_DAY:
stringvalue = "Day"
if value & BIT_AUTO:
stringvalue = "Automatic"
return stringvalue
def boiler_state_bit_english(raw_table, base_index):
""" Convert derog bit flag to English """
value = raw_table[base_index]
stringvalue = ""
if value & (1 << 1):
stringvalue += "[Direct Circuit OFF] "
if value & (1 << 2):
stringvalue += "[3WV Circuit OFF] "
if value & (1 << 3):
stringvalue += "[Secondary pump] "
if value & (1 << 15):
stringvalue += "[Cascade faliure] "
return stringvalue
def hp_state_english(raw_table, base_index):
""" Convert HP state to English """
value = raw_table[base_index]
if value == 0:
return "Stop"
if value == 1:
return "Heating mode"
if value == 2:
return "Heating mode+comp"
if value == 4:
return "Cooling mode"
if value == 5:
return "Cooling mode+comp on"
return "Unknown"
def hp_state_bit_english(raw_table, base_index):
""" Convert derog bit flag to English """
value = raw_table[base_index]
stringvalue = ""
if value & (1 << 1):
stringvalue += "[Defrosting] "
if value & (1 << 1):
stringvalue += "[Boiler Pump Backup] "
if value & (1 << 1):
stringvalue += "[Boiler Backup] "
if value & (1 << 1):
stringvalue += "[HP Pump] "
if value & (1 << 1):
stringvalue += "[Backup 2] "
if value & (1 << 1):
stringvalue += "[Backup 1] "
if value & (1 << 1):
stringvalue += "[Compressor] "
return stringvalue
def system_input_state_english(raw_table, base_index):
""" Convert system input state to English """
value = raw_table[base_index]
if value == 0:
return "Disable"
if value == 1:
return "System"
if value == 2:
return "Storage tank"
if value == 3:
return "DHW STRAT"
if value == 4:
return "Storage tank+ DHW"
return "Unknown"
def zone_aux_type_english(raw_table, base_index):
""" Convert zone aux type to English """
value = raw_table[base_index]
if value == 0:
return "NA"
if value == 1:
return "NA"
if value == 2:
return "NA"
if value == 3:
return "DHW loop"
if value == 4:
return "NA"
if value == 5:
return "Program"
if value == 8:
return "primary pump"
if value == 9:
return "burner command"
if value == 11:
return "DHW"
if value == 13:
return "failure"
if value == 15:
return "Electrical DHW"
if value == 17:
return "VM pump"
if value == 18:
return "cascade failure"
return "Unknown"
def active_mode_french(raw_table, base_index):
""" Convert mode to french """
value = raw_table[base_index]
if value == 0:
return "Antigel"
if value == 2:
return "Nuit"
if value == 4:
return "Jour"
return "Inconnu"
def active_mode_english(raw_table, base_index):
""" Convert mode to English """
value = raw_table[base_index]
if value == 0:
return "Vacation"
if value == 2:
return "Night"
if value == 4:
return "Day"
return "Unknown"
def boiler_mode_french(raw_table, base_index):
""" Convert boiler mode to french """
value = raw_table[base_index]
if value == 4:
return "Ete"
if value == 5:
return "Hiver"
return "Inconnu"
def boiler_mode_english(raw_table, base_index):
""" Convert boiler mode to french """
value = raw_table[base_index]
if value == 4:
return "Summer"
if value == 5:
return "Winter"
return "Unknown"
def day_schedule(raw_table, base_index):
""" Convert schedule of present/away """
current_mode = 0
start_time = datetime.timedelta()
current_time = datetime.timedelta()
schedule = []
interval_for_bit = datetime.timedelta(minutes=30)
for word in raw_table[base_index:base_index + 3]:
for _ in range(16):
mode = word & 0x8000
word <<= 1
# end of period
if mode == 0 and current_mode != 0:
schedule.append((start_time, current_time))
current_mode = mode
current_time += interval_for_bit
# before period
if mode == 0:
start_time = current_time
current_mode = mode
if current_mode != 0:
schedule.append((start_time, current_time))
return schedule
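# Illustrative sketch (assumed bitmap, not real boiler data): with the three
# schedule words all equal to 0xFF00, the high 8 bits of each word mark the
# first eight half-hour slots of every 16-slot block as "present", so
# day_schedule() returns the timedelta intervals 00:00-04:00, 08:00-12:00
# and 16:00-20:00.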
def zone_a_type_english(raw_table, base_index):
""" Convert zone b type to English """
value = raw_table[base_index]
if value == 0:
return "Disable"
if value == 1:
return "Direct"
if value == 2:
return "NA"
if value == 3:
return "NA"
if value == 4:
return "NA"
if value == 5:
return "Program"
if value == 6:
return "NA"
if value == 7:
return "H.temp"
if value == 8:
return "NA"
if value == 9:
return "NA"
if value == 10:
return "NA"
if value == 11:
return "DHW"
if value == 12:
return "NA"
if value == 13:
return "NA"
if value == 14:
return "NA"
if value == 15:
return "Electrical DHW"
return "Unknown"
def zone_bc_type_english(raw_table, base_index):
""" Convert zone b/c type to English """
value = raw_table[base_index]
if value == 0:
return "NA"
if value == 1:
return "Direct"
if value == 2:
return "3WV"
if value == 3:
return "NA"
if value == 4:
return "swiming pool"
return "Unknown"
def error_code(raw_table, base_index):
""" Convert error codes """
value = raw_table[base_index]
if value == 0x0000:
return "D3:OUTL S.B FAIL."
if value == 0x0001:
return "D4:OUTL S.C FAIL."
if value == 0x0002:
return "D5:OUTSI.S.FAIL."
if value == 0x0003:
return "D7:SYST.SENS.FAIL."
if value == 0x0004:
return "D9:DHW S.FAILURE"
if value == 0x0005:
return "D11:ROOM S.A FAIL."
if value == 0x0006:
return "D12:ROOM S.B FAIL."
if value == 0x0007:
return "D13:ROOM S.C FAIL."
if value == 0x0008:
return "D14:MC COM.FAIL"
if value == 0x0009:
return "D15:ST.TANK S.FAIL"
if value == 0x000A:
return "D16:SWIM.P.B.S.FA"
if value == 0x000B:
return "D16:SWIM.P.C.S.FA"
if value == 0x000C:
return "D17:DHW 2 S.FAIL"
if value == 0x000D:
return "D27:PCU COM.FAIL"
if value == 0x000E:
return "Not Available"
if value == 0x000F:
return "Not Available"
if value == 0x0010:
return "Not Available"
if value == 0x0011:
return "Not Available"
if value == 0x0012:
return "D32:5 RESET:ON/OFF"
if value == 0x0013:
return "D37:TA-S SHORT-CIR"
if value == 0x0014:
return "D38:TA-S DISCONNEC"
if value == 0x0015:
return "D39:TA-S FAILURE"
if value == 0x0016:
return "D50:OTH COM.FAIL"
if value == 0x0017:
return "D51:DEF :SEE BOILER"
if value == 0x0018:
return "D18:SOL.HW S.FAIL"
if value == 0x0019:
return "D19:SOL.COL.S.FAIL"
if value == 0x001A:
return "D20:SOL COM.FAIL"
if value == 0x001B:
return "D99:DEF.BAD PCU"
if value == 0x001C:
return "D40:FAIL UNKNOWN"
if value == 0x001D:
return "D254:FAIL UNKNOWN"
if value == 0x800:
return "B0:PSU FAIL"
if value == 0x801:
return "B1:PSU PARAM FAIL"
if value == 0x802:
return "B2:EXCHAN.S.FAIL"
if value == 0x803:
return "B3:EXCHAN.S.FAIL"
if value == 0x804:
return "B4:EXCHAN.S.FAIL"
if value == 0x805:
return "B5:STB EXCHANGE"
if value == 0x806:
return "B6:BACK S.FAILURE"
if value == 0x807:
return "B7:BACK S.FAILURE"
if value == 0x808:
return "B8:BACK S.FAILURE"
if value == 0x809:
return "B9:STB BACK"
if value == 0x80A:
return "B10:DT.EXCH.BAC.FAIL"
if value == 0x80B:
return "B11:DT.BAC.EXCH.FAIL"
if value == 0x80C:
return "B12:STB OPEN"
if value == 0x80D:
return "B14:BURNER FAILURE"
if value == 0x80E:
return "B15:CCE.TST.FAIL"
if value == 0x80F:
return "B16:PARASIT FLAME"
if value == 0x810:
return "B17:VALVE FAIL"
if value == 0x811:
return "B32:DEF.OUTLET S."
if value == 0x812:
return "B33:DEF.OUTLET S."
if value == 0x813:
return "B34:FAN FAILURE"
if value == 0x814:
return "B35:BACK>BOIL FAIL"
if value == 0x815:
return "B36:I-CURRENT FAIL"
if value == 0x816:
return "B37:SU COM.FAIL"
if value == 0x817:
return "B38:PCU COM.FAIL"
if value == 0x818:
return "B39:BL OPEN FAIL"
if value == 0x819:
return "B255:FAIL UNKNOWN"
if value == 0x81A:
return "B254:FAIL UNKNOWN"
if value == 0x1000:
return "DEF.PSU 00"
if value == 0x1001:
return "DEF.PSU PARAM 01"
if value == 0x1002:
return "DEF.S.DEPART 02"
if value == 0x1003:
return "DEF.S.DEPART 03"
if value == 0x1004:
return "DEF.S.DEPART 04"
if value == 0x1005:
return "STB DEPART 05"
if value == 0x1006:
return "DEF.S.RETOUR 06"
if value == 0x1007:
return "DEF.S.RETOUR 07"
if value == 0x1008:
return "DEF.S.RETOUR 08"
if value == 0x1009:
return "STB RETOUR 09"
if value == 0x100A:
return "DT.DEP-RET<MIN 10"
if value == 0x100B:
return "DT.DEP-RET>MAX 11"
if value == 0x100C:
return "STB OUVERT 12"
if value == 0x100D:
return "DEF.ALLUMAGE 14"
if value == 0x100E:
return "FLAM.PARASI. 16"
if value == 0x100F:
return "DEF.VANNE GAZ 17"
if value == 0x1010:
return "DEF.VENTILO 34"
if value == 0x1011:
return "DEF.RET>CHAUD 35"
if value == 0x1012:
return "DEF.IONISATION 36"
if value == 0x1013:
return "DEF.COM.SU 37"
if value == 0x1014:
return "DEF.COM PCU 38"
if value == 0x1015:
return "DEF BL OUVERT 39"
if value == 0x1016:
return "DEF.TEST.HRU 40"
if value == 0x1017:
return "DEF.MANQUE EAU 250"
if value == 0x1018:
return "DEF.MANOMETRE 251"
if value == 0x1019:
return "DEF.INCONNU 255"
if value == 0x101A:
return "DEF.INCONNU 254"
if value == 0x1800:
return "L0:PSU FAIL"
if value == 0x1801:
return "L1:PSU PARAM FAIL"
if value == 0x1802:
return "L2:STB OUTLET"
if value == 0x1803:
return "L3:DEF.OIL.SENSOR"
if value == 0x1804:
return "L4:BURNER FAILURE"
if value == 0x1805:
return "L5:DEF.INTERNAL"
if value == 0x1806:
return "L6:DEF.SPEED.MOT"
if value == 0x1807:
return "L7:DEF.T.WARM UP"
if value == 0x1808:
return "L8:DEF.PAR.FLAME"
if value == 0x1809:
return "L9:OIL.PRES FAIL."
if value == 0x180A:
return "L30:SMOKE PRE.FAIL"
if value == 0x180B:
return "L31:DEF.SMOKE.TEMP"
if value == 0x180C:
return "L32:DEF.OUTLET S."
if value == 0x180D:
return "L33:DEF.OUTLET S."
if value == 0x180E:
return "L34:BACK S.FAILURE"
if value == 0x180F:
return "L35:BACK S.FAILURE"
if value == 0x1810:
return "L36:DEF.FLAME LOS"
if value == 0x1811:
return "L37:SU COM.FAIL"
if value == 0x1812:
return "L38:PCU COM.FAIL"
if value == 0x1813:
return "L39:BL OPEN FAIL"
if value == 0x1814:
return "L250:DEF.WATER MIS."
if value == 0x1815:
return "L251:MANOMETRE FAIL"
if value == 0x1816:
return "L255:FAIL UNKNOWN"
if value == 0x1817:
return "L254:FAIL UNKNOWN"
if value == 0x2000:
return "L1:DEF.COMP.PAC"
if value == 0x2001:
return "L2:DEF.V4V PAC"
if value == 0x2002:
return "L3:DEF.POMPE PAC"
if value == 0x2003:
return "L4:PAC HORS LIMIT"
if value == 0x2004:
return "L5:DEF.DEB.PAC 6"
if value == 0x2005:
return "L6:DEF.DEB.PAC 8"
if value == 0x2006:
return "L7:DEF.COM.PAC"
if value == 0x2007:
return "L8:DEF.S.SOR.COMP"
if value == 0x2008:
return "L9:DEF.H.P PAC"
if value == 0x2009:
return "L10:DEF.B.P PAC"
if value == 0x200A:
return "L11:DEF.PRES.SOURC"
if value == 0x200B:
return "L12:DEF.ANTI.SOUR."
if value == 0x200C:
return "L13:DEF.P.SOURCE"
if value == 0x200D:
return "L14:DEF.ANTI.COND."
if value == 0x200E:
return "L15:DEF.DEGIVRAGE"
if value == 0x200F:
return "L16:DEF.PROT.MOT."
if value == 0x2010:
return "L17:DEF.S.GAZ.CH."
if value == 0x2011:
return "L18:DEF.COM.PAC"
if value == 0x2012:
return "L19:DEF.S.DEP.PAC"
if value == 0x2013:
return "L20:DEF.S.RET.PAC"
if value == 0x2014:
return "L21:DEF.S.EXT.ENT."
if value == 0x2015:
return "L22:DEF.S.EXT.SOR."
if value == 0x2016:
return "L23:DEF.S.GAZ EXP."
if value == 0x2017:
return "L24:DEF.S.EVAPO."
if value == 0x2018:
return "L25:DEF.S.CONDENS."
if value == 0x2019:
return "L32:BL.USER.RESET"
if value == 0x201A:
return "L33:DEF.DEBIT"
if value == 0x201B:
return "L255:DEF.INCONNU"
if value == 0x201C:
return "L254:DEF.INCONNU"
if value == 0xFFFF:
return "no error"
return "Unknown"
def language_english(raw_table, base_index):
""" Convert language name to English """
value = raw_table[base_index]
if value == 0:
return "French"
if value == 1:
return "German"
if value == 2:
return "English"
if value == 3:
return "Italian"
if value == 4:
return "Spanish"
if value == 5:
return "Dutch"
if value == 6:
return "Polish"
if value == 7:
return "Turkish"
if value == 8:
return "Russian"
return "Unknown"
def json_week_schedule(raw_table, base_index):
""" Convert week schedule to a JSON """
schedule = {}
for day in range(7):
schedule[day] = day_schedule(raw_table, base_index + day * 3)
encoder = time_delta_json.CustomDateJSONEncoder()
return encoder.encode(schedule)
def hours_minutes_secondes(raw_table, base_index):
""" Convert raw value to hours """
return "%02d:%02d:%02d" % (raw_table[base_index],
raw_table[base_index + 1],
raw_table[base_index + 2])
def hours_minutes(raw_table, base_index):
""" Convert raw value to hours """
return "%02d:%02d" % (raw_table[base_index],
raw_table[base_index + 1])
def day_month(raw_table, base_index):
""" Convert raw value to date """
return "%02d/%02d" % (raw_table[base_index],
raw_table[base_index + 1])
def day_month_year(raw_table, base_index):
""" Convert raw value to date """
return "%02d/%02d/%02d" % (raw_table[base_index],
raw_table[base_index + 1],
raw_table[base_index + 2])
def decrease_french(raw_table, base_index):
""" Convert decrease flag to french """
if raw_table[base_index] == 0:
return "stop"
else:
return "abaissement"
def decrease_english(raw_table, base_index):
""" Convert decrease flag to french """
if raw_table[base_index] == 0:
return "stop"
else:
return "decreasing"
def off_on(raw_table, base_index):
""" Convert off/on flag to text """
if raw_table[base_index] == 0:
return "off"
else:
return "on"
OUTPUT1_BURNER = 3
OUTPUT1_HYDRAULIC_VALVE_OPEN = 1 << 2
OUTPUT1_HYDRAULIC_VALVE_CLOSE = 1 << 3
OUTPUT1_BOILER_PUMP = 1 << 4
# It's ON on my boiler; I want to track it.
OUTPUT1_UNKNOW1 = 1 << 5
OUTPUT2_DHW_PUMP = 1 << 0
OUTPUT2_ZONEA_PUMP = 1 << 1
OUTPUT2_ZONEB_PUMP = 1 << 4
OUTPUT2_ZONEB_3WV_OPEN = 1 << 5
OUTPUT2_ZONEB_3WV_CLOSE = 1 << 6
OUTPUT2_ZONEC_PUMP = 1 << 7
OUTPUT2_ZONEC_3WV_OPEN = 1 << 8
OUTPUT2_ZONEC_3WV_CLOSE = 1 << 9
OUTPUT2_AUX_PUMP = 1 << 10
def output_state(raw_table, base_index):
""" Convert output state to JSON """
result = {}
val = raw_table[base_index]
result["burner"] = val & OUTPUT1_BURNER
result["hydraulic_valve_open"] = bool(val & OUTPUT1_HYDRAULIC_VALVE_OPEN)
result["hydraulic_valve_close"] = bool(val & OUTPUT1_HYDRAULIC_VALVE_CLOSE)
result["hydraulic_boiler_pump"] = bool(val & OUTPUT1_BOILER_PUMP)
result["UNKNOWN1"] = bool(val & OUTPUT1_UNKNOW1)
val = raw_table[base_index + 1]
result["DHW_pump"] = bool(val & OUTPUT2_DHW_PUMP)
result["zone_A_pump"] = bool(val & OUTPUT2_ZONEA_PUMP)
result["zone_B_pump"] = bool(val & OUTPUT2_ZONEB_PUMP)
result["zone_B_3WV_open"] = bool(val & OUTPUT2_ZONEB_3WV_OPEN)
result["zone_B_3WV_close"] = bool(val & OUTPUT2_ZONEB_3WV_CLOSE)
result["zone_C_pump"] = bool(val & OUTPUT2_ZONEC_PUMP)
result["zone_C_3WV_open"] = bool(val & OUTPUT2_ZONEC_3WV_OPEN)
result["zone_C_3WV_close"] = bool(val & OUTPUT2_ZONEC_3WV_CLOSE)
result["AUX_pump"] = bool(val & OUTPUT2_AUX_PUMP)
return json.dumps(result)
BASEECS_AUX_PUMP = 1
BASEECS_ZONEA_PUMP_BOILER = 1 << 1
BASEECS_BURNER_1_2 = 1 << 2
BASEECS_BURNER_1_1 = 1 << 3
BASEECS_ZONEA_PUMP = 1 << 4
BASEECS_DHW_PUMP = 1 << 5
BASEECS_ALARM_BURNER = 1 << 6
# BASEECS_ = 1 << 7
BASEECS_VALVE = 1 << 8
def base_ecs(raw_table, base_index):
""" Convert base_ecs state to JSON """
result = {}
val = raw_table[base_index]
result["AUX_pump"] = bool(val & BASEECS_AUX_PUMP)
result["zone_A_pump_boiler"] = bool(val & BASEECS_ZONEA_PUMP_BOILER)
result["burner_1_2"] = bool(val & BASEECS_BURNER_1_2)
result["burner_1_1"] = bool(val & BASEECS_BURNER_1_1)
result["zone_A_pump"] = bool(val & BASEECS_ZONEA_PUMP)
result["DHW_pump"] = bool(val & BASEECS_DHW_PUMP)
result["Alarm_burner"] = bool(val & BASEECS_ALARM_BURNER)
result["valve"] = bool(val & BASEECS_VALVE)
return json.dumps(result)
def fan(raw_table, base_index):
""" Convert for fan speed """
val = raw_table[base_index]
return val & 0x007F
def texte14(raw_table, base_index):
""" Convert 14 char of text """
result = ''
for word in raw_table[base_index:base_index + 7]:
result = result + chr(word >> 8) + chr(word & 0x00FF)
return result
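# Illustrative example (assumed values): the words [0x4845, 0x4C4C, 0x4F20]
# decode pairwise to 'H', 'E', 'L', 'L', 'O', ' '; texte14() always reads
# seven words, so it yields a 14-character string.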
def write_unit(value):
""" Convert unit value to modbus value """
return [int(value)]
def write_tenth(value):
""" Convert tenth value to modbus value """
int_value = int(float(value) * 10)
if int_value < 0:
int_value = abs(int_value) | 0x8000
return [int_value]
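# Illustrative round trip (assumed value): write_tenth("-3.5") yields
# [0x8023], i.e. 35 with the sign bit set, which tenth() decodes back
# to -3.5.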
DEROG_NAME_TO_VALUE = {
"Vacances": BIT_ANTIFREEZE | BIT_END_OF_PROGRAM,
"Nuit": BIT_NIGHT | BIT_END_OF_PROGRAM,
"Jour": BIT_DAY | BIT_END_OF_PROGRAM,
"Automatique": BIT_AUTO,
"Vacation": BIT_ANTIFREEZE | BIT_END_OF_PROGRAM,
"Night": BIT_NIGHT | BIT_END_OF_PROGRAM,
"Day": BIT_DAY | BIT_END_OF_PROGRAM,
"Automatic": BIT_AUTO
}
def write_derog_bit_simple(value):
""" Convert French Mode to bit value """
if value not in DEROG_NAME_TO_VALUE:
return None
return [DEROG_NAME_TO_VALUE[value]]
LANGUAGE_NAME_TO_VALUE = {
"French": 0,
"German": 1,
"English": 2,
"Italian": 3,
"Spanish": 4,
"Dutch": 5,
"Polish": 6,
"Turkish": 7,
"Russian": 8
}
def write_language(value):
""" Convert French Mode to bit value """
if value not in LANGUAGE_NAME_TO_VALUE:
return None
return [LANGUAGE_NAME_TO_VALUE[value]]
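# Minimal self-check sketch (illustrative register values only, not a real
# register map): exercises a few of the converters above when this module is
# run directly.
if __name__ == '__main__':
    demo_table = [0x8023, 101, 1, 250, 300, BIT_NIGHT | BIT_END_OF_PROGRAM]
    print(tenth(demo_table, 0))                     # -3.5
    print(anticipation(demo_table, 1))              # None (101 means no anticipation)
    print(power(demo_table, 2))                     # 1250300
    print(derog_bit_simple_english(demo_table, 5))  # "Night"
    print(write_derog_bit_simple("Night"))          # [34]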
|
|
#!/usr/bin/env python
#------------------------------------------------------------------------------
# Copyright 2015 Esri
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#==============================================================================
#Name: CopyItemToEachAccount.py
#
#Purpose: Adds the specified item(s) to each portal account.
#
#          - If the item already exists in the user account (existence is
#            based on the item title), the existing item is updated.
#
# - To exclude certain portal accounts from receiving the
#            item, add the user account names to the "exclude_users"
# variable.
#
#==============================================================================
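# Example invocation (illustrative URL, credentials and GUIDs only):
#   python CopyItemToEachAccount.py https://portal.example.com/arcgis admin adminpassword guid1,guid2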
import sys
import os
import time
import traceback
import json
import tempfile
import shutil
sys.path.append(os.path.join(os.path.join(os.path.dirname(
os.path.dirname(sys.argv[0])), 'Publish'), 'Portal'))
from portalpy import Portal, provision
import PortalContentExtract
# Get script name
scriptName = sys.argv[0]
# Create list of users to exclude
exclude_users = ['admin',
'system_publisher',
'ArcGISforDefense',
'Demonstration',
'DemoEManagement',
'DemoIntelligence',
'DemoLGovt',
'DemoMilitary',
'DemoNationalSecurity',
'DemoParksGardens',
'DemoSGovt',
'DemoUtilities',
'ReleasedEManagement',
'ReleasedIntelligence',
'ReleasedLGovt',
'ReleasedMilitary',
'ReleasedOpsServer',
'ReleasedParksGardens',
'ReleasedSGovt',
'ReleasedUtilities',
'TemplateIntelligence',
'TemplateMilitary']
# Get current working directory
start_dir = os.getcwd()
# Set section break characteristics for print statements
sec_len = 75
sec_char = '-'
def check_args():
# -----------------------------------------------------------------
# Check arguments
# -----------------------------------------------------------------
if len(sys.argv) != 5:
print '\n' + scriptName + ' <PortalURL> <AdminUser> ' \
'<AdminUserPassword> <GUID{,GUID...}>'
print '\nWhere:'
print '\n\t<PortalURL> (required): URL of Portal ' \
'(i.e. https://fully_qualified_domain_name/arcgis)'
print '\n\t<AdminUser> (required): Primary portal administrator user.'
print '\n\t<AdminUserPassword> (required): Password for AdminUser.'
print '\n\t<GUID{,GUID...}> (required): GUIDs of portal items to ' \
'add to other user accounts.'
print '\n\nNOTE: The specified items will not be added to the following accounts:'
print '\t{}\n'.format(exclude_users)
return None
else:
# Set variables from parameter values
portal_address = sys.argv[1]
adminuser = sys.argv[2]
password = sys.argv[3]
guids = sys.argv[4]
src_ids = [guid.strip() for guid in guids.split(',')]
return portal_address, adminuser, password, src_ids
def validate_guids(portal, guids):
invalid_guids = []
for guid in guids:
search_results = portal.search(q='id:{}'.format(guid))
if len(search_results) == 0:
invalid_guids.append(guid)
return invalid_guids
def get_folder_name(portal, owner, folder_id):
folder_name = None
if folder_id:
folders = portal.folders(owner)
for folder in folders:
if folder_id == folder['id']:
folder_name = folder['title']
return folder_name
def has_folder(portal, owner, folder_name):
''' Determines if folder already exists '''
exists = False
if folder_name:
for folder in portal.folders(owner):
if folder_name == folder['title']:
exists = True
break
return exists
def main():
output_root = None
# Get script parameters
results = check_args()
if not results:
sys.exit(0)
portal_address, adminuser, password, src_ids = results
try:
# Create portal connection object
portal = Portal(portal_address, adminuser, password)
# Check if any specified GUIDs do not exist
invalid_guids = validate_guids(portal, src_ids)
if len(invalid_guids) > 0:
raise Exception(
'ERROR: The following portal items do not exist: {}'.format(
invalid_guids))
# Create list of users
users = [org_user['username'] for org_user in portal.org_users()]
target_users = [user for user in users if user not in exclude_users]
# -----------------------------------------------------------------
# Extract portal items
# -----------------------------------------------------------------
print '\n\n{}\nExtracting select portal items...\n{}\n'.format(
sec_char * sec_len, sec_char * sec_len)
# Create temporary extract folder in OS users' temp directory
output_root = os.path.join(tempfile.gettempdir(),
os.path.basename(
sys.argv[0]).split('.')[0] + '_Extract' )
os.makedirs(output_root)
print 'Extract folder: {}'.format(output_root)
# Extract specified portal item(s)
for src_id in src_ids:
src_item = portal.item(src_id)
os.chdir(output_root)
print '- Extracting item {} "{}" ({}) from user account {}...'.format(
src_item['id'], src_item['title'],
src_item['type'], src_item['owner'])
PortalContentExtract.extract_item(
portal, src_item['id'],
src_item['owner'])
# Create list of paths to individual extracted portal item folders
src_item_paths = [os.path.join(output_root,
src_id) for src_id in src_ids]
# -----------------------------------------------------------------
# Publish extracted portal items for each user
# -----------------------------------------------------------------
print '\n\n{}\nPublish extracted items to each portal ' \
'user account...\n{}'.format(sec_char * sec_len,
sec_char * sec_len)
print 'NOTE: not publishing to the following users:'
print exclude_users
for target_user in target_users:
print '\n\nUser Account: {}'.format(target_user)
# Get info about user folders
target_user_folders = portal.folders(target_user)
for src_item_path in src_item_paths:
# Get info about the source item
os.chdir(src_item_path)
src_item_json = json.load(open('item.json'))
item_title = src_item_json['title']
item_type = src_item_json['type']
item_id = src_item_json['id']
item_owner = src_item_json['owner']
item_folder_id = src_item_json['ownerFolder']
# Create folder in user account for item
item_folder_name = get_folder_name(portal, item_owner,
item_folder_id)
if item_folder_name:
if not has_folder(portal, target_user, item_folder_name):
print 'Creating target folder "{}" in account ' \
'{}...'.format(item_folder_name, target_user)
portal.create_folder(target_user, item_folder_name)
# Check if user already owns item
user_items = portal.search(
q='owner:{} AND type:{} AND title:{}'.format(
target_user, item_type, item_title))
# Add item if item does not exist in user account or
# update item if it already exists
if len(user_items) == 0:
print '\n- Add item "{}" ({}) to user account {}...'.format(
item_title, item_type,
portal.logged_in_user()['username'])
item, orig_id = provision.load_item(portal, src_item_path)
print '- Reassign item to user account {}, ' \
'folder "{}"...'.format(target_user,
item_folder_name)
portal.reassign_item(item.get('id'), target_user, item_folder_name)
else:
for user_item in user_items:
if user_item['id'] != item_id:
print '\n- Update existing item {} ' \
'"{}" ({}) user account {}...'.format(
user_item['id'], user_item['title'],
user_item['type'], user_item['owner'])
item, orig_id = provision.load_item(
portal, src_item_path,
user_item['id'])
print '- Reassign item to user account {}, ' \
'folder "{}"...'.format(target_user,
item_folder_name)
portal.reassign_item(item.get('id'), target_user, item_folder_name)
else:
print '*** No need to update item {}; ' \
'user is owner of extracted item.'.format(
user_item['id'])
print '\n\nDone.'
except:
# Get the traceback object
tb = sys.exc_info()[2]
tbinfo = traceback.format_tb(tb)[0]
# Concatenate information together concerning the error
# into a message string
pymsg = "PYTHON ERRORS:\nTraceback info:\n" + tbinfo + \
"\nError Info:\n" + str(sys.exc_info()[1])
# Print Python error messages for use in Python / Python Window
print
print "***** ERROR ENCOUNTERED *****"
print pymsg + "\n"
finally:
# Change directory to starting directory, otherwise the
# delete will fail.
os.chdir(start_dir)
# Delete temp extracted folder/files
if output_root:
if os.path.exists(output_root):
shutil.rmtree(output_root)
if __name__ == "__main__":
main()
|
|
import os
from collections import Counter
from contextlib import closing, contextmanager
import six
from exporters.default_retries import retry_long
from exporters.progress_callback import BotoDownloadProgress
from exporters.utils import CHUNK_SIZE, split_file, calculate_multipart_etag, get_bucket_name, \
get_boto_connection
from exporters.writers.base_writer import InconsistentWriteState
from exporters.writers.filebase_base_writer import FilebaseBaseWriter
DEFAULT_BUCKET_REGION = 'us-east-1'
@contextmanager
def multipart_upload(bucket, key_name, **kwargs):
mp = bucket.initiate_multipart_upload(key_name, **kwargs)
try:
yield mp
mp.complete_upload()
except:
mp.cancel_upload()
raise
def should_use_multipart_upload(path, bucket):
from boto.exception import S3ResponseError
# We need to check if we have READ permissions on this bucket, as they are
# needed to perform the complete_upload operation.
try:
acl = bucket.get_acl()
for grant in acl.acl.grants:
if grant.permission == 'READ':
break
except S3ResponseError:
return False
return os.path.getsize(path) > CHUNK_SIZE
class S3Writer(FilebaseBaseWriter):
"""
Writes items to S3 bucket. It is a File Based writer, so it has filebase
option available
- bucket (str)
Name of the bucket to write the items to.
- aws_access_key_id (str)
Public acces key to the s3 bucket.
- aws_secret_access_key (str)
Secret access key to the s3 bucket.
- filebase (str)
Base path to store the items in the bucket.
- aws_region (str)
AWS region to connect to.
- save_metadata (bool)
Save key's items count as metadata. Default: True
- filebase
Path to store the exported files
"""
supported_options = {
'bucket': {'type': six.string_types},
'aws_access_key_id': {
'type': six.string_types,
'env_fallback': 'EXPORTERS_S3WRITER_AWS_LOGIN'
},
'aws_secret_access_key': {
'type': six.string_types,
'env_fallback': 'EXPORTERS_S3WRITER_AWS_SECRET'
},
'aws_region': {'type': six.string_types, 'default': None},
'host': {'type': six.string_types, 'default': None},
'save_pointer': {'type': six.string_types, 'default': None},
'save_metadata': {'type': bool, 'default': True, 'required': False}
}
def __init__(self, options, *args, **kwargs):
super(S3Writer, self).__init__(options, *args, **kwargs)
access_key = self.read_option('aws_access_key_id')
secret_key = self.read_option('aws_secret_access_key')
self.aws_region = self.read_option('aws_region')
self.host = self.read_option('host')
bucket_name = get_bucket_name(self.read_option('bucket'))
self.logger.info('Starting S3Writer for bucket: %s' % bucket_name)
if self.aws_region is None:
self.aws_region = self._get_bucket_location(access_key, secret_key,
bucket_name)
self.conn = get_boto_connection(access_key, secret_key, self.aws_region,
bucket_name, self.host)
self.bucket = self.conn.get_bucket(bucket_name, validate=False)
self.save_metadata = self.read_option('save_metadata')
self.set_metadata('files_counter', Counter())
self.set_metadata('keys_written', [])
def _get_bucket_location(self, access_key, secret_key, bucket):
try:
conn = get_boto_connection(access_key, secret_key, bucketname=bucket, host=self.host)
return conn.get_bucket(bucket).get_location() or DEFAULT_BUCKET_REGION
except:
return DEFAULT_BUCKET_REGION
def _update_metadata(self, dump_path, key_name):
buffer_info = self.write_buffer.get_metadata(dump_path)
key_info = {
'key_name': key_name,
'size': buffer_info['size'],
'number_of_records': buffer_info['number_of_records']
}
keys_written = self.get_metadata('keys_written')
keys_written.append(key_info)
self.set_metadata('keys_written', keys_written)
def _get_total_count(self, dump_path):
return self.write_buffer.get_metadata_for_file(dump_path, 'number_of_records') or 0
def _ensure_proper_key_permissions(self, key):
from boto.exception import S3ResponseError
try:
key.set_acl('bucket-owner-full-control')
except S3ResponseError:
self.logger.warning('We have no READ_ACP/WRITE_ACP permissions')
def _create_key_metadata(self, dump_path, md5=None):
from boto.utils import compute_md5
metadata = {}
metadata['total'] = self._get_total_count(dump_path)
if md5:
metadata['md5'] = md5
else:
with open(dump_path, 'r') as f:
metadata['md5'] = compute_md5(f)
return metadata
def _save_metadata_for_key(self, key, dump_path, md5=None):
from boto.exception import S3ResponseError
metadata = self._create_key_metadata(dump_path, md5)
try:
for k, v in metadata.items():
key.set_metadata(k, v)
except S3ResponseError:
self.logger.warning(
'We have no READ_ACP/WRITE_ACP permissions, '
'so we could not add metadata info')
def _upload_small_file(self, dump_path, key_name):
with closing(self.bucket.new_key(key_name)) as key, open(dump_path, 'r') as f:
buffer_info = self.write_buffer.get_metadata(dump_path)
md5 = key.get_md5_from_hexdigest(buffer_info['file_hash'])
if self.save_metadata:
self._save_metadata_for_key(key, dump_path, md5)
progress = BotoDownloadProgress(self.logger)
key.set_contents_from_file(f, cb=progress, md5=md5)
self._ensure_proper_key_permissions(key)
@retry_long
def _upload_chunk(self, mp, chunk):
mp.upload_part_from_file(chunk.bytes, part_num=chunk.number)
def _upload_large_file(self, dump_path, key_name):
self.logger.debug('Using multipart S3 uploader')
md5 = None
if self.save_metadata:
md5 = calculate_multipart_etag(dump_path, CHUNK_SIZE)
metadata = self._create_key_metadata(dump_path, md5=md5)
with multipart_upload(self.bucket, key_name, metadata=metadata) as mp:
for chunk in split_file(dump_path):
self._upload_chunk(mp, chunk)
self.logger.debug(
'Uploaded chunk number {}'.format(chunk.number))
def _write_s3_key(self, dump_path, key_name):
destination = 's3://{}/{}'.format(self.bucket.name, key_name)
self.logger.info('Start uploading {} to {}'.format(dump_path, destination))
if should_use_multipart_upload(dump_path, self.bucket):
self._upload_large_file(dump_path, key_name)
else:
self._upload_small_file(dump_path, key_name)
self.last_written_file = destination
self.logger.info('Saved {}'.format(destination))
def write(self, dump_path, group_key=None, file_name=None):
if group_key is None:
group_key = []
filebase_path, file_name = self.create_filebase_name(group_key, file_name=file_name)
key_name = filebase_path + '/' + file_name
self._write_s3_key(dump_path, key_name)
self._update_metadata(dump_path, key_name)
self.get_metadata('files_counter')[filebase_path] += 1
@retry_long
def _write_s3_pointer(self, save_pointer, filebase):
with closing(self.bucket.new_key(save_pointer)) as key:
key.set_contents_from_string(filebase)
def _update_last_pointer(self):
save_pointer = self.read_option('save_pointer')
self._write_s3_pointer(save_pointer, self.filebase.dirname_template + '/')
def close(self):
"""
Called to clean all possible tmp files created during the process.
"""
if self.read_option('save_pointer'):
self._update_last_pointer()
super(S3Writer, self).close()
def get_file_suffix(self, path, prefix):
number_of_keys = self.get_metadata('files_counter').get(path, 0)
suffix = '{}'.format(str(number_of_keys))
return suffix
def _check_write_consistency(self):
from boto.exception import S3ResponseError
for key_info in self.get_metadata('keys_written'):
try:
key = self.bucket.get_key(key_info['key_name'])
if not key:
raise InconsistentWriteState('Key {} not found in bucket'.format(
key_info['key_name']))
if str(key.content_length) != str(key_info['size']):
raise InconsistentWriteState(
'Key {} has unexpected size. (expected {} - got {})'.format(
key_info['key_name'], key_info['size'], key.content_length))
if self.save_metadata:
if str(key.get_metadata('total')) != str(key_info['number_of_records']):
raise InconsistentWriteState(
'Unexpected number of records for key {}. ('
'expected {} - got {})'.format(key_info['key_name'],
key_info['number_of_records'],
key.get_metadata('total')))
except S3ResponseError:
self.logger.warning(
'Skipping consistency check for key {}. Probably due to lack of '
'read permissions'.format(key_info['key_name']))
self.logger.info('Consistency check passed')
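# Hedged illustration (not part of the original module): the calculate_multipart_etag
# helper used by _upload_large_file above is presumed to follow S3's multipart ETag
# convention, i.e. the MD5 of the concatenated binary MD5 digests of every part,
# suffixed with '-<number of parts>'. A minimal self-contained sketch of that
# convention, chunking the file the same way the uploader does:
def _example_multipart_etag(path, chunk_size):
    import hashlib
    part_digests = []
    with open(path, 'rb') as source:
        while True:
            part = source.read(chunk_size)
            if not part:
                break
            part_digests.append(hashlib.md5(part).digest())
    combined = hashlib.md5(b''.join(part_digests))
    return '{0}-{1}'.format(combined.hexdigest(), len(part_digests))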
|
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A logging handler that records information about unique exceptions.
'Unique' in this case is defined as a given (exception class, location) tuple.
Unique exceptions are logged to the datastore with an example stacktrace and an
approximate count of occurrences, grouped by day and application version.
A cron handler, in google.appengine.ext.ereporter.report_generator, constructs
and emails a report based on the previous day's exceptions.
Example usage:
In your handler script(s), add:
import logging
from google.appengine.ext import ereporter
ereporter.register_logger()
In your app.yaml, add:
handlers:
- url: /_ereporter/.*
script: $PYTHON_LIB/google/appengine/ext/ereporter/report_generator.py
login: admin
In your cron.yaml, add:
cron:
- description: Daily exception report
url: /[email protected]
schedule: every day 00:00
This will cause a daily exception report to be generated and emailed to all
admins, with exception traces grouped by minor version. If you only want to
get exception information for the most recent minor version, add the
'versions=latest' argument to the query string. For other valid query string
arguments, see report_generator.py.
If you anticipate a lot of exception traces (for example, if you're deploying
many minor versions, each of which may have its own set of exceptions), you
can ensure that the traces from the newest minor versions get included by adding
this to your index.yaml:
indexes:
- kind: ExceptionRecord
properties:
- name: date
- name: major_version
- name: minor_version
direction: desc
"""
import datetime
import logging
import os
import sha
import traceback
import urllib
from google.appengine.api import memcache
from google.appengine.ext import db
from google.appengine.ext import webapp
MAX_SIGNATURE_LENGTH = 256
class ExceptionRecord(db.Model):
"""Datastore model for a record of a unique exception."""
signature = db.StringProperty(required=True)
major_version = db.StringProperty(required=True)
minor_version = db.IntegerProperty(required=True)
date = db.DateProperty(required=True)
count = db.IntegerProperty(required=True, default=0)
stacktrace = db.TextProperty(required=True)
http_method = db.TextProperty(required=True)
url = db.TextProperty(required=True)
handler = db.TextProperty(required=True)
@classmethod
def get_key_name(cls, signature, version, date=None):
"""Generates a key name for an exception record.
Args:
signature: A signature representing the exception and its site.
version: The major/minor version of the app the exception occurred in.
date: The date the exception occurred.
Returns:
The unique key name for this exception record.
"""
if not date:
date = datetime.date.today()
return '%s@%s:%s' % (signature, date, version)
class ExceptionRecordingHandler(logging.Handler):
"""A handler that records exception data to the App Engine datastore."""
def __init__(self, log_interval=10):
"""Constructs a new ExceptionRecordingHandler.
Args:
log_interval: The minimum interval at which we will log an individual
exception. This is a per-exception timeout, so doesn't affect the
aggregate rate of exception logging, only the rate at which we record
occurrences of a single exception, to prevent datastore contention.
"""
self.log_interval = log_interval
logging.Handler.__init__(self)
@classmethod
def __RelativePath(cls, path):
"""Rewrites a path to be relative to the app's root directory.
Args:
path: The path to rewrite.
Returns:
The path with the prefix removed, if that prefix matches the app's
root directory.
"""
cwd = os.getcwd()
if path.startswith(cwd):
path = path[len(cwd)+1:]
return path
@classmethod
def __GetSignature(cls, exc_info):
"""Returns a unique signature string for an exception.
Args:
exc_info: The exc_info object for an exception.
Returns:
A unique signature string for the exception, consisting of fully
qualified exception name and call site.
"""
ex_type, unused_value, trace = exc_info
frames = traceback.extract_tb(trace)
fulltype = '%s.%s' % (ex_type.__module__, ex_type.__name__)
path, line_no = frames[-1][:2]
path = cls.__RelativePath(path)
site = '%s:%d' % (path, line_no)
signature = '%s@%s' % (fulltype, site)
if len(signature) > MAX_SIGNATURE_LENGTH:
signature = 'hash:%s' % sha.new(signature).hexdigest()
return signature
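# For example, an unhandled KeyError raised at line 42 of handlers/main.py (a
# hypothetical path) would yield a signature such as
# 'exceptions.KeyError@handlers/main.py:42', hashed if it exceeds MAX_SIGNATURE_LENGTH.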
@classmethod
def __GetURL(cls):
"""Returns the URL of the page currently being served.
Returns:
The full URL of the page currently being served.
"""
if os.environ['SERVER_PORT'] == '80':
scheme = 'http://'
else:
scheme = 'https://'
host = os.environ['SERVER_NAME']
script_name = urllib.quote(os.environ['SCRIPT_NAME'])
path_info = urllib.quote(os.environ['PATH_INFO'])
qs = os.environ.get('QUERY_STRING', '')
if qs:
qs = '?' + qs
return scheme + host + script_name + path_info + qs
def __GetFormatter(self):
"""Returns the log formatter for this handler.
Returns:
The log formatter to use.
"""
if self.formatter:
return self.formatter
else:
return logging._defaultFormatter
def emit(self, record):
"""Log an error to the datastore, if applicable.
Args:
record: The logging.LogRecord object.
See http://docs.python.org/library/logging.html#logging.LogRecord
"""
try:
if not record.exc_info:
return
signature = self.__GetSignature(record.exc_info)
if not memcache.add(signature, None, self.log_interval):
return
db.run_in_transaction_custom_retries(1, self.__EmitTx, signature,
record.exc_info)
except Exception:
self.handleError(record)
def __EmitTx(self, signature, exc_info):
"""Run in a transaction to insert or update the record for this transaction.
Args:
signature: The signature for this exception.
exc_info: The exception info record.
"""
today = datetime.date.today()
version = os.environ['CURRENT_VERSION_ID']
major_ver, minor_ver = version.rsplit('.', 1)
minor_ver = int(minor_ver)
key_name = ExceptionRecord.get_key_name(signature, version)
exrecord = ExceptionRecord.get_by_key_name(key_name)
if not exrecord:
exrecord = ExceptionRecord(
key_name=key_name,
signature=signature,
major_version=major_ver,
minor_version=minor_ver,
date=today,
stacktrace=self.__GetFormatter().formatException(exc_info),
http_method=os.environ['REQUEST_METHOD'],
url=self.__GetURL(),
handler=self.__RelativePath(os.environ['PATH_TRANSLATED']))
exrecord.count += 1
exrecord.put()
def register_logger(logger=None):
if not logger:
logger = logging.getLogger()
handler = ExceptionRecordingHandler()
logger.addHandler(handler)
return handler
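# Minimal usage sketch (an illustration, not part of the original module): attaching
# the handler to the root logger makes any logging call that carries exc_info, such
# as logging.exception(), eligible for recording. The handler only does real work
# inside an App Engine runtime, where the datastore, memcache and the request
# environment variables referenced above are available.
if __name__ == '__main__':
    register_logger()
    try:
        raise ValueError('example failure')
    except ValueError:
        logging.exception('recorded by ExceptionRecordingHandler')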
|
|
from django.test import TestCase
from django.test import Client
from django.urls import reverse
# Create your tests here.
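# The assertions below expect HTTP 301: every URL is requested without a trailing
# slash, so with Django's default APPEND_SLASH behaviour (CommonMiddleware) the
# framework replies with a permanent redirect to the slash-terminated URL,
# presumably matching this project's URL configuration.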
class ResponseTestBrowse(TestCase):
def setUp(self):
print("Set uo [ Response Test ] : browse")
self.client = Client()
def test_browse(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse")
response = self.client.get('/browse')
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browseAll(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/all")
response = self.client.get('/browse/all')
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
    def test_browse_pages(self):
        # Parameterized form of the original per-page tests (test_browse1 .. test_browse159):
        # each detail page is requested without a trailing slash and must answer with a
        # permanent redirect, exactly as the remaining individual tests do.
        for page in range(1, 160):
            with self.subTest(page=page):
                print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
                print("Response : http://localhost:port/browse/{}".format(page))
                response = self.client.get("/browse/{}".format(page))
                self.assertEqual(response.status_code, 301)
                print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse160(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/160")
response = self.client.get("/browse/160")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse161(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/161")
response = self.client.get("/browse/161")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse162(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/162")
response = self.client.get("/browse/162")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse163(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/163")
response = self.client.get("/browse/163")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse164(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/164")
response = self.client.get("/browse/164")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse165(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/165")
response = self.client.get("/browse/165")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse166(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/166")
response = self.client.get("/browse/166")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse167(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/167")
response = self.client.get("/browse/167")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse168(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/168")
response = self.client.get("/browse/168")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse169(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/169")
response = self.client.get("/browse/169")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse170(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/170")
response = self.client.get("/browse/170")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse171(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/171")
response = self.client.get("/browse/171")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse172(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/172")
response = self.client.get("/browse/172")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse173(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/173")
response = self.client.get("/browse/173")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse174(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/174")
response = self.client.get("/browse/174")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse175(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/175")
response = self.client.get("/browse/175")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse176(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/176")
response = self.client.get("/browse/176")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse177(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/177")
response = self.client.get("/browse/177")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse178(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/178")
response = self.client.get("/browse/178")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse179(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/179")
response = self.client.get("/browse/179")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse180(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/180")
response = self.client.get("/browse/180")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse181(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/181")
response = self.client.get("/browse/181")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse182(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/182")
response = self.client.get("/browse/182")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse183(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/183")
response = self.client.get("/browse/183")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse184(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/184")
response = self.client.get("/browse/184")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse185(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/185")
response = self.client.get("/browse/185")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse186(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/186")
response = self.client.get("/browse/186")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse187(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/187")
response = self.client.get("/browse/187")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse188(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/188")
response = self.client.get("/browse/188")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse189(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/189")
response = self.client.get("/browse/189")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse190(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/190")
response = self.client.get("/browse/190")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse191(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/191")
response = self.client.get("/browse/191")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse192(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/192")
response = self.client.get("/browse/192")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse193(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/193")
response = self.client.get("/browse/193")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse194(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/194")
response = self.client.get("/browse/194")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse195(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/195")
response = self.client.get("/browse/195")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse196(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/196")
response = self.client.get("/browse/196")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse197(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/197")
response = self.client.get("/browse/197")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse198(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/198")
response = self.client.get("/browse/198")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse199(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/199")
response = self.client.get("/browse/199")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse200(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/200")
response = self.client.get("/browse/200")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse201(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/201")
response = self.client.get("/browse/201")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse202(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/202")
response = self.client.get("/browse/202")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse203(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/203")
response = self.client.get("/browse/203")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse204(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/204")
response = self.client.get("/browse/204")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse205(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/205")
response = self.client.get("/browse/205")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse206(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/206")
response = self.client.get("/browse/206")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse207(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/207")
response = self.client.get("/browse/207")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse208(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/208")
response = self.client.get("/browse/208")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse209(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/209")
response = self.client.get("/browse/209")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse210(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/210")
response = self.client.get("/browse/210")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse211(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/211")
response = self.client.get("/browse/211")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse212(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/212")
response = self.client.get("/browse/212")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse213(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/213")
response = self.client.get("/browse/213")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse214(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/214")
response = self.client.get("/browse/214")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse215(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/215")
response = self.client.get("/browse/215")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse216(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/216")
response = self.client.get("/browse/216")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse217(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/217")
response = self.client.get("/browse/217")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse218(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/218")
response = self.client.get("/browse/218")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse219(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/219")
response = self.client.get("/browse/219")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse220(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/220")
response = self.client.get("/browse/220")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse221(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/221")
response = self.client.get("/browse/221")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse222(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/222")
response = self.client.get("/browse/222")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse223(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/223")
response = self.client.get("/browse/223")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse224(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/224")
response = self.client.get("/browse/224")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse225(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/225")
response = self.client.get("/browse/225")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse226(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/226")
response = self.client.get("/browse/226")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse227(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/227")
response = self.client.get("/browse/227")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse228(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/228")
response = self.client.get("/browse/228")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse229(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/229")
response = self.client.get("/browse/229")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse230(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/230")
response = self.client.get("/browse/230")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse231(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/231")
response = self.client.get("/browse/231")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse232(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/232")
response = self.client.get("/browse/232")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse233(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/233")
response = self.client.get("/browse/233")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse234(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/234")
response = self.client.get("/browse/234")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse235(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/235")
response = self.client.get("/browse/235")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse236(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/236")
response = self.client.get("/browse/236")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse237(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/237")
response = self.client.get("/browse/237")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse238(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/238")
response = self.client.get("/browse/238")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse239(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/239")
response = self.client.get("/browse/239")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse240(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/240")
response = self.client.get("/browse/240")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse241(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/241")
response = self.client.get("/browse/241")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse242(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/242")
response = self.client.get("/browse/242")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse243(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/243")
response = self.client.get("/browse/243")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse244(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/244")
response = self.client.get("/browse/244")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse245(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/245")
response = self.client.get("/browse/245")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse246(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/246")
response = self.client.get("/browse/246")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse247(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/247")
response = self.client.get("/browse/247")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse248(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/248")
response = self.client.get("/browse/248")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse249(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/249")
response = self.client.get("/browse/249")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse250(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/250")
response = self.client.get("/browse/250")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse251(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/251")
response = self.client.get("/browse/251")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse252(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/252")
response = self.client.get("/browse/252")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse253(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/253")
response = self.client.get("/browse/253")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse254(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/254")
response = self.client.get("/browse/254")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse255(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/255")
response = self.client.get("/browse/255")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse256(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/256")
response = self.client.get("/browse/256")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse257(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/257")
response = self.client.get("/browse/257")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse258(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/258")
response = self.client.get("/browse/258")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse259(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/259")
response = self.client.get("/browse/259")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse260(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/260")
response = self.client.get("/browse/260")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse261(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/261")
response = self.client.get("/browse/261")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse262(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/262")
response = self.client.get("/browse/262")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse263(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/263")
response = self.client.get("/browse/263")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse264(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/264")
response = self.client.get("/browse/264")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse265(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/265")
response = self.client.get("/browse/265")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse266(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/266")
response = self.client.get("/browse/266")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse267(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/267")
response = self.client.get("/browse/267")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse268(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/268")
response = self.client.get("/browse/268")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse269(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/269")
response = self.client.get("/browse/269")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse270(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/270")
response = self.client.get("/browse/270")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse271(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/271")
response = self.client.get("/browse/271")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse272(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/272")
response = self.client.get("/browse/272")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse273(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/273")
response = self.client.get("/browse/273")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse274(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/274")
response = self.client.get("/browse/274")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse275(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/275")
response = self.client.get("/browse/275")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse276(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/276")
response = self.client.get("/browse/276")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse277(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/277")
response = self.client.get("/browse/277")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse278(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/278")
response = self.client.get("/browse/278")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse279(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/279")
response = self.client.get("/browse/279")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse280(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/280")
response = self.client.get("/browse/280")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse281(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/281")
response = self.client.get("/browse/281")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse282(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/282")
response = self.client.get("/browse/282")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse283(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/283")
response = self.client.get("/browse/283")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse284(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/284")
response = self.client.get("/browse/284")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse285(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/285")
response = self.client.get("/browse/285")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse286(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/286")
response = self.client.get("/browse/286")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse287(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/287")
response = self.client.get("/browse/287")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse288(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/288")
response = self.client.get("/browse/288")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse289(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/289")
response = self.client.get("/browse/289")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse290(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/290")
response = self.client.get("/browse/290")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse291(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/291")
response = self.client.get("/browse/291")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse292(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/292")
response = self.client.get("/browse/292")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse293(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/293")
response = self.client.get("/browse/293")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse294(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/294")
response = self.client.get("/browse/294")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse295(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/295")
response = self.client.get("/browse/295")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse296(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/296")
response = self.client.get("/browse/296")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse297(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/297")
response = self.client.get("/browse/297")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse298(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/298")
response = self.client.get("/browse/298")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse299(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/299")
response = self.client.get("/browse/299")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def test_browse300(self):
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Response : http://localhost:port/browse/300")
response = self.client.get("/browse/300")
self.assertEqual(response.status_code, 301)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers
from google.api_core import operations_v1
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.videointelligence_v1beta2.types import video_intelligence
from google.longrunning import operations_pb2 # type: ignore
from .base import VideoIntelligenceServiceTransport, DEFAULT_CLIENT_INFO
class VideoIntelligenceServiceGrpcTransport(VideoIntelligenceServiceTransport):
"""gRPC backend transport for VideoIntelligenceService.
Service that implements Google Cloud Video Intelligence API.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(
self,
*,
host: str = "videointelligence.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
                ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
            always_use_jwt_access (Optional[bool]): Whether self-signed JWT should
                be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client: Optional[operations_v1.OperationsClient] = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
# use the credentials which are saved
credentials=self._credentials,
# Set ``credentials_file`` to ``None`` here as
# the credentials that we saved earlier should be used.
credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
cls,
host: str = "videointelligence.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Quick check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsClient(self.grpc_channel)
# Return the client from cache.
return self._operations_client
@property
def annotate_video(
self,
) -> Callable[[video_intelligence.AnnotateVideoRequest], operations_pb2.Operation]:
r"""Return a callable for the annotate video method over gRPC.
Performs asynchronous video annotation. Progress and results can
be retrieved through the ``google.longrunning.Operations``
interface. ``Operation.metadata`` contains
``AnnotateVideoProgress`` (progress). ``Operation.response``
contains ``AnnotateVideoResponse`` (results).
Returns:
Callable[[~.AnnotateVideoRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "annotate_video" not in self._stubs:
self._stubs["annotate_video"] = self.grpc_channel.unary_unary(
"/google.cloud.videointelligence.v1beta2.VideoIntelligenceService/AnnotateVideo",
request_serializer=video_intelligence.AnnotateVideoRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["annotate_video"]
def close(self):
self.grpc_channel.close()
__all__ = ("VideoIntelligenceServiceGrpcTransport",)
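# A minimal usage sketch for this transport: it builds a channel with application
# default credentials (assumed to be available in the environment), wraps it in the
# transport, and checks that the annotate_video stub is exposed. No RPC is issued.
if __name__ == "__main__":
    channel = VideoIntelligenceServiceGrpcTransport.create_channel(
        "videointelligence.googleapis.com"
    )
    transport = VideoIntelligenceServiceGrpcTransport(channel=channel)
    # Accessing the property lazily creates the stub bound to the channel.
    print(callable(transport.annotate_video))
    transport.close()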
|
|
# Copyright 2013 Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon.utils import memoized
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.instances \
import utils as instance_utils
LOG = logging.getLogger(__name__)
class SetInstanceDetailsAction(workflows.Action):
name = forms.CharField(max_length=80, label=_("Instance Name"))
flavor = forms.ChoiceField(label=_("Flavor"),
help_text=_("Size of image to launch."))
volume = forms.IntegerField(label=_("Volume Size"),
min_value=0,
initial=1,
help_text=_("Size of the volume in GB."))
datastore = forms.ChoiceField(label=_("Datastore"),
help_text=_(
"Type and version of datastore."))
class Meta(object):
name = _("Details")
help_text_template = "project/databases/_launch_details_help.html"
def clean(self):
if self.data.get("datastore", None) == "select_datastore_type_version":
msg = _("You must select a datastore type and version.")
self._errors["datastore"] = self.error_class([msg])
return self.cleaned_data
@memoized.memoized_method
def flavors(self, request):
try:
return api.trove.flavor_list(request)
except Exception:
LOG.exception("Exception while obtaining flavors list")
redirect = reverse("horizon:project:databases:index")
exceptions.handle(request,
_('Unable to obtain flavors.'),
redirect=redirect)
def populate_flavor_choices(self, request, context):
flavors = self.flavors(request)
if flavors:
return instance_utils.sort_flavor_list(request, flavors)
return []
@memoized.memoized_method
def datastores(self, request):
try:
return api.trove.datastore_list(request)
except Exception:
LOG.exception("Exception while obtaining datastores list")
return []
@memoized.memoized_method
def datastore_versions(self, request, datastore):
try:
return api.trove.datastore_version_list(request, datastore)
except Exception:
LOG.exception("Exception while obtaining datastore version list")
return []
def populate_datastore_choices(self, request, context):
choices = ()
set_initial = False
datastores = self.datastores(request)
if datastores is not None:
num_datastores_with_one_version = 0
for ds in datastores:
versions = self.datastore_versions(request, ds.name)
if not set_initial:
if len(versions) >= 2:
set_initial = True
elif len(versions) == 1:
num_datastores_with_one_version += 1
if num_datastores_with_one_version > 1:
set_initial = True
if len(versions) > 0:
# only add to choices if datastore has at least one version
version_choices = ()
for v in versions:
version_choices = (version_choices +
((ds.name + ',' + v.name, v.name),))
datastore_choices = (ds.name, version_choices)
choices = choices + (datastore_choices,)
if set_initial:
# prepend choice to force user to choose
initial = ('select_datastore_type_version',
_('Select datastore type and version'))
choices = (initial,) + choices
return choices
TROVE_ADD_USER_PERMS = getattr(settings, 'TROVE_ADD_USER_PERMS', [])
TROVE_ADD_DATABASE_PERMS = getattr(settings, 'TROVE_ADD_DATABASE_PERMS', [])
TROVE_ADD_PERMS = TROVE_ADD_USER_PERMS + TROVE_ADD_DATABASE_PERMS
class SetInstanceDetails(workflows.Step):
action_class = SetInstanceDetailsAction
contributes = ("name", "volume", "flavor", "datastore")
class SetNetworkAction(workflows.Action):
network = forms.MultipleChoiceField(label=_("Networks"),
widget=forms.CheckboxSelectMultiple(),
error_messages={
'required': _(
"At least one network must"
" be specified.")},
help_text=_("Launch instance with"
" these networks"))
def __init__(self, request, *args, **kwargs):
super(SetNetworkAction, self).__init__(request, *args, **kwargs)
network_list = self.fields["network"].choices
if len(network_list) == 1:
self.fields['network'].initial = [network_list[0][0]]
class Meta(object):
name = _("Networking")
permissions = ('openstack.services.network',)
help_text = _("Select networks for your instance.")
def populate_network_choices(self, request, context):
try:
tenant_id = self.request.user.tenant_id
networks = api.neutron.network_list_for_tenant(request, tenant_id)
network_list = [(network.id, network.name_or_id)
for network in networks]
except Exception:
network_list = []
exceptions.handle(request,
_('Unable to retrieve networks.'))
return network_list
class SetNetwork(workflows.Step):
action_class = SetNetworkAction
template_name = "project/databases/_launch_networks.html"
contributes = ("network_id",)
def contribute(self, data, context):
if data:
networks = self.workflow.request.POST.getlist("network")
# If no networks are explicitly specified, network list
# contains an empty string, so remove it.
networks = [n for n in networks if n != '']
if networks:
context['network_id'] = networks
return context
class AddDatabasesAction(workflows.Action):
"""Initialize the database with users/databases. This tab will honor
the settings which should be a list of permissions required:
* TROVE_ADD_USER_PERMS = []
* TROVE_ADD_DATABASE_PERMS = []
"""
databases = forms.CharField(label=_('Initial Databases'),
required=False,
help_text=_('Comma separated list of '
'databases to create'))
user = forms.CharField(label=_('Initial Admin User'),
required=False,
help_text=_("Initial admin user to add"))
password = forms.CharField(widget=forms.PasswordInput(),
label=_("Password"),
required=False)
host = forms.CharField(label=_("Allowed Host (optional)"),
required=False,
help_text=_("Host or IP that the user is allowed "
"to connect through."))
class Meta(object):
name = _("Initialize Databases")
permissions = TROVE_ADD_PERMS
help_text_template = "project/databases/_launch_initialize_help.html"
def clean(self):
cleaned_data = super(AddDatabasesAction, self).clean()
if cleaned_data.get('user'):
if not cleaned_data.get('password'):
msg = _('You must specify a password if you create a user.')
self._errors["password"] = self.error_class([msg])
if not cleaned_data.get('databases'):
msg = _('You must specify at least one database if '
'you create a user.')
self._errors["databases"] = self.error_class([msg])
return cleaned_data
class InitializeDatabase(workflows.Step):
action_class = AddDatabasesAction
contributes = ["databases", 'user', 'password', 'host']
class AdvancedAction(workflows.Action):
initial_state = forms.ChoiceField(
label=_('Source for Initial State'),
required=False,
help_text=_("Choose initial state."),
choices=[
('', _('None')),
('backup', _('Restore from Backup')),
('master', _('Replicate from Instance'))],
widget=forms.Select(attrs={
'class': 'switchable',
'data-slug': 'initial_state'
}))
backup = forms.ChoiceField(
label=_('Backup Name'),
required=False,
help_text=_('Select a backup to restore'),
widget=forms.Select(attrs={
'class': 'switched',
'data-switch-on': 'initial_state',
'data-initial_state-backup': _('Backup Name')
}))
master = forms.ChoiceField(
label=_('Master Instance Name'),
required=False,
help_text=_('Select a master instance'),
widget=forms.Select(attrs={
'class': 'switched',
'data-switch-on': 'initial_state',
'data-initial_state-master': _('Master Instance Name')
}))
class Meta(object):
name = _("Advanced")
help_text_template = "project/databases/_launch_advanced_help.html"
def populate_backup_choices(self, request, context):
try:
backups = api.trove.backup_list(request)
choices = [(b.id, b.name) for b in backups
if b.status == 'COMPLETED']
except Exception:
choices = []
if choices:
choices.insert(0, ("", _("Select backup")))
else:
choices.insert(0, ("", _("No backups available")))
return choices
def populate_master_choices(self, request, context):
try:
instances = api.trove.instance_list(request)
choices = [(i.id, i.name) for i in
instances if i.status == 'ACTIVE']
except Exception:
choices = []
if choices:
choices.insert(0, ("", _("Select instance")))
else:
choices.insert(0, ("", _("No instances available")))
return choices
def clean(self):
cleaned_data = super(AdvancedAction, self).clean()
initial_state = cleaned_data.get("initial_state")
if initial_state == 'backup':
backup = self.cleaned_data['backup']
if backup:
try:
bkup = api.trove.backup_get(self.request, backup)
self.cleaned_data['backup'] = bkup.id
except Exception:
raise forms.ValidationError(_("Unable to find backup!"))
else:
raise forms.ValidationError(_("A backup must be selected!"))
cleaned_data['master'] = None
elif initial_state == 'master':
master = self.cleaned_data['master']
if master:
try:
api.trove.instance_get(self.request, master)
except Exception:
raise forms.ValidationError(
_("Unable to find master instance!"))
else:
raise forms.ValidationError(
_("A master instance must be selected!"))
cleaned_data['backup'] = None
else:
cleaned_data['master'] = None
cleaned_data['backup'] = None
return cleaned_data
class Advanced(workflows.Step):
action_class = AdvancedAction
contributes = ['backup', 'master']
class LaunchInstance(workflows.Workflow):
slug = "launch_instance"
name = _("Launch Instance")
finalize_button_name = _("Launch")
success_message = _('Launched %(count)s named "%(name)s".')
failure_message = _('Unable to launch %(count)s named "%(name)s".')
success_url = "horizon:project:databases:index"
default_steps = (SetInstanceDetails,
SetNetwork,
InitializeDatabase,
Advanced)
def __init__(self, request=None, context_seed=None, entry_point=None,
*args, **kwargs):
super(LaunchInstance, self).__init__(request, context_seed,
entry_point, *args, **kwargs)
self.attrs['autocomplete'] = (
settings.HORIZON_CONFIG.get('password_autocomplete'))
def format_status_message(self, message):
name = self.context.get('name', 'unknown instance')
return message % {"count": _("instance"), "name": name}
def _get_databases(self, context):
"""Returns the initial databases for this instance."""
databases = None
if context.get('databases'):
dbs = context['databases']
databases = [{'name': d.strip()} for d in dbs.split(',')]
return databases
def _get_users(self, context):
users = None
if context.get('user'):
user = {
'name': context['user'],
'password': context['password'],
'databases': self._get_databases(context),
}
if context['host']:
user['host'] = context['host']
users = [user]
return users
def _get_backup(self, context):
backup = None
if context.get('backup'):
backup = {'backupRef': context['backup']}
return backup
def _get_nics(self, context):
netids = context.get('network_id', None)
if netids:
return [{"net-id": netid, "v4-fixed-ip": ""}
for netid in netids]
else:
return None
def handle(self, request, context):
try:
datastore = self.context['datastore'].split(',')[0]
datastore_version = self.context['datastore'].split(',')[1]
LOG.info("Launching database instance with parameters "
"{name=%s, volume=%s, flavor=%s, "
"datastore=%s, datastore_version=%s, "
"dbs=%s, users=%s, "
"backups=%s, nics=%s, replica_of=%s}",
context['name'], context['volume'], context['flavor'],
datastore, datastore_version,
self._get_databases(context), self._get_users(context),
self._get_backup(context), self._get_nics(context),
context.get('master'))
api.trove.instance_create(request,
context['name'],
context['volume'],
context['flavor'],
datastore=datastore,
datastore_version=datastore_version,
databases=self._get_databases(context),
users=self._get_users(context),
restore_point=self._get_backup(context),
nics=self._get_nics(context),
replica_of=context.get('master'))
return True
except Exception:
exceptions.handle(request)
return False
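# For reference, handle() above expects a workflow context shaped roughly like the
# following (a sketch; all values are illustrative):
#
#     {'name': 'db1', 'volume': 5, 'flavor': 'm1.small',
#      'datastore': 'mysql,5.6',            # "<type>,<version>", split on ','
#      'databases': 'sales,inventory', 'user': 'admin', 'password': 'secret', 'host': '',
#      'network_id': ['<network-uuid>'],    # becomes [{"net-id": ..., "v4-fixed-ip": ""}]
#      'backup': None, 'master': None}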
|
|
#! /usr/bin/env python
"""
A tiny run tracker
"""
from __future__ import print_function, division
from argparse import ArgumentParser
from contextlib import contextmanager
from datetime import datetime, timedelta
import json
import os
import sqlite3
LOCATION = os.path.expanduser('~/.runt')
DATABASE = os.path.join(LOCATION, 'runt.sqlite')
SCHEMA = """CREATE TABLE IF NOT EXISTS runs (
id INTEGER PRIMARY KEY AUTOINCREMENT,
time INTEGER UNIQUE,
distance REAL NOT NULL,
duration INTEGER
);
CREATE TABLE IF NOT EXISTS partners (
run_id INTEGER,
name TEXT,
FOREIGN KEY (run_id) REFERENCES runs(id),
PRIMARY KEY (run_id, name)
);
"""
EPOCH = datetime(1970, 1, 1)
DATE_FORMAT = '%Y/%m/%d'
DATETIME_FORMAT = '%Y/%m/%d at %H:%M'
def add_run(date, distance, duration=None, partners=None):
"""
Add a run.
date: When the run occurred.
distance: The distance (in miles).
duration: (optional, None) A timedelta representing the amount of
time spent running.
partners: (optional, None) A list of people who you ran with.
"""
timestamp = to_timestamp(date)
if duration:
duration = int(duration.total_seconds())
with connect(commit=True) as cursor:
cursor.execute('INSERT INTO runs (time, distance, duration) VALUES '
'(:timestamp, :distance, :duration);',
{'timestamp': timestamp, 'distance': distance,
'duration': duration})
if partners:
cursor.execute('SELECT id FROM runs WHERE time = :timestamp;',
{'timestamp': timestamp})
(run_id,) = cursor.fetchone()
for partner in partners:
cursor.execute('INSERT INTO partners (run_id, name) VALUES '
'(:run_id, :partner);',
{'run_id': run_id, 'partner': partner})
def show(before=None, after=None, longer_than=None, shorter_than=None,
partners=None, excluded=None, solo=False, by_distance=False,
descending=False, total=False):
"""
Show the logged runs.
before: (optional, None) A date the run must be before.
after: (optional, None) A date the run must be after.
longer_than: (optional, None) A distance the run must be longer
than.
shorter_than: (optional, None) A distance the run must be shorter
than.
partners: (optional, None) People who must have been on the run.
excluded: (optional, None) People who can't have been on the run.
solo: (optional, False) A solo run.
by_distance: (optional, False) Sort by distance.
descending: (optional, False) Sort descending.
total: (optional, False) Show aggregated runs
"""
query = ['SELECT id, time, distance, duration FROM runs']
where = 'WHERE'
if before is not None:
query.append('{} time <= :before'.format(where))
where = 'AND'
before = to_timestamp(before)
if after is not None:
query.append('{} time >= :after'.format(where))
where = 'AND'
after = to_timestamp(after)
if longer_than is not None:
query.append('{} distance >= :longer_than'.format(where))
where = 'AND'
if shorter_than is not None:
query.append('{} distance <= :shorter_than'.format(where))
where = 'AND'
partners = set(partners or []) # may be None
excluded = set(excluded or []) # may be None
order = 'distance' if by_distance else 'time'
direction = 'DESC' if descending else 'ASC'
query.append('ORDER BY {} {};'.format(order, direction))
count = total_distance = 0
with connect() as cursor, connect() as partner_cursor:
cursor.execute(' '.join(query),
{'before': before, 'after': after,
'longer_than': longer_than,
'shorter_than': shorter_than})
for run_id, timestamp, distance, duration in cursor:
date = from_timestamp(timestamp)
output = ['{:.2f} miles'.format(distance)]
if duration:
output.append('in {} minutes'.format(int(duration / 60)))
if date.hour == date.minute == date.second == 0:
output.append('on {}'.format(date.strftime(DATE_FORMAT)))
else:
output.append('on {}'.format(date.strftime(DATETIME_FORMAT)))
partner_cursor.execute('SELECT name FROM partners WHERE '
'run_id = :run_id;', {'run_id': run_id})
actual_partners = {name for (name,) in partner_cursor}
if not actual_partners.issuperset(partners):
continue
if actual_partners.intersection(excluded):
continue
if solo and actual_partners:
continue
if actual_partners:
output.append('with')
output.append(' and '.join(actual_partners))
if total:
count += 1
total_distance += distance
else:
print(' '.join(output))
if total:
print('{} miles over {} runs'.format(total_distance, count))
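# For example, show(after=some_date, longer_than=5.0) assembles a query equivalent to:
#
#     SELECT id, time, distance, duration FROM runs
#     WHERE time >= :after AND distance >= :longer_than ORDER BY time ASC;
#
# Partner, exclusion and solo filters are then applied in Python while iterating the rows.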
def export(filename):
"""
Export the database to json.
filename: The file to export to.
"""
data = {'runs': []}
# Two cursors are used so that queries for partners can be made while
# iterating over the cursor for runs.
with connect() as cursor, connect() as partner_cursor:
cursor.execute('SELECT id, time, distance, duration FROM runs '
'ORDER BY time ASC;')
for run_id, timestamp, distance, duration in cursor:
date = from_timestamp(timestamp)
run = {
"date": {
"year": date.year,
"month": date.month,
"day": date.day,
},
"distance": distance
}
# Ignore runs at 0:00, they are just runs for some time
# during that day
if date.hour or date.minute:
run["date"]["hour"] = date.hour
run["date"]["minute"] = date.minute
if duration:
run["duration"] = duration
partner_cursor.execute('SELECT name FROM partners WHERE '
'run_id = :run_id;', {'run_id': run_id})
partners = partner_cursor.fetchall()
if partners:
run["partners"] = [partner for (partner,) in partners]
data['runs'].append(run)
with open(filename, 'w') as fileobj:
json.dump(data, fileobj)
def to_timestamp(date):
"""
Convert a datetime object into a timestamp.
"""
return int((date - EPOCH).total_seconds())
def from_timestamp(timestamp):
"""
Convert a timestamp into a datetime object.
"""
return EPOCH + timedelta(seconds=timestamp)
@contextmanager
def connect(commit=False):
"""
Get a cursor to the database.
commit: (optional, False) Whether to commit changes on successful
completion.
"""
connection = sqlite3.connect(DATABASE)
try:
yield connection.cursor()
if commit:
connection.commit()
finally:
connection.close()
def initialize():
"""
Initialize the run database.
"""
if not os.path.isdir(LOCATION):
os.mkdir(LOCATION)
with connect(commit=True) as cursor:
cursor.connection.executescript(SCHEMA)
def datestring(raw):
"""
Validate a date string. Should be of the format %Y/%m/%d.
"""
sections = raw.split('/')
if len(sections) != 3:
raise ValueError(raw)
return [int(value) for value in sections]
def timestring(raw):
"""
Validate a time string. Should be of the format %H:%M.
"""
sections = raw.split(':')
if len(sections) != 2:
raise ValueError(raw)
return [int(value) for value in sections]
def str_to_date(raw):
"""
Convert a date string into a datetime.
"""
return datetime(*(int(value) for value in raw.split('/')))
def main():
"""
Run runt from the command line
"""
initialize()
parser = ArgumentParser(description="a tiny run tracker")
commands = parser.add_subparsers(help="Sub-command help")
add_parser = commands.add_parser('add', help="Add a new run")
add_parser.set_defaults(command='add')
add_parser.add_argument('distance', type=float, help="Distance in miles")
add_parser.add_argument('--date', '-d', type=datestring, default=None,
help="The date of the run (YYYY/MM/DD)")
add_parser.add_argument('--time', '-t', type=timestring, default=None,
help="The time of the run (HH:MM)")
add_parser.add_argument('--length', '-l', type=int, default=None,
help="Duration of the run (in minutes)")
add_parser.add_argument('--partner', '-p', dest='partners',
action='append', help="Who you ran with")
show_parser = commands.add_parser('show', help="Show run data")
show_parser.set_defaults(command='show')
show_parser.add_argument('--after', '-a', type=str_to_date,
help="the earliest allowed date")
show_parser.add_argument('--before', '-b', type=str_to_date,
help="the latest allowed date")
show_parser.add_argument('--longer-than', '-l', dest='longer_than',
type=float, help="only show above this distance")
show_parser.add_argument('--shorter-than', '-s', dest='shorter_than',
type=float, help="only show below this distance")
show_parser.add_argument('--with', '-w', action='append', dest='partners',
help="people who must have been on the run")
show_parser.add_argument(
'--excluding', '-e', action='append', dest='excluded',
help="people who can't have been on the run")
show_parser.add_argument('--solo', action='store_true', help="A solo run")
show_parser.add_argument('--by-distance', '-d', dest='by_distance',
action='store_true', help="Sort by duration")
show_parser.add_argument('--desc', dest='descending', action='store_true',
help="Sort in a descending order")
show_parser.add_argument('--total', '-t', action='store_true',
help='Show the total distance and number of runs')
export_parser = commands.add_parser('export', help="Export to json")
export_parser.set_defaults(command='export')
export_parser.add_argument('filename', help="The file to save to")
args = parser.parse_args()
if args.command == 'add':
today = datetime.now()
year, month, day = args.date or (today.year, today.month, today.day)
hour, minute = args.time or (0, 0)
date = datetime(year, month, day, hour, minute)
duration = timedelta(minutes=args.length) if args.length else None
add_run(date, args.distance, duration=duration, partners=args.partners)
elif args.command == 'show':
show(before=args.before, after=args.after,
longer_than=args.longer_than, shorter_than=args.shorter_than,
partners=args.partners, excluded=args.excluded, solo=args.solo,
by_distance=args.by_distance, descending=args.descending,
total=args.total)
elif args.command == 'export':
export(args.filename)
if __name__ == '__main__':
main()
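# Example invocations (the script name and values are illustrative):
#
#     python runt.py add 5.2 --date 2017/03/01 --length 42 --partner Alice
#     python runt.py show --after 2017/01/01 --with Alice --total
#     python runt.py export runs.json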
|
|
'''
Created on March 3, 2017
This file is subject to the terms and conditions defined in the
file 'LICENSE.txt', which is part of this source code package.
@author: David Moss
'''
from devices.siren.siren import SirenDevice
# For the LinkHigh siren, send all 3 parameters simultaneously:
# ppc.alarmWarn = sound id to play
# ppc.alarmDuration = 1 is play once, 2+ is play for that many seconds
# ppc.alarmStrobe = 0 or 1 to turn the strobe light off or on.
class LinkhighSirenDevice(SirenDevice):
"""Siren"""
# List of Device Types this class is compatible with
DEVICE_TYPES = [9009]
# Sound library
SOUNDS = {
"silence": 0,
"alarm": 1,
"dog": 2,
"warning": 3,
"bling": 4,
"bird": 5,
"droid": 6,
"lock": 7,
"phaser": 8,
"doorbell": 9,
"guncock": 10,
"gunshot": 11,
"switch": 12,
"trumpet": 13,
"whistle": 14
}
def did_tamper(self, botengine):
"""
Did someone tamper with this device
:param botengine:
:return:
"""
return False
#===========================================================================
# Capabilities
#===========================================================================
def has_dogbark(self, botengine):
"""
Determine if this siren supports a dog bark sound
:param botengine:
:return: True if this siren supports a dog bark sound
"""
return True
def has_doorbell(self, botengine):
"""
Determine if this siren supports a doorbell sound
:param botengine:
:return:
"""
return True
#===========================================================================
# Commands
#===========================================================================
def play_sound(self, botengine, sound_id, strobe, duration_sec, microservice_identifier=""):
"""
Play the given sound ID
:param botengine: BotEngine
:param sound_id: Sound ID to play
:param strobe: True to activate the strobe light
:param duration_sec: 1 = play once; 2+ = play this many seconds.
"""
if self.locked_microservice is not None:
if self.locked_microservice != microservice_identifier:
botengine.get_logger().info("Siren: Currently locked by {}, cannot play sound from microservice {}".format(self.locked_microservice, microservice_identifier))
return
param_sound = {
"name": "ppc.alarmWarn",
"value": int(sound_id)
}
param_strobe = {
"name": "ppc.alarmStrobe",
"value": int(strobe)
}
param_duration = {
"name": "ppc.alarmDuration",
"value": int(duration_sec)
}
botengine.send_commands(self.device_id, [param_sound, param_strobe, param_duration], command_timeout_ms=5000)
def force_silence(self, botengine):
"""
Force silence, even if this is locked by some other service.
:param botengine:
:return:
"""
self.play_sound(botengine, self.SOUNDS['silence'], False, 0, microservice_identifier=self.locked_microservice)
def silence(self, botengine, microservice_identifier=""):
"""
Silence
:param botengine:
:return:
"""
self.play_sound(botengine, self.SOUNDS['silence'], False, 0, microservice_identifier)
def squawk(self, botengine, warning=False, microservice_identifier=""):
"""
Squawk
:param warning: True for a little warning squawk, False for a more alarming squawk
"""
if self.locked_microservice is not None:
if self.locked_microservice != microservice_identifier:
botengine.get_logger().info("Siren: Currently locked by {}, cannot play sound from microservice {}".format(self.locked_microservice, microservice_identifier))
return
style = self.SOUNDS['warning']
self.play_sound(botengine, style, False, 1, microservice_identifier=microservice_identifier)
def alarm(self, botengine, on, microservice_identifier=""):
"""
Sound the alarm
:param on: True for on, False for off
"""
if self.locked_microservice is not None:
if self.locked_microservice != microservice_identifier:
botengine.get_logger().info("Siren: Currently locked by {}, cannot play sound from microservice {}".format(self.locked_microservice, microservice_identifier))
return
if on:
self.play_sound(botengine, self.SOUNDS['alarm'], True, 900, microservice_identifier=microservice_identifier)
else:
self.play_sound(botengine, self.SOUNDS['silence'], False, 0, microservice_identifier)
def disarmed(self, botengine, microservice_identifier=""):
"""
Make a sound that the home is disarmed
:param botengine:
:return:
"""
self.play_sound(botengine, LinkhighSirenDevice.SOUNDS['trumpet'], False, 1, microservice_identifier)
def short_warning(self, botengine, microservice_identifier=""):
"""
Make a short warning sound
:param botengine:
:return:
"""
self.play_sound(botengine, LinkhighSirenDevice.SOUNDS['bling'], True, 1, microservice_identifier)
def about_to_arm(self, botengine, seconds_left, microservice_identifier=""):
"""
Make a unique, aggressive warning noise for the amount of time remaining before arming
:param botengine:
:param seconds_left: Seconds left before arming
:return:
"""
self.play_sound(botengine, LinkhighSirenDevice.SOUNDS['warning'], True, seconds_left, microservice_identifier)
def armed(self, botengine, microservice_identifier=""):
"""
Make a sound that the home is armed
:param botengine:
:return:
"""
self.play_sound(botengine, LinkhighSirenDevice.SOUNDS['lock'], False, 1, microservice_identifier)
def bark(self, botengine, duration_sec, microservice_identifier=""):
"""
Dog bark
:param botengine:
:param duration_sec: Duration of the bark in seconds
:return:
"""
self.play_sound(botengine, LinkhighSirenDevice.SOUNDS['dog'], True, duration_sec, microservice_identifier)
def doorbell(self, botengine, microservice_identifier=""):
"""
Make a doorbell sound.
:param botengine:
:return:
"""
if self.locked_microservice is None:
self.play_sound(botengine, self.SOUNDS['doorbell'], True, 1, microservice_identifier)
def door_opened(self, botengine, microservice_identifier=""):
"""
Door opened chime
:param botengine:
:return:
"""
self.play_sound(botengine, LinkhighSirenDevice.SOUNDS['bird'], False, 1, microservice_identifier)
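# For reference, play_sound() above turns an alarm(botengine, True) call into the three
# parameters described at the top of this file; the resulting command is equivalent to
# the following sketch (the device id is illustrative):
#
#     botengine.send_commands("<device-id>",
#                             [{"name": "ppc.alarmWarn", "value": 1},
#                              {"name": "ppc.alarmStrobe", "value": 1},
#                              {"name": "ppc.alarmDuration", "value": 900}],
#                             command_timeout_ms=5000)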
|
|
import bson
from copy_state_db import CopyStateDB
import gevent
import gevent.monkey
from faster_ordered_dict import FasterOrderedDict
from gevent.pool import Pool
import pymongo
from pymongo.errors import DuplicateKeyError
from pymongo.read_preferences import ReadPreference
from pymongo.cursor import CursorType
import time
import utils
from utils import auto_retry, log_exceptions, squelch_keyboard_interrupt
log = utils.get_logger(__name__)
TS_REWIND = 30 # seconds
HEADER_INTERVAL = 15 # entries
#
# Apply oplogs
#
class ApplyStats(object):
def __init__(self):
self.ops_retrieved = 0
self.inserts = 0
self.insert_warnings = 0
self.deletes = 0
self.delete_warnings = 0
self.updates = 0
self.update_warnings = 0
self.last_ts = bson.Timestamp(int(time.time()), 0)
self.sleeps = 0
self.exceptions = 0
self.retries = 0
self.paused = False
self.pending_ids = set()
def log(self):
# we record warnings but don't print them, because they haven't been that useful
#
# that said, we track them just in case
lag = int(time.time() - self.last_ts.time)
log.info(FMT, self.ops_retrieved, lag,
self.inserts, self.deletes, self.updates,
self.sleeps, self.exceptions, self.retries)
SH1 = "OPS APPLIED | WARNINGS"
SH2 = "total lag inserts removes updates | sleeps exceptions retries"
FMT = "%-9d %-6d %-9d %-9d %-9d | %-9d %-10d %d"
def _op_id(op):
if op['op'] == 'u':
return op['o2']['_id']
else:
return op['o']['_id']
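# For reference, the oplog entries handled above look roughly like this (values are
# illustrative):
#
#     insert: {'op': 'i', 'ns': 'db.coll', 'ts': ..., 'o': {'_id': ..., ...}}
#     update: {'op': 'u', 'ns': 'db.coll', 'ts': ..., 'o2': {'_id': ...}, 'o': {'$set': {...}}}
#     delete: {'op': 'd', 'ns': 'db.coll', 'ts': ..., 'o': {'_id': ...}}
#
# so _op_id() reads the _id from 'o2' for updates and from 'o' otherwise.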
def print_header_worker(sleep_interval):
while True:
log.info(SH1)
log.info(SH2)
time.sleep(sleep_interval)
@log_exceptions
def oplog_stats_worker(stats):
"""
Greenlet for printing state for oplog applies
"""
while True:
if not stats.paused:
stats.log()
gevent.sleep(3)
def oplog_checkpoint_worker(stats, source, dest, state_db):
"""
Greenlet for persisting oplog position to disk. This only has to do work periodically,
because it's ok if the recorded position is behind the position of the last applied
op. Oplog entries are idempotent, so we don't worry about applying an op twice.
"""
while True:
state_db.update_oplog_ts(source, dest, stats.last_ts)
gevent.sleep(3)
@auto_retry
def _apply_op(op, source_collection, dest_collection, stats):
"""
Actually applies an op. Assumes that we are the only one mutating a document with
the _id referenced by the op.
"""
_id = _op_id(op)
if op['op'] == 'i':
# insert
try:
inserted_id = dest_collection.insert(op['o'])
if inserted_id:
if inserted_id != _id:
raise SystemError("inserted _id doesn't match given _id")
stats.inserts += 1
else:
stats.insert_warnings += 1
except DuplicateKeyError:
stats.insert_warnings += 1
elif op['op'] == 'd':
# delete
result = dest_collection.remove({'_id': _id})
if result:
if result['n'] == 1:
# success
stats.deletes += 1
else:
# we're deleting by _id, so we should have deleted exactly one document;
# anything else is a warning
#log.debug("warn delete _id = %s; result = %r", base64.b64encode(_id), result)
stats.delete_warnings += 1
if result.get('err', None):
log.error("error while deleting: %r" % op['err'])
elif op['op'] == 'u':
# update. which involves re-reading the document from the source and updating the
# destination with the updated contents
doc = source_collection.find_one({'_id': _id})
if not doc:
# document not found (might have been deleted in a subsequent oplog entry)
stats.update_warnings += 1
return
stats.updates += 1
dest_collection.save(doc)
else:
raise TypeError("unknown op type %s" % op['op'])
def _apply_op_worker(op, source_collection, dest_collection, stats):
"""
Applies an op. Meant to be run as part of a greenlet.
@param op op we're applying
@param source_collection collection we're reading from
@param dest_collection collection we're writing to
@param stats an ApplyStats object
"""
_id = _op_id(op)
# apply the op, ensuring that all ops on this _id execute serially
try:
_apply_op(op, source_collection, dest_collection, stats)
finally:
stats.pending_ids.remove(_id)
@log_exceptions
@squelch_keyboard_interrupt
def apply_oplog(source, dest, percent, state_path):
"""
Applies oplog entries from source to destination. Since the oplog storage format
has known and possibly unknown idiosyncrasies, we take a conservative approach.
Insert and delete ops can be replayed directly. For updates, we do the following:
1. Note the _id of the updated document
2. Retrieve the updated document from the source
3. Upsert the updated document in the destination
@param source dict describing the source replica set and collection
@param dest dict describing the destination cluster and collection
@param percent if set, only apply ops whose _id falls in this percentage subset
@param state_path path to the state database used to persist the oplog position
"""
gevent.monkey.patch_socket()
stats = ApplyStats()
apply_workers = Pool(20)
# connect to state db
state_db = CopyStateDB(state_path)
# connect to mongo
source_client = utils.mongo_connect(source,
ensure_direct=True,
max_pool_size=30,
read_preference=ReadPreference.SECONDARY,
document_class=FasterOrderedDict)
source_collection = source_client[source['db']][source['collection']]
dest_client = utils.mongo_connect(dest,
max_pool_size=30,
document_class=FasterOrderedDict)
dest_collection = dest_client[dest['db']][dest['collection']]
oplog = source_client['local']['oplog.rs']
# print stats periodically
stats.paused = True
stats_greenlet = gevent.spawn(oplog_stats_worker, stats)
# checkpoint oplog position to disk periodically
checkpoint_greenlet = gevent.spawn(oplog_checkpoint_worker, stats, source, dest, state_db)
# figure out where we need to start reading oplog entries; rewind our oplog timestamp
# a bit, to avoid issues with the user pressing Control-C while some ops are pending
#
# this works, because oplog entries are idempotent
start_ts_orig = state_db.get_oplog_ts(source, dest)
start_ts = bson.Timestamp(time=start_ts_orig.time-TS_REWIND, inc=0)
log.info("starting apply at %s", start_ts)
# perform tailing oplog query using the oplog_replay option to efficiently find
# our starting position in the oplog
query = {}
query['ts'] = {'$gte': start_ts}
query['ns'] = source_collection.full_name
cursor = oplog.find(
query,
cursor_type=CursorType.TAILABLE_AWAIT,
)
cursor.add_option(pymongo.cursor._QUERY_OPTIONS['oplog_replay'])
while True:
for op in cursor:
stats.paused = False
_id = _op_id(op)
if percent and not utils.id_in_subset(_id, percent):
continue
stats.ops_retrieved += 1
# block *all* further ops from being applied if there's a pending
# op on the current _id, to ensure serialization
while _id in stats.pending_ids:
gevent.sleep(0.1)
stats.sleeps += 1
# do the real oplog work in a greenlet from the pool
stats.pending_ids.add(_id)
apply_workers.spawn(_apply_op_worker,
op,
source_collection,
dest_collection,
stats)
# update our last timestamp; this is *not* guaranteed to be the timestamp of the
# most recent op, which is impossible because of our out-of-order execution
#
# this is an approximation that needs to be accurate to within TS_REWIND seconds
stats.last_ts = op['ts']
# while we have a tailable cursor, it can stop iteration if no more results come back
# in a reasonable time, so sleep for a bit then try to continue iteration
if cursor.alive:
log.debug("replayed all oplog entries; sleeping...")
stats.paused = True
gevent.sleep(2)
stats.paused = False
else:
log.error("cursor died on us!")
break
# just to silence pyflakes...
stats_greenlet.kill()
checkpoint_greenlet.kill()
|
|
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from argparse import ArgumentParser
import tempfile
import pickle
from time import time
from bigdl.orca import init_orca_context, stop_orca_context, OrcaContext
from bigdl.orca.data.file import makedirs, write_text, exists
from bigdl.dllib.utils.file_utils import put_local_file_to_remote
from bigdl.friesian.feature import FeatureTable
bool_cols = [
'engaged_with_user_is_verified',
'enaging_user_is_verified'
]
count_cols = [
'engaged_with_user_follower_count',
'engaged_with_user_following_count',
'enaging_user_follower_count',
'enaging_user_following_count'
]
timestamp_cols = [
'reply_timestamp',
'retweet_timestamp',
'retweet_with_comment_timestamp',
'like_timestamp'
]
cat_cols = [
'engaged_with_user_id',
'enaging_user_id',
'present_media',
'tweet_type',
'language'
]
list_cols = [
"hashtags",
"present_domains",
"present_links"
]
len_cols = ['len_hashtags',
'len_domains',
'len_links']
media_map = {
'': 0,
'GIF': 1,
'GIF_GIF': 2,
'GIF_Photo': 3,
'GIF_Video': 4,
'Photo': 5,
'Photo_GIF': 6,
'Photo_Photo': 7,
'Photo_Video': 8,
'Video': 9,
'Video_GIF': 10,
'Video_Photo': 11,
'Video_Video': 12
}
type_map = {
'Quote': 0,
'Retweet': 1,
'TopLevel': 2,
}
cross_cols = [['present_media', 'language']]
conf = {"spark.network.timeout": "10000000",
"spark.sql.broadcastTimeout": "7200",
"spark.sql.shuffle.partitions": "2000",
"spark.locality.wait": "0s",
"spark.sql.crossJoin.enabled": "true",
"spark.task.cpus": "1",
"spark.executor.heartbeatInterval": "200s",
"spark.driver.maxResultSize": "40G",
"spark.eventLog.enabled": "true",
"spark.eventLog.dir": "hdfs://172.16.0.105:8020/sparkHistoryLogs",
"spark.app.name": "recsys-preprocess",
"spark.executor.memoryOverhead": "20g"}
def _parse_args():
parser = ArgumentParser()
parser.add_argument('--cluster_mode', type=str, default="local",
help='The cluster mode, such as local, yarn or standalone.')
parser.add_argument('--master', type=str, default=None,
help='The master url, only used when cluster mode is standalone.')
parser.add_argument('--executor_cores', type=int, default=44,
help='The executor core number.')
parser.add_argument('--executor_memory', type=str, default="130g",
help='The executor memory.')
parser.add_argument('--num_executor', type=int, default=8,
help='The number of executor.')
parser.add_argument('--driver_cores', type=int, default=4,
help='The driver core number.')
parser.add_argument('--driver_memory', type=str, default="36g",
help='The driver memory.')
parser.add_argument('--train_files', type=str, default="000-269",
help="range for preprocessing train files, such as 000-269, 000-001.")
parser.add_argument('--input_train_folder', type=str, required=True,
help="Path to the folder of train parquet files.")
parser.add_argument('--input_test_folder', type=str, required=True,
help="Path to the folder of test parquet files.")
parser.add_argument('--output_folder', type=str, default=".",
help="The path to save the preprocessed data to parquet files. ")
parser.add_argument('--cross_sizes', type=str,
help='bucket sizes for cross columns', default="600")
args = parser.parse_args()
start, end = args.train_files.split('-')
args.train_files = list(range(int(start), int(end) + 1))
args.cross_sizes = [int(x) for x in args.cross_sizes.split(',')]
return args
def preprocess(tbl):
tbl = tbl.cast(bool_cols + count_cols + timestamp_cols, "int")\
.fillna(0, timestamp_cols)\
.fillna("", ["present_media", "present_domains", "present_links", "hashtags"])
process_media = lambda x: '_'.join(x.split('\t')[:2])
tbl = tbl.apply("present_media", "present_media", process_media, "string")\
.encode_string("present_media", media_map)\
.encode_string("tweet_type", type_map)
count_func = lambda x: str(x).count('\t') + 1 if x else 0
tbl = tbl.apply("hashtags", "len_hashtags", count_func, "int") \
.apply("present_domains", "len_domains", count_func, "int") \
.apply("present_links", "len_links", count_func, "int")
return tbl
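# For reference, the helpers above behave like this on sample values (illustrative):
#
#     process_media('Photo\tVideo\tPhoto') -> 'Photo_Video'  (then media_map -> 8)
#     count_func('tag1\ttag2\ttag3')       -> 3
#     count_func('')                       -> 0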
def encode_user_id(tbl):
tbl = tbl.rename({"engaged_with_user_id": "user_id"}) \
.encode_string("user_id", user_index, broadcast=False) \
.rename({"user_id": "engaged_with_user_id"})\
.fillna(0, "engaged_with_user_id")
tbl = tbl.rename({"enaging_user_id": "user_id"}) \
.encode_string("user_id", user_index, broadcast=False) \
.rename({"user_id": "enaging_user_id"})\
.fillna(0, "enaging_user_id")
return tbl
def generate_features(tbl, bins, cross_sizes):
tbl = tbl.cut_bins(columns=count_cols,
bins=bins,
out_cols=count_cols)
tbl = tbl.cross_columns(cross_cols, cross_sizes)
return tbl
def transform_label(tbl):
gen_label = lambda x: 1 if max(x) > 0 else 0
tbl = tbl.apply(in_col=timestamp_cols, out_col="label", func=gen_label, dtype="int")
return tbl
if __name__ == '__main__':
args = _parse_args()
if args.cluster_mode == "local":
init_orca_context("local", cores=args.executor_cores, memory=args.executor_memory)
elif args.cluster_mode == "standalone":
init_orca_context("standalone", master=args.master,
cores=args.executor_cores, num_nodes=args.num_executor,
memory=args.executor_memory,
driver_cores=args.driver_cores,
driver_memory=args.driver_memory, conf=conf)
elif args.cluster_mode == "yarn":
init_orca_context("yarn-client", cores=args.executor_cores,
num_nodes=args.num_executor, memory=args.executor_memory,
driver_cores=args.driver_cores, driver_memory=args.driver_memory,
conf=conf)
else:
raise ValueError(
"cluster_mode should be one of 'local', 'yarn' and 'standalone'"
", but got " + args.cluster_mode)
start = time()
train_paths = [os.path.join(args.input_train_folder, 'part-%05d.parquet' % i)
for i in args.train_files]
train_tbl = FeatureTable.read_parquet(train_paths)
train_tbl.df.printSchema()
test_tbl = FeatureTable.read_parquet(args.input_test_folder)
train_tbl = preprocess(train_tbl)
test_tbl = preprocess(test_tbl)
train_tbl, language_idx = train_tbl.category_encode("language")
test_tbl = test_tbl.encode_string("language", language_idx)
user_index = train_tbl.gen_string_idx({'src_cols': ['engaged_with_user_id', 'enaging_user_id'],
'col_name': 'user_id'})
train_tbl = encode_user_id(train_tbl)
test_tbl = encode_user_id(test_tbl)
test_tbl = test_tbl.fillna(0, ["engaged_with_user_id", "enaging_user_id"])
indexes = train_tbl.gen_string_idx(list_cols, do_split=True, sep='\t')
train_tbl = train_tbl.encode_string(list_cols, indexes,
do_split=True, sep='\t', keep_most_frequent=True)
test_tbl = test_tbl.encode_string(list_cols, indexes,
do_split=True, sep='\t', keep_most_frequent=True)
test_tbl = test_tbl.fillna(0, list_cols)
train_tbl.cache()
test_tbl.cache()
bins = [1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7]
train_tbl = generate_features(train_tbl, bins, args.cross_sizes)
test_tbl = generate_features(test_tbl, bins, args.cross_sizes)
train_tbl, min_max_dict = train_tbl.min_max_scale(len_cols)
test_tbl = test_tbl.transform_min_max_scale(len_cols, min_max_dict)
train_tbl = transform_label(train_tbl)
test_tbl = transform_label(test_tbl)
# save preprocessed data
train_tbl.write_parquet(os.path.join(args.output_folder, "train_parquet"))
test_tbl.write_parquet(os.path.join(args.output_folder, "test_parquet"))
# save meta
cat_sizes_dict = {}
cat_sizes_dict['present_media'] = len(media_map)
cat_sizes_dict['tweet_type'] = len(type_map)
cat_sizes_dict['language'] = language_idx.size()
for i in range(len(list_cols)):
cat_sizes_dict[list_cols[i]] = indexes[i].size()
cat_sizes_dict['engaged_with_user_id'] = user_index.size()
cat_sizes_dict['enaging_user_id'] = user_index.size()
cross_sizes_dict = dict(zip(["_".join(cross_names) for cross_names in cross_cols],
args.cross_sizes))
cat_sizes_dict.update(cross_sizes_dict)
count_sizes_dict = dict(zip(count_cols, [len(bins)] * len(count_cols)))
cat_sizes_dict.update(count_sizes_dict)
print("cat size dict: ", cat_sizes_dict)
if not exists(os.path.join(args.output_folder, "meta")):
makedirs(os.path.join(args.output_folder, "meta"))
with tempfile.TemporaryDirectory() as local_path:
with open(os.path.join(local_path, "categorical_sizes.pkl"), 'wb') as f:
pickle.dump(cat_sizes_dict, f)
put_local_file_to_remote(os.path.join(local_path, "categorical_sizes.pkl"),
os.path.join(args.output_folder, "meta/categorical_sizes.pkl"),
over_write=True)
end = time()
print("Preprocessing and save time: ", end - start)
stop_orca_context()
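# Illustrative local-mode invocation (the script name and paths are placeholders):
#
#     python recsys_preprocess.py --cluster_mode local \
#         --input_train_folder /path/to/train_parquet \
#         --input_test_folder /path/to/test_parquet \
#         --output_folder /path/to/output \
#         --train_files 000-009 --cross_sizes 600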
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Contains the core classes and functionality that makes Horizon what it is.
This module is considered internal, and should not be relied on directly.
Public APIs are made available through the :mod:`horizon` module and
the classes contained therein.
"""
import collections
import copy
import inspect
import logging
import os
from django.conf import settings
from django.conf.urls.defaults import patterns, url, include
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse
from django.utils.datastructures import SortedDict
from django.utils.functional import SimpleLazyObject
from django.utils.importlib import import_module
from django.utils.module_loading import module_has_submodule
from django.utils.translation import ugettext as _
from horizon import loaders
from horizon.decorators import (require_auth, require_roles,
require_services, _current_component)
LOG = logging.getLogger(__name__)
# Default configuration dictionary. Do not mutate directly. Use copy.copy().
HORIZON_CONFIG = {
# Allow for ordering dashboards; list or tuple if provided.
'dashboards': None,
# Name of a default dashboard; defaults to first alphabetically if None
'default_dashboard': None,
# Default redirect url for users' home
'user_home': settings.LOGIN_REDIRECT_URL,
'exceptions': {'unauthorized': [],
'not_found': [],
'recoverable': []}
}
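# Deployments typically override these defaults with a HORIZON_CONFIG dictionary in
# the Django settings module, for example (values are illustrative):
#
#     HORIZON_CONFIG = {
#         'dashboards': ('project', 'admin', 'settings'),
#         'default_dashboard': 'project',
#         'user_home': '/home/',
#     }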
def _decorate_urlconf(urlpatterns, decorator, *args, **kwargs):
for pattern in urlpatterns:
if getattr(pattern, 'callback', None):
pattern._callback = decorator(pattern.callback, *args, **kwargs)
if getattr(pattern, 'url_patterns', []):
_decorate_urlconf(pattern.url_patterns, decorator, *args, **kwargs)
class NotRegistered(Exception):
pass
class HorizonComponent(object):
def __init__(self):
super(HorizonComponent, self).__init__()
if not self.slug:
raise ImproperlyConfigured('Every %s must have a slug.'
% self.__class__)
def __unicode__(self):
name = getattr(self, 'name', u"Unnamed %s" % self.__class__.__name__)
return unicode(name)
def _get_default_urlpatterns(self):
package_string = '.'.join(self.__module__.split('.')[:-1])
if getattr(self, 'urls', None):
try:
mod = import_module('.%s' % self.urls, package_string)
except ImportError:
mod = import_module(self.urls)
urlpatterns = mod.urlpatterns
else:
# Try importing a urls.py from the dashboard package
if module_has_submodule(import_module(package_string), 'urls'):
urls_mod = import_module('.urls', package_string)
urlpatterns = urls_mod.urlpatterns
else:
urlpatterns = patterns('')
return urlpatterns
class Registry(object):
def __init__(self):
self._registry = {}
if not getattr(self, '_registerable_class', None):
raise ImproperlyConfigured('Subclasses of Registry must set a '
'"_registerable_class" property.')
def _register(self, cls):
"""Registers the given class.
If the specified class is already registered then it is ignored.
"""
if not inspect.isclass(cls):
raise ValueError('Only classes may be registered.')
elif not issubclass(cls, self._registerable_class):
raise ValueError('Only %s classes or subclasses may be registered.'
% self._registerable_class.__name__)
if cls not in self._registry:
cls._registered_with = self
self._registry[cls] = cls()
return self._registry[cls]
def _unregister(self, cls):
"""Unregisters the given class.
If the specified class isn't registered, ``NotRegistered`` will
be raised.
"""
if not issubclass(cls, self._registerable_class):
raise ValueError('Only %s classes or subclasses may be '
'unregistered.' % self._registerable_class)
if cls not in self._registry.keys():
raise NotRegistered('%s is not registered' % cls)
del self._registry[cls]
return True
def _registered(self, cls):
if inspect.isclass(cls) and issubclass(cls, self._registerable_class):
found = self._registry.get(cls, None)
if found:
return found
else:
# Allow for fetching by slugs as well.
for registered in self._registry.values():
if registered.slug == cls:
return registered
class_name = self._registerable_class.__name__
if hasattr(self, "_registered_with"):
parent = self._registered_with._registerable_class.__name__
raise NotRegistered('%(type)s with slug "%(slug)s" is not '
'registered with %(parent)s "%(name)s".'
% {"type": class_name,
"slug": cls,
"parent": parent,
"name": self.slug})
else:
slug = getattr(cls, "slug", cls)
raise NotRegistered('%(type)s with slug "%(slug)s" is not '
'registered.' % {"type": class_name,
"slug": slug})
class Panel(HorizonComponent):
""" A base class for defining Horizon dashboard panels.
All Horizon dashboard panels should extend from this class. It provides
the appropriate hooks for automatically constructing URLconfs, and
providing role-based access control.
.. attribute:: name
The name of the panel. This will be displayed in the
auto-generated navigation and various other places.
Default: ``''``.
.. attribute:: slug
A unique "short name" for the panel. The slug is used as
a component of the URL path for the panel. Default: ``''``.
.. attribute:: roles
A list of role names, all of which a user must possess in order
to access any view associated with this panel. This attribute
is combined cumulatively with any roles required on the
``Dashboard`` class with which it is registered.
.. attribute:: services
A list of service names, all of which must be in the service catalog
in order for this panel to be available.
.. attribute:: urls
Path to a URLconf of views for this panel using dotted Python
notation. If no value is specified, a file called ``urls.py``
living in the same package as the ``panel.py`` file is used.
Default: ``None``.
.. attribute:: nav
.. method:: nav(context)
The ``nav`` attribute can be either boolean value or a callable
which accepts a ``RequestContext`` object as a single argument
to control whether or not this panel should appear in
automatically-generated navigation. Default: ``True``.
.. attribute:: index_url_name
The ``name`` argument for the URL pattern which corresponds to
the index view for this ``Panel``. This is the view that
:meth:`.Panel.get_absolute_url` will attempt to reverse.
"""
name = ''
slug = ''
urls = None
nav = True
index_url_name = "index"
def __repr__(self):
return "<Panel: %s>" % self.slug
def get_absolute_url(self):
""" Returns the default URL for this panel.
The default URL is defined as the URL pattern with ``name="index"`` in
the URLconf for this panel.
"""
try:
return reverse('horizon:%s:%s:%s' % (self._registered_with.slug,
self.slug,
self.index_url_name))
except Exception as exc:
# Logging here since this will often be called in a template
# where the exception would be hidden.
LOG.info("Error reversing absolute URL for %s: %s" % (self, exc))
raise
@property
def _decorated_urls(self):
urlpatterns = self._get_default_urlpatterns()
# Apply access controls to all views in the patterns
roles = getattr(self, 'roles', [])
services = getattr(self, 'services', [])
_decorate_urlconf(urlpatterns, require_roles, roles)
_decorate_urlconf(urlpatterns, require_services, services)
_decorate_urlconf(urlpatterns, _current_component, panel=self)
# Return the three arguments to django.conf.urls.defaults.include
return urlpatterns, self.slug, self.slug
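# A minimal sketch of a concrete panel built on the attributes documented above
# (module, role and service names are illustrative):
#
#     class Images(Panel):
#         name = _("Images")
#         slug = 'images'
#         roles = ('admin',)
#         services = ('compute',)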
class PanelGroup(object):
""" A container for a set of :class:`~horizon.Panel` classes.
When iterated, it will yield each of the ``Panel`` instances it
contains.
.. attribute:: slug
A unique string to identify this panel group. Required.
.. attribute:: name
A user-friendly name which will be used as the group heading in
places such as the navigation. Default: ``None``.
.. attribute:: panels
A list of panel module names which should be contained within this
grouping.
"""
def __init__(self, dashboard, slug=None, name=None, panels=None):
self.dashboard = dashboard
self.slug = slug or getattr(self, "slug", "default")
self.name = name or getattr(self, "name", None)
# Our panels must be mutable so it can be extended by others.
self.panels = list(panels or getattr(self, "panels", []))
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.slug)
def __unicode__(self):
return self.name
def __iter__(self):
panel_instances = []
for name in self.panels:
try:
panel_instances.append(self.dashboard.get_panel(name))
except NotRegistered, e:
LOG.debug(e)
return iter(panel_instances)
class Dashboard(Registry, HorizonComponent):
""" A base class for defining Horizon dashboards.
All Horizon dashboards should extend from this base class. It provides the
appropriate hooks for automatic discovery of :class:`~horizon.Panel`
modules, automatically constructing URLconfs, and providing role-based
access control.
.. attribute:: name
The name of the dashboard. This will be displayed in the
auto-generated navigation and various other places.
Default: ``''``.
.. attribute:: slug
A unique "short name" for the dashboard. The slug is used as
a component of the URL path for the dashboard. Default: ``''``.
.. attribute:: panels
The ``panels`` attribute can be either a flat list containing the name
of each panel **module** which should be loaded as part of this
dashboard, or a list of :class:`~horizon.PanelGroup` classes which
define groups of panels as in the following example::
class SystemPanels(horizon.PanelGroup):
slug = "syspanel"
name = _("System Panel")
panels = ('overview', 'instances', ...)
class Syspanel(horizon.Dashboard):
panels = (SystemPanels,)
Automatically generated navigation will use the order of the
modules in this attribute.
Default: ``[]``.
.. warning::
The values for this attribute should not correspond to the
:attr:`~.Panel.name` attributes of the ``Panel`` classes.
They should be the names of the Python modules in which the
``panel.py`` files live. This is used for the automatic
loading and registration of ``Panel`` classes much like
Django's ``ModelAdmin`` machinery.
Panel modules must be listed in ``panels`` in order to be
discovered by the automatic registration mechanism.
.. attribute:: default_panel
The name of the panel which should be treated as the default
panel for the dashboard, i.e. when you visit the root URL
for this dashboard, that's the panel that is displayed.
Default: ``None``.
.. attribute:: roles
A list of role names, all of which a user must possess in order
to access any panel registered with this dashboard. This attribute
is combined cumulatively with any roles required on individual
:class:`~horizon.Panel` classes.
.. attribute:: services
A list of service names, all of which must be in the service catalog
in order for this dashboard to be available.
.. attribute:: urls
Optional path to a URLconf of additional views for this dashboard
which are not connected to specific panels. Default: ``None``.
.. attribute:: nav
Optional boolean to control whether or not this dashboard should
appear in automatically-generated navigation. Default: ``True``.
.. attribute:: supports_tenants
Optional boolean that indicates whether or not this dashboard includes
support for projects/tenants. If set to ``True`` this dashboard's
navigation will include a UI element that allows the user to select
project/tenant. Default: ``False``.
.. attribute:: public
Boolean value to determine whether this dashboard can be viewed
without being logged in. Defaults to ``False``.
"""
_registerable_class = Panel
name = ''
slug = ''
urls = None
panels = []
default_panel = None
nav = True
supports_tenants = False
public = False
def __repr__(self):
return "<Dashboard: %s>" % self.slug
def __init__(self, *args, **kwargs):
super(Dashboard, self).__init__(*args, **kwargs)
self._panel_groups = None
def get_panel(self, panel):
"""
Returns the specified :class:`~horizon.Panel` instance registered
with this dashboard.
"""
return self._registered(panel)
def get_panels(self):
"""
Returns the :class:`~horizon.Panel` instances registered with this
dashboard in order, without any panel groupings.
"""
all_panels = []
panel_groups = self.get_panel_groups()
for panel_group in panel_groups.values():
all_panels.extend(panel_group)
return all_panels
def get_panel_group(self, slug):
return self._panel_groups[slug]
def get_panel_groups(self):
registered = copy.copy(self._registry)
panel_groups = []
# Gather our known panels
for panel_group in self._panel_groups.values():
for panel in panel_group:
registered.pop(panel.__class__)
panel_groups.append((panel_group.slug, panel_group))
# Deal with leftovers (such as add-on registrations)
if len(registered):
slugs = [panel.slug for panel in registered.values()]
new_group = PanelGroup(self,
slug="other",
name=_("Other"),
panels=slugs)
panel_groups.append((new_group.slug, new_group))
return SortedDict(panel_groups)
def get_absolute_url(self):
""" Returns the default URL for this dashboard.
The default URL is defined as the URL pattern with ``name="index"``
in the URLconf for the :class:`~horizon.Panel` specified by
:attr:`~horizon.Dashboard.default_panel`.
"""
try:
return self._registered(self.default_panel).get_absolute_url()
except:
# Logging here since this will often be called in a template
# where the exception would be hidden.
LOG.exception("Error reversing absolute URL for %s." % self)
raise
@property
def _decorated_urls(self):
urlpatterns = self._get_default_urlpatterns()
default_panel = None
# Add in each panel's views except for the default view.
for panel in self._registry.values():
if panel.slug == self.default_panel:
default_panel = panel
continue
urlpatterns += patterns('',
url(r'^%s/' % panel.slug, include(panel._decorated_urls)))
# Now the default view, which should come last
if not default_panel:
raise NotRegistered('The default panel "%s" is not registered.'
% self.default_panel)
urlpatterns += patterns('',
url(r'', include(default_panel._decorated_urls)))
# Require login if not public.
if not self.public:
_decorate_urlconf(urlpatterns, require_auth)
# Apply access controls to all views in the patterns
roles = getattr(self, 'roles', [])
services = getattr(self, 'services', [])
_decorate_urlconf(urlpatterns, require_roles, roles)
_decorate_urlconf(urlpatterns, require_services, services)
_decorate_urlconf(urlpatterns, _current_component, dashboard=self)
# Return the three arguments to django.conf.urls.defaults.include
return urlpatterns, self.slug, self.slug
def _autodiscover(self):
""" Discovers panels to register from the current dashboard module. """
if getattr(self, "_autodiscover_complete", False):
return
panels_to_discover = []
panel_groups = []
# If we have a flat iterable of panel names, wrap it again so
# we have a consistent structure for the next step.
if all([isinstance(i, basestring) for i in self.panels]):
self.panels = [self.panels]
# Now iterate our panel sets.
for panel_set in self.panels:
# Instantiate PanelGroup classes.
if not isinstance(panel_set, collections.Iterable) and \
issubclass(panel_set, PanelGroup):
panel_group = panel_set(self)
# Check for nested tuples, and convert them to PanelGroups
elif not isinstance(panel_set, PanelGroup):
panel_group = PanelGroup(self, panels=panel_set)
# Put our results into their appropriate places
panels_to_discover.extend(panel_group.panels)
panel_groups.append((panel_group.slug, panel_group))
self._panel_groups = SortedDict(panel_groups)
# Do the actual discovery
package = '.'.join(self.__module__.split('.')[:-1])
mod = import_module(package)
for panel in panels_to_discover:
try:
before_import_registry = copy.copy(self._registry)
import_module('.%s.panel' % panel, package)
except:
self._registry = before_import_registry
if module_has_submodule(mod, panel):
raise
self._autodiscover_complete = True
@classmethod
def register(cls, panel):
""" Registers a :class:`~horizon.Panel` with this dashboard. """
panel_class = Horizon.register_panel(cls, panel)
# Support template loading from panel template directories.
panel_mod = import_module(panel.__module__)
panel_dir = os.path.dirname(panel_mod.__file__)
template_dir = os.path.join(panel_dir, "templates")
if os.path.exists(template_dir):
key = os.path.join(cls.slug, panel.slug)
loaders.panel_template_dirs[key] = template_dir
return panel_class
@classmethod
def unregister(cls, panel):
""" Unregisters a :class:`~horizon.Panel` from this dashboard. """
success = Horizon.unregister_panel(cls, panel)
if success:
# Remove the panel's template directory.
key = os.path.join(cls.slug, panel.slug)
if key in loaders.panel_template_dirs:
del loaders.panel_template_dirs[key]
return success
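# Illustrative only -- the names below are made up and not defined in this
# module. A downstream dashboard typically subclasses Dashboard and registers
# itself with Horizon, which is what drives the register()/_autodiscover()
# machinery above:
#
#     import horizon
#
#     class MyDashboard(horizon.Dashboard):
#         name = _("My Dashboard")
#         slug = "mydashboard"
#         panels = ("overview",)
#         default_panel = "overview"
#
#     horizon.register(MyDashboard)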
class Workflow(object):
def __init__(*args, **kwargs):
raise NotImplementedError()
try:
from django.utils.functional import empty
except ImportError:
#Django 1.3 fallback
empty = None
class LazyURLPattern(SimpleLazyObject):
def __iter__(self):
if self._wrapped is empty:
self._setup()
return iter(self._wrapped)
def __reversed__(self):
if self._wrapped is empty:
self._setup()
return reversed(self._wrapped)
class Site(Registry, HorizonComponent):
""" The overarching class which encompasses all dashboards and panels. """
# Required for registry
_registerable_class = Dashboard
name = "Horizon"
namespace = 'horizon'
slug = 'horizon'
urls = 'horizon.site_urls'
def __repr__(self):
return u"<Site: %s>" % self.slug
@property
def _conf(self):
conf = copy.copy(HORIZON_CONFIG)
conf.update(getattr(settings, 'HORIZON_CONFIG', {}))
return conf
@property
def dashboards(self):
return self._conf['dashboards']
@property
def default_dashboard(self):
return self._conf['default_dashboard']
def register(self, dashboard):
""" Registers a :class:`~horizon.Dashboard` with Horizon."""
return self._register(dashboard)
def unregister(self, dashboard):
""" Unregisters a :class:`~horizon.Dashboard` from Horizon. """
return self._unregister(dashboard)
def registered(self, dashboard):
return self._registered(dashboard)
def register_panel(self, dashboard, panel):
dash_instance = self.registered(dashboard)
return dash_instance._register(panel)
def unregister_panel(self, dashboard, panel):
dash_instance = self.registered(dashboard)
if not dash_instance:
raise NotRegistered("The dashboard %s is not registered."
% dashboard)
return dash_instance._unregister(panel)
def get_dashboard(self, dashboard):
""" Returns the specified :class:`~horizon.Dashboard` instance. """
return self._registered(dashboard)
def get_dashboards(self):
""" Returns an ordered tuple of :class:`~horizon.Dashboard` modules.
Orders dashboards according to the ``"dashboards"`` key in
``settings.HORIZON_CONFIG`` or else returns all registered dashboards
in alphabetical order.
Any remaining :class:`~horizon.Dashboard` classes registered with
Horizon but not listed in ``settings.HORIZON_CONFIG['dashboards']``
will be appended to the end of the list alphabetically.
"""
if self.dashboards:
registered = copy.copy(self._registry)
dashboards = []
for item in self.dashboards:
dashboard = self._registered(item)
dashboards.append(dashboard)
registered.pop(dashboard.__class__)
if len(registered):
extra = registered.values()
extra.sort()
dashboards.extend(extra)
return dashboards
else:
dashboards = self._registry.values()
dashboards.sort()
return dashboards
def get_default_dashboard(self):
""" Returns the default :class:`~horizon.Dashboard` instance.
If ``"default_dashboard"`` is specified in ``settings.HORIZON_CONFIG``
then that dashboard will be returned. If not, the first dashboard
returned by :func:`~horizon.get_dashboards` will be returned.
"""
if self.default_dashboard:
return self._registered(self.default_dashboard)
elif len(self._registry):
return self.get_dashboards()[0]
else:
raise NotRegistered("No dashboard modules have been registered.")
def get_user_home(self, user):
""" Returns the default URL for a particular user.
This method can be used to customize where a user is sent when
they log in, etc. By default it returns the value of
:meth:`get_absolute_url`.
An alternative function can be supplied to customize this behavior
by specifying either a URL or a function which returns a URL via
the ``"user_home"`` key in ``settings.HORIZON_CONFIG``. Each of these
would be valid::
{"user_home": "/home",} # A URL
{"user_home": "my_module.get_user_home",} # Path to a function
{"user_home": lambda user: "/" + user.name,} # A function
{"user_home": None,} # Will always return the default dashboard
This can be useful if the default dashboard may not be accessible
to all users. When user_home is missing from HORIZON_CONFIG,
it will default to the settings.LOGIN_REDIRECT_URL value.
"""
user_home = self._conf['user_home']
if user_home:
if callable(user_home):
return user_home(user)
elif isinstance(user_home, basestring):
# Assume we've got a URL if there's a slash in it
if user_home.find("/") != -1:
return user_home
else:
mod, func = user_home.rsplit(".", 1)
return getattr(import_module(mod), func)(user)
# If it's not callable and not a string, it's wrong.
raise ValueError('The user_home setting must be either a string '
'or a callable object (e.g. a function).')
else:
return self.get_absolute_url()
def get_absolute_url(self):
""" Returns the default URL for Horizon's URLconf.
The default URL is determined by calling
:meth:`~horizon.Dashboard.get_absolute_url`
on the :class:`~horizon.Dashboard` instance returned by
:meth:`~horizon.get_default_dashboard`.
"""
return self.get_default_dashboard().get_absolute_url()
@property
def _lazy_urls(self):
""" Lazy loading for URL patterns.
This method avoids problems associated with attempting to evaluate
the URLconf before the settings module has been loaded.
"""
def url_patterns():
return self._urls()[0]
return LazyURLPattern(url_patterns), self.namespace, self.slug
def _urls(self):
""" Constructs the URLconf for Horizon from registered Dashboards. """
urlpatterns = self._get_default_urlpatterns()
self._autodiscover()
# Discover each dashboard's panels.
for dash in self._registry.values():
dash._autodiscover()
# Allow for override modules
config = getattr(settings, "HORIZON_CONFIG", {})
if config.get("customization_module", None):
customization_module = config["customization_module"]
bits = customization_module.split('.')
mod_name = bits.pop()
package = '.'.join(bits)
mod = import_module(package)
try:
before_import_registry = copy.copy(self._registry)
import_module('%s.%s' % (package, mod_name))
except:
self._registry = before_import_registry
if module_has_submodule(mod, mod_name):
raise
# Compile the dynamic urlconf.
for dash in self._registry.values():
urlpatterns += patterns('',
url(r'^%s/' % dash.slug, include(dash._decorated_urls)))
# Return the three arguments to django.conf.urls.defaults.include
return urlpatterns, self.namespace, self.slug
def _autodiscover(self):
""" Discovers modules to register from ``settings.INSTALLED_APPS``.
This makes sure that the appropriate modules get imported to register
themselves with Horizon.
"""
if not getattr(self, '_registerable_class', None):
raise ImproperlyConfigured('You must set a '
'"_registerable_class" property '
'in order to use autodiscovery.')
# Discover both dashboards and panels, in that order
for mod_name in ('dashboard', 'panel'):
for app in settings.INSTALLED_APPS:
mod = import_module(app)
try:
before_import_registry = copy.copy(self._registry)
import_module('%s.%s' % (app, mod_name))
except:
self._registry = before_import_registry
if module_has_submodule(mod, mod_name):
raise
class HorizonSite(Site):
"""
A singleton implementation of Site such that all dealings with horizon
get the same instance no matter what. There can be only one.
"""
_instance = None
def __new__(cls, *args, **kwargs):
if not cls._instance:
cls._instance = super(Site, cls).__new__(cls, *args, **kwargs)
return cls._instance
# The one true Horizon
Horizon = HorizonSite()
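# A sketch of how the singleton is typically consumed from a Django URLconf
# (assuming, as the _lazy_urls property above suggests, that the horizon
# package re-exports Horizon._lazy_urls as ``horizon.urls``); the project
# layout and import paths here are illustrative, not part of this module:
#
#     # project urls.py
#     from django.conf.urls.defaults import patterns, include, url
#     import horizon
#
#     urlpatterns = patterns('',
#         url(r'', include(horizon.urls)),
#     )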
|
|
from __future__ import absolute_import
from ..base import BaseObj, FieldMeta
from ...utils import deref
from ...io import SwaggerRequest, SwaggerResponse
from ...primitives import Array
import six
import copy
class BaseObj_v2_0(BaseObj):
__swagger_version__ = '2.0'
class XMLObject(BaseObj_v2_0):
""" XML Object
"""
__swagger_fields__ = {
'name': None,
'namespace': None,
'prefix': None,
'attribute': None,
'wrapped': None,
}
class BaseSchema(BaseObj_v2_0):
""" Base type for Items, Schema, Parameter, Header
"""
__swagger_fields__ = {
'type': None,
'format': None,
'items': None,
'default': None,
'maximum': None,
'exclusiveMaximum': None,
'minimum': None,
'exclusiveMinimum': None,
'maxLength': None,
'minLength': None,
'maxItems': None,
'minItems': None,
'multipleOf': None,
'enum': None,
'pattern': None,
'uniqueItems': None,
}
class Items(six.with_metaclass(FieldMeta, BaseSchema)):
""" Items Object
"""
__swagger_fields__ = {
'collectionFormat': None,
}
def _prim_(self, v, prim_factory):
return prim_factory.produce(self, v)
class Schema(six.with_metaclass(FieldMeta, BaseSchema)):
""" Schema Object
"""
__swagger_fields__ = {
'$ref': None,
'maxProperties': None,
'minProperties': None,
'required': [],
'allOf': [],
'properties': {},
'additionalProperties': True,
'title': None,
'description': None,
'discriminator': None,
# TODO: readonly not handled
'readOnly': None,
'xml': None,
'externalDocs': None,
'example': None,
}
__internal_fields__ = {
# pyswagger only
'ref_obj': None,
'norm_ref': None,
'name': None,
}
def _prim_(self, v, prim_factory):
return prim_factory.produce(self, v)
class Swagger(six.with_metaclass(FieldMeta, BaseObj_v2_0)):
""" Swagger Object
"""
__swagger_fields__ = {
'swagger': None,
'info': None,
'host': None,
'basePath': None,
'schemes': [],
'consumes': [],
'produces': [],
'paths': None,
'definitions': None,
'parameters': None,
'responses': None,
'securityDefinitions': None,
'security': None,
'tags': None,
'externalDocs': None,
}
class Contact(six.with_metaclass(FieldMeta, BaseObj_v2_0)):
""" Contact Object
"""
__swagger_fields__ = {
'name': None,
'url': None,
'email': None,
}
class License(six.with_metaclass(FieldMeta, BaseObj_v2_0)):
""" License Object
"""
__swagger_fields__ = {
'name': None,
'url': None,
}
class Info(six.with_metaclass(FieldMeta, BaseObj_v2_0)):
""" Info Object
"""
__swagger_fields__ = {
'version': None,
'title': None,
'description': None,
'termsOfService': None,
'contact': None,
'license': None,
}
class Parameter(six.with_metaclass(FieldMeta, BaseSchema)):
""" Parameter Object
"""
__swagger_fields__ = {
# Reference Object
'$ref': None,
'name': None,
'in': None,
'required': None,
# body parameter
'schema': None,
# other parameter
'collectionFormat': None,
# for converter only
'description': None,
# TODO: not supported yet
'allowEmptyValue': False,
}
__internal_fields__ = {
# pyswagger only
'ref_obj': None,
'norm_ref': None,
}
def _prim_(self, v, prim_factory):
i = getattr(self, 'in')
return prim_factory.produce(self.schema, v) if i == 'body' else prim_factory.produce(self, v)
class Header(six.with_metaclass(FieldMeta, BaseSchema)):
""" Header Object
"""
__swagger_fields__ = {
'collectionFormat': None,
'description': None,
}
def _prim_(self, v, prim_factory):
return prim_factory.produce(self, v)
class Response(six.with_metaclass(FieldMeta, BaseObj_v2_0)):
""" Response Object
"""
__swagger_fields__ = {
# Reference Object
'$ref': None,
'schema': None,
'headers': {},
'description': None,
'examples': None,
}
__internal_fields__ = {
'ref_obj': None,
'norm_ref': None,
}
class Operation(six.with_metaclass(FieldMeta, BaseObj_v2_0)):
""" Operation Object
"""
__swagger_fields__ = {
'tags': None,
'operationId': None,
'consumes': [],
'produces': [],
'schemes': [],
'parameters': None,
'responses': None,
'deprecated': False,
'security': None,
'description': None,
'summary': None,
'externalDocs': None,
}
__internal_fields__ = {
'method': None,
'url': None,
'path': None,
'base_path': None,
'cached_schemes': [],
}
def __call__(self, **k):
# prepare parameter set
params = dict(header={}, query=[], path={}, body={}, formData=[], file={})
names = []
def _convert_parameter(p):
if p.name not in k and not p.is_set("default") and p.required:
raise ValueError('requires parameter: ' + p.name)
if p.is_set("default"):
v = k.get(p.name, p.default)
else:
if p.name in k:
v = k[p.name]
else:
# do not provide a value for parameters that the user didn't specify.
return
c = p._prim_(v, self._prim_factory)
i = getattr(p, 'in')
if p.type == 'file':
params['file'][p.name] = c
elif i in ('query', 'formData'):
if isinstance(c, Array):
params[i].extend([tuple([p.name, v]) for v in c.to_url()])
else:
params[i].append((p.name, str(c),))
else:
params[i][p.name] = str(c) if i != 'body' else c
names.append(p.name)
for p in self.parameters:
_convert_parameter(deref(p))
# check for unknown parameter
unknown = set(six.iterkeys(k)) - set(names)
if len(unknown) > 0:
raise ValueError('Unknown parameters: {0}'.format(unknown))
return \
SwaggerRequest(op=self, params=params), SwaggerResponse(self)
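# Rough sketch of what __call__ assembles, using a made-up "getPetById"
# operation with a single required path parameter (none of these names come
# from this module): calling ``op(petId=1)`` routes the value through
# _convert_parameter, so the prepared parameter set looks roughly like
#
#     params = {'header': {}, 'query': [], 'path': {'petId': '1'},
#               'body': {}, 'formData': [], 'file': {}}
#
# and the call returns a ``(SwaggerRequest, SwaggerResponse)`` pair built
# from it.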
class PathItem(six.with_metaclass(FieldMeta, BaseObj_v2_0)):
""" Path Item Object
"""
__swagger_fields__ = {
# Reference Object
'$ref': None,
'get': None,
'put': None,
'post': None,
'delete': None,
'options': None,
'head': None,
'patch': None,
'parameters': [],
}
__internal_fields__ = {
'ref_obj': None,
'norm_ref': None,
}
class SecurityScheme(six.with_metaclass(FieldMeta, BaseObj_v2_0)):
""" Security Scheme Object
"""
__swagger_fields__ = {
'type': None,
'name': None,
'in': None,
'flow': None,
'authorizationUrl': None,
'tokenUrl': None,
'scopes': None,
'description': None,
}
class Tag(six.with_metaclass(FieldMeta, BaseObj_v2_0)):
""" Tag Object
"""
__swagger_fields__ = {
'name': None,
'description': None,
'externalDocs': None,
}
class ExternalDocumentation(six.with_metaclass(FieldMeta, BaseObj_v2_0)):
""" External Documentation Object
"""
__swagger_fields__ = {
'description': None,
'url': None,
}
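# Purely for illustration (not used by pyswagger itself): a minimal Swagger
# 2.0 document as a plain dict. Each top-level key mirrors an entry in
# Swagger.__swagger_fields__ above; 'info' corresponds to Info, the path item
# to PathItem, and its 'get' entry to Operation. The API described here is
# made up.
_EXAMPLE_SPEC = {
    'swagger': '2.0',
    'info': {'title': 'Example API', 'version': '1.0'},
    'host': 'api.example.com',
    'basePath': '/v1',
    'schemes': ['https'],
    'paths': {
        '/pets/{petId}': {
            'get': {
                'operationId': 'getPetById',
                'parameters': [
                    {'name': 'petId', 'in': 'path',
                     'required': True, 'type': 'integer'},
                ],
                'responses': {'200': {'description': 'A single pet'}},
            },
        },
    },
}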
|
|
'''correlation plots
Author: Josef Perktold
License: BSD-3
example for usage with different options in
statsmodels\sandbox\examples\thirdparty\ex_ratereturn.py
'''
import numpy as np
from . import utils
def plot_corr(dcorr, xnames=None, ynames=None, title=None, normcolor=False,
ax=None, cmap='RdYlBu_r'):
"""Plot correlation of many variables in a tight color grid.
Parameters
----------
dcorr : ndarray
Correlation matrix, square 2-D array.
xnames : list of str, optional
Labels for the horizontal axis. If not given (None), then the
matplotlib defaults (integers) are used. If it is an empty list, [],
then no ticks and labels are added.
ynames : list of str, optional
Labels for the vertical axis. Works the same way as `xnames`.
If not given, the same names as for `xnames` are re-used.
title : str, optional
The figure title. If None, the default ('Correlation Matrix') is used.
If ``title=''``, then no title is added.
normcolor : bool or tuple of scalars, optional
If False (default), then the color coding range corresponds to the
range of `dcorr`. If True, then the color range is normalized to
(-1, 1). If this is a tuple of two numbers, then they define the range
for the color bar.
ax : Matplotlib AxesSubplot instance, optional
If `ax` is None, then a figure is created. If an axis instance is
given, then only the main plot but not the colorbar is created.
cmap : str or Matplotlib Colormap instance, optional
The colormap for the plot. Can be any valid Matplotlib Colormap
instance or name.
Returns
-------
fig : Matplotlib figure instance
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
Examples
--------
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> import statsmodels.api as sm
>>> import statsmodels.graphics.api as smg
>>> hie_data = sm.datasets.randhie.load_pandas()
>>> corr_matrix = np.corrcoef(hie_data.data.T)
>>> smg.plot_corr(corr_matrix, xnames=hie_data.names)
>>> plt.show()
"""
if ax is None:
create_colorbar = True
else:
create_colorbar = False
fig, ax = utils.create_mpl_ax(ax)
import matplotlib as mpl
from matplotlib import cm
nvars = dcorr.shape[0]
if ynames is None:
ynames = xnames
if title is None:
title = 'Correlation Matrix'
if isinstance(normcolor, tuple):
vmin, vmax = normcolor
elif normcolor:
vmin, vmax = -1.0, 1.0
else:
vmin, vmax = None, None
axim = ax.imshow(dcorr, cmap=cmap, interpolation='nearest',
extent=(0,nvars,0,nvars), vmin=vmin, vmax=vmax)
# create list of label positions
labelPos = np.arange(0, nvars) + 0.5
if ynames:
ax.set_yticks(labelPos)
ax.set_yticks(labelPos[:-1]+0.5, minor=True)
ax.set_yticklabels(ynames[::-1], fontsize='small',
horizontalalignment='right')
elif ynames == []:
ax.set_yticks([])
if xnames:
ax.set_xticks(labelPos)
ax.set_xticks(labelPos[:-1]+0.5, minor=True)
ax.set_xticklabels(xnames, fontsize='small', rotation=45,
horizontalalignment='right')
elif xnames == []:
ax.set_xticks([])
if not title == '':
ax.set_title(title)
if create_colorbar:
fig.colorbar(axim, use_gridspec=True)
fig.tight_layout()
ax.tick_params(which='minor', length=0)
ax.tick_params(direction='out', top=False, right=False)
try:
ax.grid(True, which='minor', linestyle='-', color='w', lw=1)
except AttributeError:
# Seems to fail for axes created with AxesGrid. MPL bug?
pass
return fig
def plot_corr_grid(dcorrs, titles=None, ncols=None, normcolor=False, xnames=None,
ynames=None, fig=None, cmap='RdYlBu_r'):
"""Create a grid of correlation plots.
The individual correlation plots are assumed to all have the same
variables, axis labels can be specified only once.
Parameters
----------
dcorrs : list or iterable of ndarrays
List of correlation matrices.
titles : list of str, optional
List of titles for the subplots. By default no titles are shown.
ncols : int, optional
Number of columns in the subplot grid. If not given, the number of
columns is determined automatically.
normcolor : bool or tuple, optional
If False (default), then the color coding range corresponds to the
range of `dcorr`. If True, then the color range is normalized to
(-1, 1). If this is a tuple of two numbers, then they define the range
for the color bar.
xnames : list of str, optional
Labels for the horizontal axis. If not given (None), then the
matplotlib defaults (integers) are used. If it is an empty list, [],
then no ticks and labels are added.
ynames : list of str, optional
Labels for the vertical axis. Works the same way as `xnames`.
If not given, the same names as for `xnames` are re-used.
fig : Matplotlib figure instance, optional
If given, this figure is simply returned. Otherwise a new figure is
created.
cmap : str or Matplotlib Colormap instance, optional
The colormap for the plot. Can be any valid Matplotlib Colormap
instance or name.
Returns
-------
fig : Matplotlib figure instance
If `fig` is None, the newly created figure. Otherwise the input `fig` is
returned.
Examples
--------
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> import statsmodels.api as sm
In this example we just reuse the same correlation matrix several times.
Of course in reality one would show different correlations (measuring
another type of correlation, for example Pearson (linear) versus Spearman or
Kendall (rank) correlations) for the same variables.
>>> hie_data = sm.datasets.randhie.load_pandas()
>>> corr_matrix = np.corrcoef(hie_data.data.T)
>>> sm.graphics.plot_corr_grid([corr_matrix] * 8, xnames=hie_data.names)
>>> plt.show()
"""
if ynames is None:
ynames = xnames
if not titles:
titles = ['']*len(dcorrs)
n_plots = len(dcorrs)
if ncols is not None:
nrows = int(np.ceil(n_plots / float(ncols)))
else:
# Determine number of rows and columns, square if possible, otherwise
# prefer a wide (more columns) over a high layout.
if n_plots < 4:
nrows, ncols = 1, n_plots
else:
nrows = int(np.sqrt(n_plots))
ncols = int(np.ceil(n_plots / float(nrows)))
# Create a figure with the correct size
aspect = min(ncols / float(nrows), 1.8)
vsize = np.sqrt(nrows) * 5
fig = utils.create_mpl_fig(fig, figsize=(vsize * aspect + 1, vsize))
for i, c in enumerate(dcorrs):
ax = fig.add_subplot(nrows, ncols, i+1)
# Ensure labels are only plotted on the bottom row and left column
_xnames = xnames if nrows * ncols - (i+1) < ncols else []
_ynames = ynames if (i+1) % ncols == 1 else []
plot_corr(c, xnames=_xnames, ynames=_ynames, title=titles[i],
normcolor=normcolor, ax=ax, cmap=cmap)
# Adjust figure margins and add a colorbar
fig.subplots_adjust(bottom=0.1, left=0.09, right=0.9, top=0.9)
cax = fig.add_axes([0.92, 0.1, 0.025, 0.8])
fig.colorbar(fig.axes[0].images[0], cax=cax)
return fig
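# A quick, standalone illustration (not part of the statsmodels API) of the
# layout logic above: for n_plots = 8 with ncols unspecified, nrows becomes
# int(np.sqrt(8)) == 2 and ncols int(np.ceil(8 / 2.)) == 4, and the masking
# expressions put x labels only on the bottom row and y labels only on the
# first column.
if __name__ == '__main__':
    n_plots = 8
    nrows = int(np.sqrt(n_plots))                   # -> 2
    ncols = int(np.ceil(n_plots / float(nrows)))    # -> 4
    bottom_row = [nrows * ncols - (i + 1) < ncols for i in range(n_plots)]
    first_col = [(i + 1) % ncols == 1 for i in range(n_plots)]
    assert bottom_row == [False] * 4 + [True] * 4
    assert first_col == [True, False, False, False, True, False, False, False]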
|
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v9.resources.types import (
keyword_plan_ad_group_keyword,
)
from google.ads.googleads.v9.services.types import (
keyword_plan_ad_group_keyword_service,
)
from .base import KeywordPlanAdGroupKeywordServiceTransport, DEFAULT_CLIENT_INFO
class KeywordPlanAdGroupKeywordServiceGrpcTransport(
KeywordPlanAdGroupKeywordServiceTransport
):
"""gRPC backend transport for KeywordPlanAdGroupKeywordService.
Service to manage Keyword Plan ad group keywords.
KeywordPlanAdGroup is required to add ad group keywords.
Positive and negative keywords are supported. A maximum of
10,000 positive keywords are allowed per keyword plan. A maximum
of 1,000 negative keywords are allowed per keyword plan. This
includes campaign negative keywords and ad group negative
keywords.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._ssl_channel_credentials = ssl_channel_credentials
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn(
"api_mtls_endpoint and client_cert_source are deprecated",
DeprecationWarning,
)
host = (
api_mtls_endpoint
if ":" in api_mtls_endpoint
else api_mtls_endpoint + ":443"
)
if credentials is None:
credentials, _ = google.auth.default(
scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
)
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
ssl_credentials = SslCredentials().ssl_credentials
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
ssl_credentials=ssl_credentials,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
ssl_credentials=ssl_channel_credentials,
scopes=self.AUTH_SCOPES,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._stubs = {} # type: Dict[str, Callable]
# Run the base constructor.
super().__init__(
host=host, credentials=credentials, client_info=client_info,
)
@classmethod
def create_channel(
cls,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
scopes: Optional[Sequence[str]] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
scopes=scopes or cls.AUTH_SCOPES,
**kwargs,
)
def close(self):
self.grpc_channel.close()
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def get_keyword_plan_ad_group_keyword(
self,
) -> Callable[
[
keyword_plan_ad_group_keyword_service.GetKeywordPlanAdGroupKeywordRequest
],
keyword_plan_ad_group_keyword.KeywordPlanAdGroupKeyword,
]:
r"""Return a callable for the get keyword plan ad group
keyword method over gRPC.
Returns the requested Keyword Plan ad group keyword in full
detail.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Returns:
Callable[[~.GetKeywordPlanAdGroupKeywordRequest],
~.KeywordPlanAdGroupKeyword]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_keyword_plan_ad_group_keyword" not in self._stubs:
self._stubs[
"get_keyword_plan_ad_group_keyword"
] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v9.services.KeywordPlanAdGroupKeywordService/GetKeywordPlanAdGroupKeyword",
request_serializer=keyword_plan_ad_group_keyword_service.GetKeywordPlanAdGroupKeywordRequest.serialize,
response_deserializer=keyword_plan_ad_group_keyword.KeywordPlanAdGroupKeyword.deserialize,
)
return self._stubs["get_keyword_plan_ad_group_keyword"]
@property
def mutate_keyword_plan_ad_group_keywords(
self,
) -> Callable[
[
keyword_plan_ad_group_keyword_service.MutateKeywordPlanAdGroupKeywordsRequest
],
keyword_plan_ad_group_keyword_service.MutateKeywordPlanAdGroupKeywordsResponse,
]:
r"""Return a callable for the mutate keyword plan ad group
keywords method over gRPC.
Creates, updates, or removes Keyword Plan ad group keywords.
Operation statuses are returned.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `DatabaseError <>`__ `FieldError <>`__
`HeaderError <>`__ `InternalError <>`__
`KeywordPlanAdGroupKeywordError <>`__ `KeywordPlanError <>`__
`MutateError <>`__ `QuotaError <>`__ `RequestError <>`__
`ResourceCountLimitExceededError <>`__
Returns:
Callable[[~.MutateKeywordPlanAdGroupKeywordsRequest],
~.MutateKeywordPlanAdGroupKeywordsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "mutate_keyword_plan_ad_group_keywords" not in self._stubs:
self._stubs[
"mutate_keyword_plan_ad_group_keywords"
] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v9.services.KeywordPlanAdGroupKeywordService/MutateKeywordPlanAdGroupKeywords",
request_serializer=keyword_plan_ad_group_keyword_service.MutateKeywordPlanAdGroupKeywordsRequest.serialize,
response_deserializer=keyword_plan_ad_group_keyword_service.MutateKeywordPlanAdGroupKeywordsResponse.deserialize,
)
return self._stubs["mutate_keyword_plan_ad_group_keywords"]
__all__ = ("KeywordPlanAdGroupKeywordServiceGrpcTransport",)
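# Minimal usage sketch, not taken from the generated sources: when ``channel``
# is passed explicitly the constructor above takes the ``if channel:`` branch
# and skips credential handling entirely, which makes it easy to point the
# transport at a local test stub. The endpoint below is illustrative.
#
#     channel = grpc.insecure_channel("localhost:8000")
#     transport = KeywordPlanAdGroupKeywordServiceGrpcTransport(
#         host="localhost:8000", channel=channel,
#     )
#     rpc = transport.get_keyword_plan_ad_group_keyword  # bound stub callable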
|
|
'''Test case for MongoDB database backend
'''
import unittest
from lighty.db import fields, models, backend
backend.manager.connect('default')
class User(models.Model):
name = fields.CharField()
age = fields.IntegerField()
created = fields.DateField(auto_now_add=True)
changed = fields.DateTimeField(auto_now=True)
birthday = fields.DateField(null=True)
def __str__(self):
return "%s %d" % (self.name, self.age)
def datetime_equals(first, second):
return (first.year == second.year and first.month == second.month and
first.day == second.day and first.hour == second.hour and
first.minute == second.minute and first.second == second.second)
class MongoTestCase(unittest.TestCase):
'''Test case for the MongoDB database backend models
'''
def setUp(self):
backend.manager.default.db.User.drop()
User(name='Peter', age=18).save()
User(name='John', age=19).save()
User(name='Harry', age=17).save()
def testCreate(self):
'''Test model creation method
'''
user = User(name='Harry', age=20)
assert not user._is_saved, 'Model saved on creation'
user.save()
assert user._is_saved, 'Model _is_saved not changed after the object was saved'
assert user.key(), 'Model key error after object was saved'
def testGetAll(self):
'''Test Model.all() method
'''
users = [user for user in User.all()]
assert (users[0].name == 'Peter' and users[1].name == 'John' and
users[2].name == 'Harry'), ('Wrong result [%s]' %
','.join([str(user) for user in users]))
assert (users[0].age == 18 and users[1].age == 19 and
users[2].age == 17), ('Wrong result [%s]' %
','.join([str(user) for user in users]))
def testCount(self):
'''Test len(query) method
'''
users = [user for user in User.all()]
assert len(users) == len(User.all()), ('Wrong number of items: '
'%d != %d' % (len(users), len(User.all())))
query = User.all().where((User.age > 17) & (User.name == 'Peter'))
users = [user for user in query]
assert len(users) == len(query), 'Wrong number of items: %d != %d' % (
len(users), len(query))
def testOrder(self):
'''Test query ordering
'''
users = [user for user in User.all().order_by(User.age)]
assert (users[0].age == 17 and users[1].age == 18 and
users[2].age == 19), ('Wrong result [%s]' %
','.join([str(user) for user in users]))
def testSimple(self):
'''Test simple query
'''
users = [user for user in User.all().where(User.age > 17)]
assert users[0].age == 18 and users[1].age == 19, (
'Wrong result [%s]' % ','.join([str(user) for user in users]))
def testQueryToString(self):
'''Test query to string conversion
'''
query = str(User.all().where((User.age > 17) & (User.name == 'Peter')))
right_query = '((User.age > 17) && (User.name == "Peter"))'
assert (query == right_query), 'Wrong query string: %s ex %s' % (query,
right_query)
def testQuery(self):
'''Test queries with some logic
'''
users = [user for user in User.all().where((User.age > 17) &
(User.name == 'Peter'))]
assert users[0].name == 'Peter' and users[0].age == 18, (
'Wrong result [%s]' % ','.join([str(user) for user in users]))
def testGetByKey(self):
'''Test Model.get(object_key) method
'''
key = User(name='Kevin', age=20).save().key()
user = User.get(key)
assert user, 'Wrong result getting entity for key: %s' % user
assert user.name == 'Kevin', ('Wrong result getting entity name for '
'key: %s' % user.name)
assert user.age == 20, ('Wrong result getting entity name for '
'key: %s' % user.age)
def testGet(self):
'''Test Model.get(field_name=value) method
'''
user = User.get(None)
assert not user, 'Wrong result for getting empty model: %s' % user
user = User.get(name='Peter')
assert user, 'Wrong result searching for name: %s' % user
assert user.name == 'Peter', ('Wrong result searching for name: %s' %
user)
assert user.age == 18, 'Wrong result searching for name: %s' % user
user = User.get(name='John', age=19)
assert user, 'Wrong result searching for two fields: %s' % user
assert user.name == 'John', ('Wrong result searching for name: %s' %
user)
assert user.age == 19, 'Wrong result searching for name: %s' % user
user = User.get(name='adad')
assert not user, 'Wrong not found result: %s' % user
assert not user.name, 'Wrong not found result: %s' % user
def testDelete(self):
'''Test entity.delete() method
'''
User.get(name='Peter').delete()
assert len(User.all().where(User.name == 'Peter')) == 0, (
'Error deleting entity')
assert len(User.all()) == 2, 'Error deleting entity'
def testChange(self):
'''Test save changes into entity
'''
user = User.get(name='Peter')
user.name = 'Alex'
user.save()
assert len(User.all().where(User.name == 'Alex')) == 1, (
'Error saving entity')
assert len(User.all().where(User.name == 'Peter')) == 0, (
'Error saving entity')
def testContains(self):
'''Test contains for strings and lists
'''
query = User.all().where(User.name.contains('eter'))
users = [user for user in query]
assert len(users) == 1, 'Wrong results for\n%s\n%s' % (query, users)
assert users[0].name == 'Peter', 'Wrong result: %s' % users[0]
# TODO: write a test case for arrays
def testDateTime(self):
'''Test different date/time fields
'''
# This method is so big because it needs some time to pass during execution
# to check the changed time properly, especially in queries
from datetime import date, datetime, timedelta
now = datetime.now()
today = date.today()
birthday = today - timedelta(days=20*366)
user = User(name='Kevin', age=20, birthday=birthday).save()
changed = user.changed
# Check auto filled dates
assert user.birthday == birthday, ('Manually set value error: %s'
' expected %s' % (user.birthday, birthday))
assert user.created == today, ('auto_now_add value error: %s expected '
'%s' % (user.created, today))
print user.changed, now
assert datetime_equals(user.changed, now), ('auto_now value error: %s'
' expected %s' % (user.changed, now))
# Test few queries with date objects
selected = User.all().where(User.created <= today)
assert len(selected) == 4, ('Wrong date query results number: %s' %
len(selected))
user.created = today + timedelta(days=2)
user.save()
selected = User.all().where(User.created > today)
assert len(selected) == 1, ('Wrong date query results number: %s' %
len(selected))
# Update user, save and check changed time
user.name = 'Kevin'
user.save()
assert user.changed > changed, 'Error changed auto_now: %s' % changed
# Check queries
updated = User.all().where(User.changed > changed)
assert len(updated) == 1, ('Wrong query results number: %s' %
len(updated))
assert updated[0].name == 'Kevin', ('Wrong result item: %s' %
updated[0].name)
assert isinstance(updated[0].changed, datetime), (
'Wrong result item field type: %s' % type(updated[0].changed))
def test():
suite = unittest.TestSuite()
suite.addTest(MongoTestCase('testQueryToString'))
suite.addTest(MongoTestCase('testCreate'))
suite.addTest(MongoTestCase('testGet'))
suite.addTest(MongoTestCase('testGetByKey'))
suite.addTest(MongoTestCase('testGetAll'))
suite.addTest(MongoTestCase('testChange'))
suite.addTest(MongoTestCase('testOrder'))
suite.addTest(MongoTestCase('testSimple'))
suite.addTest(MongoTestCase('testQuery'))
suite.addTest(MongoTestCase('testCount'))
suite.addTest(MongoTestCase('testContains'))
suite.addTest(MongoTestCase('testDelete'))
suite.addTest(MongoTestCase('testDateTime'))
return suite
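# Convenience entry point (an addition for illustration, not in the original
# module): run the hand-ordered suite above with the standard unittest runner.
if __name__ == '__main__':
    unittest.TextTestRunner(verbosity=2).run(test())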
|
|
from autobahn.twisted import websocket
import logging
import numpy as np
import threading
import time
from twisted.python import failure
from twisted.internet import defer, endpoints
import twisted.internet.error
from universe import utils
from universe.twisty import reactor
from universe.rewarder import connection_timer, env_status, reward_buffer, rewarder_client
from universe.utils import display
logger = logging.getLogger(__name__)
extra_logger = logging.getLogger('universe.extra.'+__name__)
def _ping(client):
return client.send('v0.control.ping', {}, expect_reply=True)
class RewarderSession(object):
def __init__(self):
self.lock = threading.RLock()
self.i = 0
# Mutated by main thread exclusively
self.names_by_id = {}
self.reward_buffers = {}
self.env_statuses = {}
self.errors = {}
self.networks = {}
self.clients = {}
def close(self, name=None, reason=u'closed by RewarderSession.close'):
if name is None:
names = list(self.names_by_id.values())
else:
logger.info('[%s] Closing rewarder connection', name)
names = [name]
self.ids_by_name = {name: id for id, name in self.names_by_id.items()}
for name in names:
with self.lock:
id = self.ids_by_name.pop(name, None)
if id is None:
# already closed
continue
del self.names_by_id[id]
del self.reward_buffers[id]
del self.env_statuses[id]
self.errors.pop(id, None)
network = self.networks.pop(id)
network.close()
client = self.clients.pop(id, None)
if client is not None:
reactor.callFromThread(client.close, reason=reason)
def connect(self, name, address, label, password, env_id=None, seed=None, fps=60,
start_timeout=None, observer=False, skip_network_calibration=False):
if name in self.reward_buffers:
self.close(name, reason='closing previous connection to reconnect with the same name')
network = Network()
self.names_by_id[self.i] = name
self.reward_buffers[self.i] = reward_buffer.RewardBuffer(label)
self.env_statuses[self.i] = env_status.EnvStatus(label=label, primary=False)
self.networks[self.i] = network
reactor.callFromThread(self._connect,
name=name,
address=address,
env_id=env_id,
seed=seed,
fps=fps,
i=self.i,
network=network,
env_status=self.env_statuses[self.i],
reward_buffer=self.reward_buffers[self.i],
label=label,
start_timeout=start_timeout,
password=password,
observer=observer,
skip_network_calibration=skip_network_calibration,
)
self.i += 1
return network
def _already_closed(self, i):
# Lock must be held
return i not in self.names_by_id
# Call only from Twisted thread
# TODO: probably time to convert to kwargs
@defer.inlineCallbacks
def _connect(self, name, address, env_id, seed, fps, i, network, env_status, reward_buffer,
label, password, start_timeout,
observer, skip_network_calibration,
attempt=0, elapsed_sleep_time=0,
):
endpoint = endpoints.clientFromString(reactor, 'tcp:'+address)
factory = websocket.WebSocketClientFactory('ws://'+address)
factory.protocol = rewarder_client.RewarderClient
assert password, "Missing password: {} for rewarder session".format(password)
factory.headers = {'authorization': utils.basic_auth_encode(password), 'openai-observer': 'true' if observer else 'false'}
factory.i = i
# Various important objects
factory.endpoint = endpoint
factory.env_status = env_status
factory.reward_buffer = reward_buffer
# Helpful strings
factory.label = label
factory.address = address
# Arguments to always send to the remote reset call
factory.arg_env_id = env_id
factory.arg_fps = fps
def record_error(e):
if isinstance(e, failure.Failure):
e = e.value
# logger.error('[%s] Recording rewarder error: %s', factory.label, e)
with self.lock:
# drop error on the floor if we're already closed
if self._already_closed(factory.i):
extra_logger.info('[%s] Ignoring error for already closed connection: %s', label, e)
elif factory.i not in self.clients:
extra_logger.info('[%s] Received error for connection which has not been fully initialized: %s', label, e)
# We could handle this better, but right now we
# just mark this as a fatal error for the
# backend. Often it actually is.
self.errors[factory.i] = e
else:
extra_logger.info('[%s] Recording fatal error for connection: %s', label, e)
self.errors[factory.i] = e
def retriable_error(e, error_message):
if isinstance(e, failure.Failure):
e = e.value
if self._already_closed(factory.i):
logger.error('[%s] Got error, but giving up on reconnecting, since %d already disconnected', factory.label, factory.i)
return
# Also need to handle DNS errors, so let's just handle everything for now.
#
# reason.trap(twisted.internet.error.ConnectError, error.ConnectionError)
if elapsed_sleep_time < start_timeout:
sleep = min((2 * attempt+1), 10)
logger.error('[%s] Waiting on rewarder: %s. Retry in %ds (slept %ds/%ds): %s', factory.label, error_message, sleep, elapsed_sleep_time, start_timeout, e)
reactor.callLater(
sleep, self._connect, name=name, address=address,
env_id=env_id, seed=seed, fps=fps, i=i, network=network,
env_status=env_status, reward_buffer=reward_buffer, label=label,
attempt=attempt+1, elapsed_sleep_time=elapsed_sleep_time+sleep,
start_timeout=start_timeout, password=password,
observer=observer, skip_network_calibration=skip_network_calibration,
)
else:
logger.error('[%s] %s. Retries exceeded (slept %ds/%ds): %s', factory.label, error_message, elapsed_sleep_time, start_timeout, e)
record_error(e)
factory.record_error = record_error
try:
retry_msg = 'establish rewarder TCP connection'
client = yield endpoint.connect(factory)
extra_logger.info('[%s] Rewarder TCP connection established', factory.label)
retry_msg = 'complete WebSocket handshake'
yield client.waitForWebsocketConnection()
extra_logger.info('[%s] Websocket client successfully connected', factory.label)
if not skip_network_calibration:
retry_msg = 'run network calibration'
yield network.calibrate(client)
extra_logger.info('[%s] Network calibration complete', factory.label)
retry_msg = ''
if factory.arg_env_id is not None:
# We aren't picky about episode ID: we may have
# already received an env.describe message
# telling us about a resetting environment, which
# we don't need to bump past.
#
# tl;dr hardcoding 0.0 here avoids a double reset.
reply = yield self._send_env_reset(client, seed=seed, episode_id='0')
else:
# No env_id requested, so we just proceed without a reset
reply = None
# We're connected and have measured the
# network. Mark everything as ready to go.
with self.lock:
if factory.i not in self.names_by_id:
# ID has been popped!
logger.info('[%s] Rewarder %d started, but has already been closed', factory.label, factory.i)
client.close(reason='RewarderSession: double-closing, client was closed while RewarderSession was starting')
elif reply is None:
logger.info('[%s] Attached to running environment without reset', factory.label)
else:
context, req, rep = reply
logger.info('[%s] Initial reset complete: episode_id=%s', factory.label, rep['headers']['episode_id'])
self.clients[factory.i] = client
except Exception as e:
if retry_msg:
retriable_error(e, 'failed to ' + retry_msg)
else:
record_error(e)
def pop_errors(self):
errors = {}
with self.lock:
if self.errors:
for i, error in self.errors.items():
name = self.names_by_id[i]
errors[name] = error
self.errors.clear()
return errors
def reset(self, seed=None):
with self.lock:
for i, reward_buffer in self.reward_buffers.items():
reward_buffer.mask()
reactor.callFromThread(self._reset, seed=seed)
def _reset(self, seed=None):
with self.lock:
for client in self.clients.values():
d = self._send_env_reset(client, seed=seed)
# Total hack to capture the variable in the closure
def callbacks(client):
def success(reply): pass
def fail(reason): client.factory.record_error(reason)
return success, fail
success, fail = callbacks(client)
d.addCallback(success)
d.addErrback(fail)
def _send_env_reset(self, client, seed=None, episode_id=None):
if episode_id is None:
episode_id = client.factory.env_status.episode_id
logger.info('[%s] Sending reset for env_id=%s fps=%s episode_id=%s', client.factory.label, client.factory.arg_env_id, client.factory.arg_fps, episode_id)
return client.send_reset(
env_id=client.factory.arg_env_id,
seed=seed,
fps=client.factory.arg_fps,
episode_id=episode_id)
def pop(self, warn=True, peek_d=None):
reward_d = {}
done_d = {}
info_d = {}
err_d = self.pop_errors()
for i, reward_buffer in self.reward_buffers.items():
name = self.names_by_id[i]
reward, done, info = reward_buffer.pop(peek_d.get(name))
reward_d[name] = reward
done_d[name] = done
info_d[name] = info
# TODO: use FPS here rather than 60
if warn and any(info.get('stats.reward.count', 0) > 60 for info in info_d.values()):
logger.warn('WARNING: returning more than 60 aggregated rewards: %s. Either your agent is not keeping up with the framerate, or you should have called ".reset()" to clear pending rewards and reset the environments to a known state.',
{name: '{} (episode_id={})'.format(info['stats.reward.count'], info.get('env_status.episode_id')) for name, info in info_d.items()})
return reward_d, done_d, info_d, err_d
def wait(self, timeout=None):
deadline = time.time() + timeout
for client in self.clients:
if timeout is not None:
remaining_timeout = deadline - time.time()
else:
remaining_timeout = None
client.reward_buffer.wait_for_step(timeout=remaining_timeout)
# Hack to test actions over websockets
# TODO: Carve websockets out of rewarder pkg (into vnc_env? - and move this there)
def send_action(self, action_n, env_id):
reactor.callFromThread(self._send_action, env_id, action_n)
return self.pop_errors()
def _send_action(self, env_id, action_n):
with self.lock:
for n, client in zip(action_n, self.clients.values()):
self._send_env_action(client, env_id, action_n[n])
def _send_env_action(self, client, env_id, action_n):
if len(action_n) == 0:
# Hack to skip empty actions. TODO: Find source (throttle?) and fix
return
message = {
'env_id': env_id,
'action': action_n,
}
client.send('v0.agent.action', message, expect_reply=False)
def rewards_count(self):
# TODO: any reason to lock these?
return [client.reward_buffer.count for client in self.clients]
def pop_observation(self):
return [client.reward_buffer.pop_observation() for client in self.clients]
# def _connection_time(self):
# deferreds = []
# for client in self.clients:
# endpoint = client.factory.endpoint
# d = connection_timer.start(endpoint)
# deferreds.append(d)
# d = defer.DeferredList(deferreds, fireOnOneErrback=True, consumeErrors=True)
# return d
# Run this in the Twisted thread
class Network(object):
def __init__(self):
self.connection_samples = 10
self.application_ping_samples = 10
self.connection_time_m = None
self.lock = threading.Lock()
self.recalibrate = None
self.client = None
# reversed_clock_skew() reads _ntpdate_clock_skew, so initialize it as well
# to avoid an AttributeError before calibration completes.
self._ntpdate_clock_skew = None
self._ntpdate_reversed_clock_skew = None
self._reversed_clock_skew = None
def active(self):
with self.lock:
return self._reversed_clock_skew is not None
# Used by external consumers
def reversed_clock_skew(self):
with self.lock:
if self._ntpdate_clock_skew is not None:
return self._ntpdate_reversed_clock_skew
else:
return self._reversed_clock_skew
def _report(self):
connection_time = display.display_timestamps(self.connection_time_m)
if self._ntpdate_clock_skew is not None:
ntpdate_clock_skew = display.display_timestamp(self._ntpdate_clock_skew[0])
else:
ntpdate_clock_skew = None
clock_skew = display.display_timestamps_pair(self.clock_skew_m)
application_rtt = display.display_timestamps(self.application_rtt_m)
request_overhead = display.display_timestamps(self.request_overhead_m)
response_overhead = display.display_timestamps(self.response_overhead_m)
extra_logger.info('[%s] Network calibration: ntpdate_clock_skew=%s clock_skew=%s connection_time=%s application_rtt=%s request_overhead=%s response_overhead=%s',
self.client.factory.label, ntpdate_clock_skew, clock_skew, connection_time, application_rtt,
request_overhead, response_overhead)
def _start(self):
def calibrate():
d = defer.Deferred()
def fail(reason):
logger.error('[%s] Could not recalibrate network: %s', self.client.factory.label, reason)
d.addErrback(fail)
self._start_measure_connection_time(d)
self._start()
self.recalibrate = reactor.callLater(5 * 60, calibrate)
def close(self):
if self.recalibrate:
try:
self.recalibrate.cancel()
except twisted.internet.error.AlreadyCalled:
pass
# Called externally
def calibrate(self, client):
d = defer.Deferred()
def success(res):
# If we succeed, kick off the periodic 5 minute
# recalibrations.
self._start()
return res
d.addCallback(success)
self.client = client
# Kinda a hack. Idea is to try using the ntpdate -q offset if
# we can.
skew = self._start_measure_clock_skew()
def succeed(offset):
with self.lock:
self._ntpdate_clock_skew = np.array([offset, offset])
self._ntpdate_reversed_clock_skew = np.array([-offset, -offset])
self._start_measure_connection_time(d)
skew.addCallback(succeed)
def fail(reason):
with self.lock:
self._ntpdate_clock_skew = None
self._ntpdate_reversed_clock_skew = None
extra_logger.info('[%s] Could not determine clock skew with ntpdate; falling back to application-level ping: %s', self.client.factory.label, reason.value)
self._start_measure_connection_time(d)
skew.addErrback(fail)
return d
def _start_measure_connection_time(self, d):
connection_time_m = np.zeros(self.connection_samples)
self._measure_connection_time(d, connection_time_m, 0)
def _measure_connection_time(self, d, connection_time_m, i):
extra_logger.debug('[%s] Measuring connection time (%d/%d)', self.client.factory.label, i+1, len(connection_time_m))
endpoint = self.client.factory.endpoint
timer = connection_timer.start(endpoint)
def success(delta):
connection_time_m[i] = delta
if i+1 < len(connection_time_m):
self._measure_connection_time(d, connection_time_m, i+1)
else:
self.connection_time_m = connection_time_m
self._start_measure_application_ping(d)
def fail(reason):
d.errback(reason)
timer.addCallback(success)
timer.addErrback(fail)
def _start_measure_application_ping(self, d=None):
clock_skew_m = np.zeros((self.application_ping_samples, 2))
request_overhead_m = np.zeros((self.application_ping_samples))
response_overhead_m = np.zeros((self.application_ping_samples))
application_rtt_m = np.zeros((self.application_ping_samples))
self._measure_application_ping(d, clock_skew_m, request_overhead_m, response_overhead_m, application_rtt_m, 0)
def _measure_application_ping(self, d, clock_skew_m, request_overhead_m, response_overhead_m, application_rtt_m, i):
extra_logger.debug('[%s] Issuing an application-level ping (%d/%d)', self.client.factory.label, i+1, len(clock_skew_m))
start = time.time()
ping = _ping(self.client)
def success(res):
context, request, response = res
end = time.time()
request_sent_at = request['headers']['sent_at'] # local
response_sent_at = response['headers']['sent_at'] # remote
response_received_at = context['start'] # local
# We try to put bounds on clock skew by subtracting
# local and remote times, for local and remote events
# that are causally related.
#
# For example, suppose that the following local/remote
# logical timestamps apply to a request (for a system
# with clock skew of 100):
#
# request_sent local: 0 remote: 100
# request_received local: 1 remote: 101
# response_sent local: 2 remote: 102
# response_received local: 3 remote: 103
#
# Then:
#
# # Remote event *after* local is upper bound
# request_received.remote - request_sent.local = 101 - 0 = 101
# # Remote event *before* local is lower bound
# response_sent.remote - response_received.local = 102 - 3 = 99
#
# There's danger of further clock drift over time, but
# we don't need these to be fully accurate, and this
# should be fine for now.
clock_skew_m[i, :] = (response_sent_at-response_received_at, response_sent_at-request_sent_at)
request_overhead_m[i] = request_sent_at - start
response_overhead_m[i] = end - response_received_at
application_rtt_m[i] = response_received_at - request_sent_at
if i+1 < len(clock_skew_m):
self._measure_application_ping(d, clock_skew_m, request_overhead_m, response_overhead_m, application_rtt_m, i+1)
else:
self.clock_skew_m = clock_skew_m
self.request_overhead_m = request_overhead_m
self.response_overhead_m = response_overhead_m
self.application_rtt_m = application_rtt_m
self._report()
self._update_exposed_metrics()
# Ok, all done!
if d is not None:
d.callback(self)
ping.addCallback(success)
ping.addErrback(d.errback)
def _update_exposed_metrics(self):
with self.lock:
self._clock_skew = self.clock_skew_m.mean(axis=0) # add to local time to get remote time, as (min, max) values
self._reversed_clock_skew = -self._clock_skew[[1, 0]] # add to remote time to get local time, in format (min, max)
def _start_measure_clock_skew(self):
host = self.client.factory.address.split(':')[0]
return connection_timer.measure_clock_skew(self.client.factory.label, host)
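# Illustrative sketch (not part of the original module): a hypothetical helper that
# walks through the clock-skew bounding arithmetic described in the comment inside
# Network._measure_application_ping. All timestamps below are invented for the example.
def _demo_clock_skew_bounds():
    # Local timestamps (seconds) for two causally ordered events, against a remote
    # clock that is assumed to run exactly 100 seconds ahead of the local clock.
    request_sent_local = 0.0
    response_received_local = 3.0
    # Remote timestamps reported by the server for the same request/response pair.
    request_received_remote = 101.0
    response_sent_remote = 102.0
    # A remote event that happens *after* a local event bounds the skew from above;
    # a remote event that happens *before* a local event bounds it from below.
    upper_bound = request_received_remote - request_sent_local    # 101
    lower_bound = response_sent_remote - response_received_local  # 99
    return lower_bound, upper_bound  # the true skew (100) lies inside [99, 101]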
|
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from astropy import units as u
# list of pairs (target unit/physical type, input unit)
x_inputs = [(u.arcsec, u.deg), ('angle', u.deg),
(u.kpc/u.Myr, u.km/u.s), ('speed', u.km/u.s),
([u.arcsec, u.km], u.deg), ([u.arcsec, u.km], u.km), # multiple allowed
(['angle', 'length'], u.deg), (['angle', 'length'], u.km)]
y_inputs = [(u.arcsec, u.deg), ('angle', u.deg),
(u.kpc/u.Myr, u.km/u.s), ('speed', u.km/u.s)]
@pytest.fixture(scope="module",
params=list(range(len(x_inputs))))
def x_input(request):
return x_inputs[request.param]
@pytest.fixture(scope="module",
params=list(range(len(y_inputs))))
def y_input(request):
return y_inputs[request.param]
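# Illustrative sketch (not part of the astropy test suite): a minimal standalone use
# of u.quantity_input outside the fixtures, mixing an 'angle' physical-type string
# with a plain unit. The function name and parameters are made up for the example.
@u.quantity_input(separation='angle', distance=u.kpc)
def _demo_projected_size(separation, distance):
    # Small-angle approximation: physical size subtended by `separation` at `distance`.
    return separation.to_value(u.rad) * distance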
# ---- Tests that use the fixtures defined above ----
def test_args(x_input, y_input):
x_target, x_unit = x_input
y_target, y_unit = y_input
@u.quantity_input(x=x_target, y=y_target)
def myfunc_args(x, y):
return x, y
x, y = myfunc_args(1*x_unit, 1*y_unit)
assert isinstance(x, u.Quantity)
assert isinstance(y, u.Quantity)
assert x.unit == x_unit
assert y.unit == y_unit
def test_args_nonquantity(x_input):
x_target, x_unit = x_input
@u.quantity_input(x=x_target)
def myfunc_args(x, y):
return x, y
x, y = myfunc_args(1*x_unit, 100)
assert isinstance(x, u.Quantity)
assert isinstance(y, int)
assert x.unit == x_unit
def test_wrong_unit(x_input, y_input):
x_target, x_unit = x_input
y_target, y_unit = y_input
@u.quantity_input(x=x_target, y=y_target)
def myfunc_args(x, y):
return x, y
with pytest.raises(u.UnitsError) as e:
x, y = myfunc_args(1*x_unit, 100*u.Joule) # has to be an unspecified unit
str_to = str(y_target)
assert str(e.value) == f"Argument 'y' to function 'myfunc_args' must be in units convertible to '{str_to}'."
def test_not_quantity(x_input, y_input):
x_target, x_unit = x_input
y_target, y_unit = y_input
@u.quantity_input(x=x_target, y=y_target)
def myfunc_args(x, y):
return x, y
with pytest.raises(TypeError) as e:
x, y = myfunc_args(1*x_unit, 100)
assert str(e.value) == "Argument 'y' to function 'myfunc_args' has no 'unit' attribute. You may want to pass in an astropy Quantity instead."
def test_kwargs(x_input, y_input):
x_target, x_unit = x_input
y_target, y_unit = y_input
@u.quantity_input(x=x_target, y=y_target)
def myfunc_args(x, my_arg, y=1*y_unit):
return x, my_arg, y
x, my_arg, y = myfunc_args(1*x_unit, 100, y=100*y_unit)
assert isinstance(x, u.Quantity)
assert isinstance(my_arg, int)
assert isinstance(y, u.Quantity)
assert y.unit == y_unit
def test_unused_kwargs(x_input, y_input):
x_target, x_unit = x_input
y_target, y_unit = y_input
@u.quantity_input(x=x_target, y=y_target)
def myfunc_args(x, my_arg1, y=y_unit, my_arg2=1000):
return x, my_arg1, y, my_arg2
x, my_arg1, y, my_arg2 = myfunc_args(1*x_unit, 100,
y=100*y_unit, my_arg2=10)
assert isinstance(x, u.Quantity)
assert isinstance(my_arg1, int)
assert isinstance(y, u.Quantity)
assert isinstance(my_arg2, int)
assert y.unit == y_unit
assert my_arg2 == 10
def test_kwarg_wrong_unit(x_input, y_input):
x_target, x_unit = x_input
y_target, y_unit = y_input
@u.quantity_input(x=x_target, y=y_target)
def myfunc_args(x, y=10*y_unit):
return x, y
with pytest.raises(u.UnitsError) as e:
x, y = myfunc_args(1*x_unit, y=100*u.Joule)
str_to = str(y_target)
assert str(e.value) == f"Argument 'y' to function 'myfunc_args' must be in units convertible to '{str_to}'."
def test_kwarg_not_quantity(x_input, y_input):
x_target, x_unit = x_input
y_target, y_unit = y_input
@u.quantity_input(x=x_target, y=y_target)
def myfunc_args(x, y=10*y_unit):
return x, y
with pytest.raises(TypeError) as e:
x, y = myfunc_args(1*x_unit, y=100)
assert str(e.value) == "Argument 'y' to function 'myfunc_args' has no 'unit' attribute. You may want to pass in an astropy Quantity instead."
def test_kwarg_default(x_input, y_input):
x_target, x_unit = x_input
y_target, y_unit = y_input
@u.quantity_input(x=x_target, y=y_target)
def myfunc_args(x, y=10*y_unit):
return x, y
x, y = myfunc_args(1*x_unit)
assert isinstance(x, u.Quantity)
assert isinstance(y, u.Quantity)
assert x.unit == x_unit
assert y.unit == y_unit
def test_kwargs_input(x_input, y_input):
x_target, x_unit = x_input
y_target, y_unit = y_input
@u.quantity_input(x=x_target, y=y_target)
def myfunc_args(x=1*x_unit, y=1*y_unit):
return x, y
kwargs = {'x': 10*x_unit, 'y': 10*y_unit}
x, y = myfunc_args(**kwargs)
assert isinstance(x, u.Quantity)
assert isinstance(y, u.Quantity)
assert x.unit == x_unit
assert y.unit == y_unit
def test_kwargs_extra(x_input):
x_target, x_unit = x_input
@u.quantity_input(x=x_target)
def myfunc_args(x, **kwargs):
return x
x = myfunc_args(1*x_unit)
assert isinstance(x, u.Quantity)
assert x.unit == x_unit
# ---- Tests that don't use the fixtures ----
@pytest.mark.parametrize("x_unit,y_unit", [
(u.arcsec, u.eV),
('angle', 'energy')])
def test_arg_equivalencies(x_unit, y_unit):
@u.quantity_input(x=x_unit, y=y_unit,
equivalencies=u.mass_energy())
def myfunc_args(x, y):
return x, y+(10*u.J) # Add an energy to check equiv is working
x, y = myfunc_args(1*u.arcsec, 100*u.gram)
assert isinstance(x, u.Quantity)
assert isinstance(y, u.Quantity)
assert x.unit == u.arcsec
assert y.unit == u.gram
@pytest.mark.parametrize("x_unit,energy_unit", [
(u.arcsec, u.eV),
('angle', 'energy')])
def test_kwarg_equivalencies(x_unit, energy_unit):
@u.quantity_input(x=x_unit, energy=energy_unit, equivalencies=u.mass_energy())
def myfunc_args(x, energy=10*u.eV):
return x, energy+(10*u.J) # Add an energy to check equiv is working
x, energy = myfunc_args(1*u.arcsec, 100*u.gram)
assert isinstance(x, u.Quantity)
assert isinstance(energy, u.Quantity)
assert x.unit == u.arcsec
assert energy.unit == u.gram
def test_no_equivalent():
class test_unit:
pass
class test_quantity:
unit = test_unit()
@u.quantity_input(x=u.arcsec)
def myfunc_args(x):
return x
with pytest.raises(TypeError) as e:
x, y = myfunc_args(test_quantity())
assert str(e.value) == "Argument 'x' to function 'myfunc_args' has a 'unit' attribute without an 'is_equivalent' method. You may want to pass in an astropy Quantity instead."
def test_kwarg_invalid_physical_type():
@u.quantity_input(x='angle', y='africanswallow')
def myfunc_args(x, y=10*u.deg):
return x, y
with pytest.raises(ValueError) as e:
x, y = myfunc_args(1*u.arcsec, y=100*u.deg)
assert str(e.value) == "Invalid unit or physical type 'africanswallow'."
def test_default_value_check():
x_target = u.deg
x_unit = u.arcsec
with pytest.raises(TypeError):
@u.quantity_input(x=x_target)
def myfunc_args(x=1.):
return x
x = myfunc_args()
x = myfunc_args(1*x_unit)
assert isinstance(x, u.Quantity)
assert x.unit == x_unit
def test_str_unit_typo():
@u.quantity_input
def myfunc_args(x: "kilograam"):
return x
with pytest.raises(ValueError):
result = myfunc_args(u.kg)
def test_type_annotations():
@u.quantity_input
def myfunc_args(x: u.m, y: str):
return x, y
in_quantity = 2 * u.m
in_string = "cool string"
quantity, string = myfunc_args(in_quantity, in_string)
assert quantity == in_quantity
assert string == in_string
def test_args_None():
x_target = u.deg
x_unit = u.arcsec
y_target = u.km
y_unit = u.kpc
@u.quantity_input(x=[x_target, None], y=[None, y_target])
def myfunc_args(x, y):
return x, y
x, y = myfunc_args(1*x_unit, None)
assert isinstance(x, u.Quantity)
assert x.unit == x_unit
assert y is None
x, y = myfunc_args(None, 1*y_unit)
assert isinstance(y, u.Quantity)
assert y.unit == y_unit
assert x is None
def test_args_None_kwarg():
x_target = u.deg
x_unit = u.arcsec
y_target = u.km
@u.quantity_input(x=x_target, y=y_target)
def myfunc_args(x, y=None):
return x, y
x, y = myfunc_args(1*x_unit)
assert isinstance(x, u.Quantity)
assert x.unit == x_unit
assert y is None
x, y = myfunc_args(1*x_unit, None)
assert isinstance(x, u.Quantity)
assert x.unit == x_unit
assert y is None
with pytest.raises(TypeError):
x, y = myfunc_args(None, None)
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Integration tests for ssh module.
"""
from __future__ import print_function
import gc
import gzip
import os
import random
import socket
import subprocess
from helpers import unittest
import luigi.format
from luigi.contrib.ssh import RemoteContext, RemoteTarget
working_ssh_host = None # set this to a working ssh host string (e.g. "localhost") to activate integration tests
# The following tests require a working ssh server at `working_ssh_host`
# the test runner can ssh into using password-less authentication
# since `nc` has different syntax on different platforms
# we use a short python command to start
# a 'hello'-server on the remote machine
HELLO_SERVER_CMD = """
import socket, sys
listener = socket.socket()
listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
listener.bind(('localhost', 2134))
listener.listen(1)
sys.stdout.write('ready')
sys.stdout.flush()
conn = listener.accept()[0]
conn.sendall('hello')
"""
class TestRemoteContext(unittest.TestCase):
def setUp(self):
self.context = RemoteContext(working_ssh_host)
def test_check_output(self):
""" Test check_output ssh
Assumes the running user can ssh to working_ssh_host
"""
output = self.context.check_output(["echo", "-n", "luigi"])
self.assertEqual(output, "luigi")
def test_tunnel(self):
print("Setting up remote listener...")
remote_server_handle = self.context.Popen([
"python", "-c", '"{0}"'.format(HELLO_SERVER_CMD)
], stdout=subprocess.PIPE)
print("Setting up tunnel")
with self.context.tunnel(2135, 2134):
print("Tunnel up!")
# hack to make sure the listener process is up
# and running before we write to it
server_output = remote_server_handle.stdout.read(5)
self.assertEqual(server_output, "ready")
print("Connecting to server via tunnel")
s = socket.socket()
s.connect(("localhost", 2135))
print("Receiving...",)
response = s.recv(5)
self.assertEqual(response, "hello")
print("Closing connection")
s.close()
print("Waiting for listener...")
output, _ = remote_server_handle.communicate()
self.assertEqual(remote_server_handle.returncode, 0)
print("Closing tunnel")
class TestRemoteTarget(unittest.TestCase):
""" These tests assume RemoteContext working
in order for setUp and tearDown to work
"""
def setUp(self):
self.ctx = RemoteContext(working_ssh_host)
self.filepath = "/tmp/luigi_remote_test.dat"
self.target = RemoteTarget(
self.filepath,
working_ssh_host,
)
self.ctx.check_output(["rm", "-rf", self.filepath])
self.ctx.check_output(["echo -n 'hello' >", self.filepath])
def tearDown(self):
self.ctx.check_output(["rm", "-rf", self.filepath])
def test_exists(self):
self.assertTrue(self.target.exists())
no_file = RemoteTarget(
"/tmp/_file_that_doesnt_exist_",
working_ssh_host,
)
self.assertFalse(no_file.exists())
def test_remove(self):
self.target.remove()
self.assertRaises(
subprocess.CalledProcessError,
self.ctx.check_output,
["cat", self.filepath]
)
def test_open(self):
f = self.target.open('r')
file_content = f.read()
f.close()
self.assertEqual(file_content, "hello")
def test_context_manager(self):
with self.target.open('r') as f:
file_content = f.read()
self.assertEqual(file_content, "hello")
class TestRemoteTargetAtomicity(unittest.TestCase):
path = '/tmp/luigi_remote_atomic_test.txt'
ctx = RemoteContext(working_ssh_host)
def _exists(self, path):
try:
self.ctx.check_output(["test", "-e", path])
except subprocess.CalledProcessError as e:
if e.returncode == 1:
return False
else:
raise
return True
def setUp(self):
self.ctx.check_output(["rm", "-rf", self.path])
self.local_file = '/tmp/local_luigi_remote_atomic_test.txt'
if os.path.exists(self.local_file):
os.remove(self.local_file)
def tearDown(self):
self.ctx.check_output(["rm", "-rf", self.path])
if os.path.exists(self.local_file):
os.remove(self.local_file)
def test_close(self):
t = RemoteTarget(self.path, working_ssh_host)
p = t.open('w')
print('test', file=p)
self.assertFalse(self._exists(self.path))
p.close()
self.assertTrue(self._exists(self.path))
def test_del(self):
t = RemoteTarget(self.path, working_ssh_host)
p = t.open('w')
print('test', file=p)
tp = p.tmp_path
del p
self.assertFalse(self._exists(tp))
self.assertFalse(self._exists(self.path))
def test_write_cleanup_no_close(self):
t = RemoteTarget(self.path, working_ssh_host)
def context():
f = t.open('w')
f.write('stuff')
context()
gc.collect() # force garbage collection of f variable
self.assertFalse(t.exists())
def test_write_cleanup_with_error(self):
t = RemoteTarget(self.path, working_ssh_host)
try:
with t.open('w'):
raise Exception('something broke')
except:
pass
self.assertFalse(t.exists())
def test_write_with_success(self):
t = RemoteTarget(self.path, working_ssh_host)
with t.open('w') as p:
p.write("hello")
self.assertTrue(t.exists())
def test_gzip(self):
t = RemoteTarget(self.path, working_ssh_host, luigi.format.Gzip)
p = t.open('w')
test_data = 'test'
p.write(test_data)
self.assertFalse(self._exists(self.path))
p.close()
self.assertTrue(self._exists(self.path))
# Using gzip module as validation
cmd = 'scp -q %s:%s %s' % (working_ssh_host, self.path, self.local_file)
assert os.system(cmd) == 0
f = gzip.open(self.local_file, 'rb')
self.assertTrue(test_data == f.read())
f.close()
# Verifying our own gzip remote reader
f = RemoteTarget(self.path, working_ssh_host, luigi.format.Gzip).open('r')
self.assertTrue(test_data == f.read())
f.close()
def test_put(self):
f = open(self.local_file, 'w')
f.write('hello')
f.close()
t = RemoteTarget(self.path, working_ssh_host)
t.put(self.local_file)
self.assertTrue(self._exists(self.path))
def test_get(self):
self.ctx.check_output(["echo -n 'hello' >", self.path])
t = RemoteTarget(self.path, working_ssh_host)
t.get(self.local_file)
f = open(self.local_file, 'r')
file_content = f.read()
self.assertEqual(file_content, 'hello')
class TestRemoteTargetCreateDirectories(TestRemoteTargetAtomicity):
path = '/tmp/%s/xyz/luigi_remote_atomic_test.txt' % random.randint(0, 999999999)
class TestRemoteTargetRelative(TestRemoteTargetAtomicity):
path = 'luigi_remote_atomic_test.txt'
|
|
import sys
import codecs
from collections import namedtuple
import random
import bisect
from distutils.version import StrictVersion
try:
import ujson as json
except ImportError:
import json
from rdbtools.parser import RdbCallback
from rdbtools.encodehelpers import bytes_to_unicode
from heapq import heappush, nlargest, heappop
ZSKIPLIST_MAXLEVEL=32
ZSKIPLIST_P=0.25
REDIS_SHARED_INTEGERS = 10000
MemoryRecord = namedtuple('MemoryRecord', ['database', 'type', 'key', 'bytes', 'encoding','size', 'len_largest_element', 'expiry'])
class StatsAggregator(object):
def __init__(self, key_groupings = None):
self.aggregates = {}
self.scatters = {}
self.histograms = {}
self.metadata = {}
def next_record(self, record):
self.add_aggregate('database_memory', record.database, record.bytes)
self.add_aggregate('database_memory', 'all', record.bytes)
self.add_aggregate('type_memory', record.type, record.bytes)
self.add_aggregate('encoding_memory', record.encoding, record.bytes)
self.add_aggregate('type_count', record.type, 1)
self.add_aggregate('encoding_count', record.encoding, 1)
self.add_histogram(record.type + "_length", record.size)
self.add_histogram(record.type + "_memory", (record.bytes/10) * 10)
if record.type == 'list':
self.add_scatter('list_memory_by_length', record.bytes, record.size)
elif record.type == 'hash':
self.add_scatter('hash_memory_by_length', record.bytes, record.size)
elif record.type == 'set':
self.add_scatter('set_memory_by_length', record.bytes, record.size)
elif record.type == 'sortedset':
self.add_scatter('sortedset_memory_by_length', record.bytes, record.size)
elif record.type == 'string':
self.add_scatter('string_memory_by_length', record.bytes, record.size)
elif record.type in ['dict', 'module', 'stream']:
pass
else:
raise Exception('Invalid data type %s' % record.type)
def add_aggregate(self, heading, subheading, metric):
if not heading in self.aggregates :
self.aggregates[heading] = {}
if not subheading in self.aggregates[heading]:
self.aggregates[heading][subheading] = 0
self.aggregates[heading][subheading] += metric
def add_histogram(self, heading, metric):
if not heading in self.histograms:
self.histograms[heading] = {}
if not metric in self.histograms[heading]:
self.histograms[heading][metric] = 1
else :
self.histograms[heading][metric] += 1
def add_scatter(self, heading, x, y):
if not heading in self.scatters:
self.scatters[heading] = []
self.scatters[heading].append([x, y])
def set_metadata(self, key, val):
self.metadata[key] = val
def get_json(self):
return json.dumps({"aggregates": self.aggregates, "scatters": self.scatters, "histograms": self.histograms, "metadata": self.metadata})
class PrintAllKeys(object):
def __init__(self, out, bytes, largest):
self._bytes = bytes
self._largest = largest
self._out = out
headers = "%s,%s,%s,%s,%s,%s,%s,%s\n" % (
"database", "type", "key", "size_in_bytes", "encoding", "num_elements", "len_largest_element", "expiry")
self._out.write(codecs.encode(headers, 'latin-1'))
if self._largest is not None:
self._heap = []
def next_record(self, record) :
if record.key is None:
return # some records are not keys (e.g. dict)
if self._largest is None:
if self._bytes is None or record.bytes >= int(self._bytes):
rec_str = "%d,%s,%s,%d,%s,%d,%d,%s\n" % (
record.database, record.type, record.key, record.bytes, record.encoding, record.size,
record.len_largest_element,
record.expiry.isoformat() if record.expiry else '')
self._out.write(codecs.encode(rec_str, 'latin-1'))
else:
heappush(self._heap, (record.bytes, record))
def end_rdb(self):
if self._largest is not None:
self._heap = nlargest(int(self._largest), self._heap)
self._largest = None
while self._heap:
bytes, record = heappop(self._heap)
self.next_record(record)
class PrintJustKeys(object):
def __init__(self, out):
self._out = out
def next_record(self, record):
self._out.write(codecs.encode("%s\n" % record.key, 'latin-1'))
class MemoryCallback(RdbCallback):
'''Calculates the memory used if this rdb file were loaded into RAM
The memory usage is approximate, and based on heuristics.
'''
def __init__(self, stream, architecture, redis_version='5.0', string_escape=None):
super(MemoryCallback, self).__init__(string_escape)
self._stream = stream
self._dbnum = 0
self._current_size = 0
self._current_encoding = None
self._current_length = 0
self._len_largest_element = 0
self._key_expiry = None
self._db_keys = 0
self._db_expires = 0
self._aux_used_mem = None
self._aux_redis_ver = None
self._aux_redis_bits = None
self._redis_version = StrictVersion(redis_version)
self._total_internal_frag = 0
if architecture == 64 or architecture == '64':
self._pointer_size = 8
self._long_size = 8
self._architecture = 64
elif architecture == 32 or architecture == '32':
self._pointer_size = 4
self._long_size = 4
self._architecture = 32
def emit_record(self, record_type, key, byte_count, encoding, size, largest_el, expiry):
if key is not None:
key = bytes_to_unicode(key, self._escape, skip_printable=True)
record = MemoryRecord(self._dbnum, record_type, key, byte_count, encoding, size, largest_el, expiry)
self._stream.next_record(record)
def start_rdb(self):
pass
def aux_field(self, key, value):
if key == 'used-mem':
self._aux_used_mem = int(value)
if key == 'redis-ver':
self._aux_redis_ver = value
if key == 'redis-bits':
self._aux_redis_bits = int(value)
def start_database(self, db_number):
self._dbnum = db_number
self._db_keys = 0
self._db_expires = 0
def end_database(self, db_number):
self.emit_record("dict", None, self.hashtable_overhead(self._db_keys), None, None, None, None)
self.emit_record("dict", None, self.hashtable_overhead(self._db_expires), None, None, None, None)
if hasattr(self._stream, 'end_database'):
self._stream.end_database(db_number)
def end_rdb(self):
if hasattr(self._stream, 'end_rdb'):
self._stream.end_rdb()
if hasattr(self._stream, 'set_metadata'):
self._stream.set_metadata('used_mem', self._aux_used_mem)
self._stream.set_metadata('redis_ver', self._aux_redis_ver)
self._stream.set_metadata('redis_bits', self._aux_redis_bits)
self._stream.set_metadata('internal_frag', self._total_internal_frag)
def set(self, key, value, expiry, info):
self._current_encoding = info['encoding']
size = self.top_level_object_overhead(key, expiry) + self.sizeof_string(value)
length = self.element_length(value)
self.emit_record("string", key, size, self._current_encoding, length, length, expiry)
self.end_key()
def start_hash(self, key, length, expiry, info):
self._current_encoding = info['encoding']
self._current_length = length
self._key_expiry = expiry
size = self.top_level_object_overhead(key, expiry)
if 'sizeof_value' in info:
size += info['sizeof_value']
elif 'encoding' in info and info['encoding'] == 'hashtable':
size += self.hashtable_overhead(length)
else:
raise Exception('start_hash', 'Could not find encoding or sizeof_value in info object %s' % info)
self._current_size = size
def hset(self, key, field, value):
if(self.element_length(field) > self._len_largest_element) :
self._len_largest_element = self.element_length(field)
if(self.element_length(value) > self._len_largest_element) :
self._len_largest_element = self.element_length(value)
if self._current_encoding == 'hashtable':
self._current_size += self.sizeof_string(field)
self._current_size += self.sizeof_string(value)
self._current_size += self.hashtable_entry_overhead()
if self._redis_version < StrictVersion('4.0'):
self._current_size += 2*self.robj_overhead()
def end_hash(self, key):
self.emit_record("hash", key, self._current_size, self._current_encoding, self._current_length,
self._len_largest_element, self._key_expiry)
self.end_key()
def start_set(self, key, cardinality, expiry, info):
# A set is exactly like a hashmap
self.start_hash(key, cardinality, expiry, info)
def sadd(self, key, member):
if(self.element_length(member) > self._len_largest_element) :
self._len_largest_element = self.element_length(member)
if self._current_encoding == 'hashtable':
self._current_size += self.sizeof_string(member)
self._current_size += self.hashtable_entry_overhead()
if self._redis_version < StrictVersion('4.0'):
self._current_size += self.robj_overhead()
def end_set(self, key):
self.emit_record("set", key, self._current_size, self._current_encoding, self._current_length,
self._len_largest_element, self._key_expiry)
self.end_key()
def start_list(self, key, expiry, info):
self._current_length = 0
self._list_items_size = 0 # size of all elements in case list ends up using linked list
self._list_items_zipped_size = 0 # size of all elements in case of ziplist of quicklist
self._current_encoding = info['encoding']
size = self.top_level_object_overhead(key, expiry)
self._key_expiry = expiry
# ignore the encoding in the rdb, and predict the encoding that will be used at the target redis version
if self._redis_version >= StrictVersion('3.2'):
# default configuration of redis 3.2
self._current_encoding = "quicklist"
self._list_max_ziplist_size = 8192 # default is -2 which means 8k
self._list_compress_depth = 0 # currently we only support no compression which is the default
self._cur_zips = 1
self._cur_zip_size = 0
else:
# default configuration for redis 2.8 -> 3.0
self._current_encoding = "ziplist"
self._list_max_ziplist_entries = 512
self._list_max_ziplist_value = 64
self._current_size = size
def rpush(self, key, value):
self._current_length += 1
# in linked list, when the robj has integer encoding, the value consumes no memory on top of the robj
size_in_list = self.sizeof_string(value) if not self.is_integer_type(value) else 0
# in ziplist and quicklist, this is the size of the value and the value header
size_in_zip = self.ziplist_entry_overhead(value)
if(self.element_length(value) > self._len_largest_element):
self._len_largest_element = self.element_length(value)
if self._current_encoding == "ziplist":
self._list_items_zipped_size += size_in_zip
if self._current_length > self._list_max_ziplist_entries or size_in_zip > self._list_max_ziplist_value:
self._current_encoding = "linkedlist"
elif self._current_encoding == "quicklist":
if self._cur_zip_size + size_in_zip > self._list_max_ziplist_size:
self._cur_zip_size = size_in_zip
self._cur_zips += 1
else:
self._cur_zip_size += size_in_zip
self._list_items_zipped_size += self.ziplist_entry_overhead(value)
self._list_items_size += size_in_list # not to be used in case of ziplist or quicklist
def end_list(self, key, info):
if self._current_encoding == 'quicklist':
self._current_size += self.quicklist_overhead(self._cur_zips)
self._current_size += self.ziplist_header_overhead() * self._cur_zips
self._current_size += self._list_items_zipped_size
elif self._current_encoding == 'ziplist':
self._current_size += self.ziplist_header_overhead()
self._current_size += self._list_items_zipped_size
else: # linkedlist
self._current_size += self.linkedlist_entry_overhead() * self._current_length
self._current_size += self.linkedlist_overhead()
if self._redis_version < StrictVersion('4.0'):
self._current_size += self.robj_overhead() * self._current_length
self._current_size += self._list_items_size
self.emit_record("list", key, self._current_size, self._current_encoding, self._current_length,
self._len_largest_element, self._key_expiry)
self.end_key()
def start_module(self, key, module_id, expiry, info):
self._key_expiry = expiry
self._current_encoding = module_id
self._current_size = 0
if key is not None:
self._current_size += self.top_level_object_overhead(key, expiry)
self._current_size += 8 + 1 # add the module id length and EOF byte
return False # don't build the full key buffer
def end_module(self, key, buffer_size, buffer=None):
size = self._current_size + buffer_size
self.emit_record("module", key, size, self._current_encoding, 1, size, self._key_expiry)
self.end_key()
def start_stream(self, key, listpacks_count, expiry, info):
self._key_expiry = expiry
self._current_encoding = info['encoding']
self._current_size = self.top_level_object_overhead(key, expiry)
self._current_size += self.sizeof_pointer()*2 + 8 + 16 # stream struct
self._current_size += self.sizeof_pointer() + 8*2 # rax struct
self._listpacks_count = listpacks_count
def stream_listpack(self, key, entry_id, data):
self._current_size += self.malloc_overhead(len(data))
if(len(data) > self._len_largest_element):
self._len_largest_element = len(data)
pass
def sizeof_stream_radix_tree(self, num_elements):
# This is a very rough estimation. The only alternative to doing an estimation,
# is to fully build a radix tree of similar design, and count the nodes.
# There should be at least as many nodes as there are elements in the radix tree (possibly up to 3 times)
num_nodes = int(num_elements * 2.5)
# formula for memory estimation copied from Redis's streamRadixTreeMemoryUsage
return 16*num_elements + num_nodes*4 + num_nodes*30*self.sizeof_long()
def end_stream(self, key, items, last_entry_id, cgroups):
# Now after we have some global key+value overheads, and all listpacks sizes,
# we need to add some estimations for radix tree and consumer groups.
# The logic for the memory estimation copied from Redis's MEMORY command.
radix_tree_size = self.sizeof_stream_radix_tree(self._listpacks_count)
cgroups_size = 0
for cg in cgroups:
cgroups_size += self.sizeof_pointer() * 2 + 16 # streamCG
pending = len(cg['pending'])
cgroups_size += self.sizeof_stream_radix_tree(pending)
cgroups_size += pending*(self.sizeof_pointer()+8+8) # streamNACK
for c in cg['consumers']:
cgroups_size += self.sizeof_pointer()*2 + 8 # streamConsumer
cgroups_size += self.sizeof_string(c['name'])
pending = len(c['pending'])
cgroups_size += self.sizeof_stream_radix_tree(pending)
size = self._current_size + radix_tree_size + cgroups_size
self._current_length = items
self.emit_record("stream", key, size, self._current_encoding, 1, self._len_largest_element, self._key_expiry)
self.end_key()
def start_sorted_set(self, key, length, expiry, info):
self._current_length = length
self._current_encoding = info['encoding']
size = self.top_level_object_overhead(key, expiry)
self._key_expiry = expiry
if 'sizeof_value' in info:
size += info['sizeof_value']
elif 'encoding' in info and info['encoding'] == 'skiplist':
size += self.skiplist_overhead(length)
else:
raise Exception('start_sorted_set', 'Could not find encoding or sizeof_value in info object %s' % info)
self._current_size = size
def zadd(self, key, score, member):
if(self.element_length(member) > self._len_largest_element):
self._len_largest_element = self.element_length(member)
if self._current_encoding == 'skiplist':
self._current_size += 8 # score (double)
self._current_size += self.sizeof_string(member)
if self._redis_version < StrictVersion('4.0'):
self._current_size += self.robj_overhead()
self._current_size += self.skiplist_entry_overhead()
def end_sorted_set(self, key):
self.emit_record("sortedset", key, self._current_size, self._current_encoding, self._current_length,
self._len_largest_element, self._key_expiry)
self.end_key()
def end_key(self):
self._db_keys += 1
self._current_encoding = None
self._current_size = 0
self._len_largest_element = 0
self._key_expiry = None
def sizeof_string(self, string):
# https://github.com/antirez/redis/blob/unstable/src/sds.h
try:
num = int(string)
if num < REDIS_SHARED_INTEGERS :
return 0
else :
return 0 # the integer is part of the robj, no extra memory
except ValueError:
pass
l = len(string)
if self._redis_version < StrictVersion('3.2'):
return self.malloc_overhead(l + 8 + 1)
if l < 2**5:
return self.malloc_overhead(l + 1 + 1)
if l < 2**8:
return self.malloc_overhead(l + 1 + 2 + 1)
if l < 2**16:
return self.malloc_overhead(l + 1 + 4 + 1)
if l < 2**32:
return self.malloc_overhead(l + 1 + 8 + 1)
return self.malloc_overhead(l + 1 + 16 + 1)
def top_level_object_overhead(self, key, expiry):
# Each top level object is an entry in a dictionary, and so we have to include
# the overhead of a dictionary entry
return self.hashtable_entry_overhead() + self.sizeof_string(key) + self.robj_overhead() + self.key_expiry_overhead(expiry)
def key_expiry_overhead(self, expiry):
# If there is no expiry, there isn't any overhead
if not expiry:
return 0
self._db_expires += 1
# Key expiry is stored in a hashtable, so we have to pay for the cost of a hashtable entry
# The timestamp itself is stored as an int64, which is a 8 bytes
return self.hashtable_entry_overhead() + 8
def hashtable_overhead(self, size):
# See https://github.com/antirez/redis/blob/unstable/src/dict.h
# See the structures dict and dictht
# 2 * (3 unsigned longs + 1 pointer) + int + long + 2 pointers
#
# Additionally, see **table in dictht
# The length of the table is the next power of 2
# When the hashtable is rehashing, another instance of **table is created
# Due to the possibility of rehashing during loading, we calculate the worst
# case in which both tables are allocated, and so multiply
# the size of **table by 1.5
return 4 + 7*self.sizeof_long() + 4*self.sizeof_pointer() + self.next_power(size)*self.sizeof_pointer()*1.5
def hashtable_entry_overhead(self):
# See https://github.com/antirez/redis/blob/unstable/src/dict.h
# Each dictEntry has 2 pointers + int64
return 2*self.sizeof_pointer() + 8
def linkedlist_overhead(self):
# See https://github.com/antirez/redis/blob/unstable/src/adlist.h
# A list has 5 pointers + an unsigned long
return self.sizeof_long() + 5*self.sizeof_pointer()
def quicklist_overhead(self, zip_count):
quicklist = 2*self.sizeof_pointer()+self.sizeof_long()+2*4
quickitem = 4*self.sizeof_pointer()+self.sizeof_long()+2*4
return quicklist + zip_count*quickitem
def linkedlist_entry_overhead(self):
# See https://github.com/antirez/redis/blob/unstable/src/adlist.h
# A node has 3 pointers
return 3*self.sizeof_pointer()
def ziplist_header_overhead(self):
# See https://github.com/antirez/redis/blob/unstable/src/ziplist.c
# <zlbytes><zltail><zllen><entry><entry><zlend>
return 4 + 4 + 2 + 1
def ziplist_entry_overhead(self, value):
# See https://github.com/antirez/redis/blob/unstable/src/ziplist.c
if self.is_integer_type(value):
header = 1
if value < 12:
size = 0
elif value < 2**8:
size = 1
elif value < 2**16:
size = 2
elif value < 2**24:
size = 3
elif value < 2**32:
size = 4
else:
size = 8
else:
size = len(value)
if size <= 63:
header = 1
elif size <= 16383:
header = 2
else:
header = 5
# add len again for prev_len of the next record
prev_len = 1 if size < 254 else 5
return prev_len + header + size
def skiplist_overhead(self, size):
return 2*self.sizeof_pointer() + self.hashtable_overhead(size) + (2*self.sizeof_pointer() + 16)
def skiplist_entry_overhead(self):
return self.hashtable_entry_overhead() + 2*self.sizeof_pointer() + 8 + (self.sizeof_pointer() + 8) * self.zset_random_level()
def robj_overhead(self):
return self.sizeof_pointer() + 8
def malloc_overhead(self, size):
alloc = get_jemalloc_allocation(size)
self._total_internal_frag += alloc - size
return alloc
def size_t(self):
return self.sizeof_pointer()
def sizeof_pointer(self):
return self._pointer_size
def sizeof_long(self):
return self._long_size
def next_power(self, size):
power = 1
while (power <= size) :
power = power << 1
return power
def zset_random_level(self):
level = 1
rint = random.randint(0, 0xFFFF)
while (rint < ZSKIPLIST_P * 0xFFFF):
level += 1
rint = random.randint(0, 0xFFFF)
if level < ZSKIPLIST_MAXLEVEL :
return level
else:
return ZSKIPLIST_MAXLEVEL
def is_integer_type(self, ob):
if isinstance(ob, int):
return True
if sys.version_info < (3,):
if isinstance(ob, long):
return True
return False
def element_length(self, element):
if self.is_integer_type(element):
return self._long_size
return len(element)
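# Illustrative sketch (not part of the original rdbtools module): shows how
# MemoryCallback.sizeof_string() above grows the SDS header with string length
# on a 64-bit target. The stream argument is unused on this code path, so None
# is passed purely for the demonstration.
def _demo_sds_string_sizes():
    cb = MemoryCallback(None, 64, redis_version='5.0')
    # Returns (string length, estimated allocated bytes) for a few sample lengths.
    return [(length, cb.sizeof_string(b'x' * length)) for length in (3, 40, 300, 70000)]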
# size classes from jemalloc 4.0.4 using LG_QUANTUM=3
jemalloc_size_classes = [
8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 384, 448, 512, 640, 768, 896, 1024,
1280, 1536, 1792, 2048, 2560, 3072, 3584, 4096, 5120, 6144, 7168, 8192, 10240, 12288, 14336, 16384, 20480, 24576,
28672, 32768, 40960, 49152, 57344, 65536, 81920, 98304, 114688,131072, 163840, 196608, 229376, 262144, 327680,
393216, 458752, 524288, 655360, 786432, 917504, 1048576, 1310720, 1572864, 1835008, 2097152, 2621440, 3145728,
3670016, 4194304, 5242880, 6291456, 7340032, 8388608, 10485760, 12582912, 14680064, 16777216, 20971520, 25165824,
29360128, 33554432, 41943040, 50331648, 58720256, 67108864, 83886080, 100663296, 117440512, 134217728, 167772160,
201326592, 234881024, 268435456, 335544320, 402653184, 469762048, 536870912, 671088640, 805306368, 939524096,
1073741824, 1342177280, 1610612736, 1879048192, 2147483648, 2684354560, 3221225472, 3758096384, 4294967296,
5368709120, 6442450944, 7516192768, 8589934592, 10737418240, 12884901888, 15032385536, 17179869184, 21474836480,
25769803776, 30064771072, 34359738368, 42949672960, 51539607552, 60129542144, 68719476736, 85899345920,
103079215104, 120259084288, 137438953472, 171798691840, 206158430208, 240518168576, 274877906944, 343597383680,
412316860416, 481036337152, 549755813888, 687194767360, 824633720832, 962072674304, 1099511627776,1374389534720,
1649267441664, 1924145348608, 2199023255552, 2748779069440, 3298534883328, 3848290697216, 4398046511104,
5497558138880, 6597069766656, 7696581394432, 8796093022208, 10995116277760, 13194139533312, 15393162788864,
17592186044416, 21990232555520, 26388279066624, 30786325577728, 35184372088832, 43980465111040, 52776558133248,
61572651155456, 70368744177664, 87960930222080, 105553116266496, 123145302310912, 140737488355328, 175921860444160,
211106232532992, 246290604621824, 281474976710656, 351843720888320, 422212465065984, 492581209243648,
562949953421312, 703687441776640, 844424930131968, 985162418487296, 1125899906842624, 1407374883553280,
1688849860263936, 1970324836974592, 2251799813685248, 2814749767106560, 3377699720527872, 3940649673949184,
4503599627370496, 5629499534213120, 6755399441055744, 7881299347898368, 9007199254740992, 11258999068426240,
13510798882111488, 15762598695796736, 18014398509481984, 22517998136852480, 27021597764222976,31525197391593472,
36028797018963968, 45035996273704960, 54043195528445952, 63050394783186944, 72057594037927936, 90071992547409920,
108086391056891904, 126100789566373888, 144115188075855872, 180143985094819840, 216172782113783808,
252201579132747776, 288230376151711744, 360287970189639680, 432345564227567616, 504403158265495552,
576460752303423488, 720575940379279360, 864691128455135232, 1008806316530991104, 1152921504606846976,
1441151880758558720, 1729382256910270464, 2017612633061982208, 2305843009213693952, 2882303761517117440,
3458764513820540928, 4035225266123964416, 4611686018427387904, 5764607523034234880, 6917529027641081856,
8070450532247928832, 9223372036854775808, 11529215046068469760, 13835058055282163712, 16140901064495857664
] # TODO: use different table depending on the redis-version used
def get_jemalloc_allocation(size):
idx = bisect.bisect_left(jemalloc_size_classes, size)
alloc = jemalloc_size_classes[idx] if idx < len(jemalloc_size_classes) else size
return alloc
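# Illustrative sketch (not part of the original rdbtools module): demonstrates how
# get_jemalloc_allocation() rounds a requested size up to the next jemalloc size
# class; the difference is the internal fragmentation that
# MemoryCallback.malloc_overhead() accumulates into the 'internal_frag' metadata.
def _demo_jemalloc_rounding():
    samples = []
    for requested in (5, 33, 100, 5000):
        allocated = get_jemalloc_allocation(requested)
        samples.append((requested, allocated, allocated - requested))
    return samples  # e.g. a 33-byte request lands in the 40-byte size class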
|
|
# Licensed under a 3-clause BSD style license - see LICENSES
from copy import deepcopy
from os.path import dirname, join
import numpy as np
import pytest
from astropy.table import Table
from numpy.random import RandomState
from numpy.testing import assert_allclose
import sncosmo
try:
import iminuit
HAS_IMINUIT = True
except ImportError:
HAS_IMINUIT = False
try:
import nestle
HAS_NESTLE = True
except ImportError:
HAS_NESTLE = False
try:
import emcee
HAS_EMCEE = True
except ImportError:
HAS_EMCEE = False
class TestFitting:
def setup_class(self):
model = sncosmo.Model(source='hsiao-subsampled')
params = {'t0': 56000., 'amplitude': 1.e-7, 'z': 0.2}
# generate fake data with no errors
points_per_band = 12
bands = points_per_band * ['bessellux', 'bessellb', 'bessellr',
'besselli']
times = params['t0'] + np.linspace(-10., 60., len(bands))
zp = len(bands) * [25.]
zpsys = len(bands) * ['ab']
model.set(**params)
flux = model.bandflux(bands, times, zp=zp, zpsys=zpsys)
fluxerr = len(bands) * [0.1 * np.max(flux)]
data = Table({
'time': times,
'band': bands,
'flux': flux,
'fluxerr': fluxerr,
'zp': zp,
'zpsys': zpsys
})
# reset parameters
model.set(z=0., t0=0., amplitude=1.)
self.model = model
self.data = data
self.params = params
def _test_mutation(self, fit_func):
"""Test a fitting function does not mutate arguments"""
# Some fitting functions require bounds for all varied parameters
bounds = {}
for param, param_val in self.params.items():
bounds[param] = (param_val * .95, param_val * 1.05)
# Preserve original input data
vparams = list(self.params.keys())
test_data = deepcopy(self.data)
test_model = deepcopy(self.model)
test_bounds = deepcopy(bounds)
test_vparams = deepcopy(vparams)
# Check for argument mutation
fit_func(test_data, test_model, test_vparams, bounds=test_bounds)
param_preserved = all(a == b for a, b in zip(vparams, test_vparams))
model_preserved = all(
a == b for a, b in
zip(self.model.parameters, test_model.parameters)
)
err_msg = '``{}`` argument was mutated'
assert all(self.data == test_data), err_msg.format('data')
assert bounds == test_bounds, err_msg.format('bounds')
assert param_preserved, err_msg.format('vparam_names')
assert model_preserved, err_msg.format('model')
@pytest.mark.skipif('not HAS_IMINUIT')
def test_fitlc_arg_mutation(self):
"""Test ``fit_lc`` does not mutate it's arguments"""
self._test_mutation(sncosmo.fit_lc)
@pytest.mark.skipif('not HAS_NESTLE')
def test_nestlc_arg_mutation(self):
"""Test ``nest_lc`` does not mutate it's arguments"""
self._test_mutation(sncosmo.nest_lc)
@pytest.mark.skipif('not HAS_EMCEE')
def test_mcmclc_arg_mutation(self):
"""Test ``mcmc_lc`` does not mutate it's arguments"""
self._test_mutation(sncosmo.mcmc_lc)
@pytest.mark.skipif('not HAS_IMINUIT')
def test_fit_lc(self):
"""Ensure that fit results match input model parameters (data are
noise-free).
Pass in parameter names in order different from that stored in
model; tests parameter re-ordering."""
res, fitmodel = sncosmo.fit_lc(self.data, self.model,
['amplitude', 'z', 't0'],
bounds={'z': (0., 1.0)})
# set model to true parameters and compare to fit results.
self.model.set(**self.params)
assert_allclose(res.parameters, self.model.parameters, rtol=1.e-3)
@pytest.mark.skipif('not HAS_IMINUIT')
def test_wrong_param_names(self):
"""Supplying parameter names that are not part of the model should
raise an error."""
# a parameter not in the model
with pytest.raises(ValueError):
res, fitmodel = sncosmo.fit_lc(self.data, self.model,
['t0', 'not_a_param'])
# no parameters
with pytest.raises(ValueError):
res, fitmodel = sncosmo.fit_lc(self.data, self.model, [])
@pytest.mark.skipif('not HAS_NESTLE')
def test_nest_lc(self):
"""Ensure that nested sampling runs.
Pass in parameter names in order different from that stored in
model; tests parameter re-ordering.
"""
rstate = RandomState(0)
self.model.set(**self.params)
res, fitmodel = sncosmo.nest_lc(
self.data, self.model, ['amplitude', 'z', 't0'],
bounds={'z': (0., 1.0)}, guess_amplitude_bound=True, npoints=50,
rstate=rstate)
assert_allclose(fitmodel.parameters, self.model.parameters, rtol=0.05)
@pytest.mark.might_download
@pytest.mark.skipif('not HAS_IMINUIT')
def test_fit_lc_vs_snfit():
"""Test fit_lc versus snfit result for one SN."""
# purposefully use CCM dust to match snfit
model = sncosmo.Model(source='salt2',
effects=[sncosmo.CCM89Dust()],
effect_names=['mw'],
effect_frames=['obs'])
fname = join(dirname(__file__), "data", "lc-03D4ag.list")
data = sncosmo.read_lc(fname, format='salt2', read_covmat=True,
expand_bands=True)
model.set(mwebv=data.meta['MWEBV'], z=data.meta['Z_HELIO'])
result, fitted_model = sncosmo.fit_lc(
data, model, ['t0', 'x0', 'x1', 'c'],
bounds={'x1': (-3., 3.), 'c': (-0.4, 0.4)},
modelcov=True,
phase_range=(-15., 45.),
wave_range=(3000., 7000.),
warn=False,
verbose=False)
print(result)
assert result.ndof == 25
assert result.nfit == 3
assert_allclose(fitted_model['t0'], 52830.9313, atol=0.01, rtol=0.)
assert_allclose(fitted_model['x0'], 5.6578663e-05, atol=0., rtol=0.005)
assert_allclose(fitted_model['x1'], 0.937399344, atol=0.005, rtol=0.)
assert_allclose(fitted_model['c'], -0.0851965244, atol=0.001, rtol=0.)
# errors
assert_allclose(result.errors['t0'], 0.0955792638, atol=0., rtol=0.01)
assert_allclose(result.errors['x0'], 1.52745001e-06, atol=0., rtol=0.01)
assert_allclose(result.errors['x1'], 0.104657847, atol=0., rtol=0.01)
assert_allclose(result.errors['c'], 0.0234763446, atol=0., rtol=0.01)
|
|
#!/usr/bin/env python
# Copyright (c) 2015, Robot Control and Pattern Recognition Group,
# Institute of Control and Computation Engineering
# Warsaw University of Technology
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Warsaw University of Technology nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author: Dawid Seredynski
#
import roslib
roslib.load_manifest('velma_scripts')
import rospy
import tf
from std_msgs.msg import *
from sensor_msgs.msg import *
from geometry_msgs.msg import *
from visualization_msgs.msg import *
import tf
from tf import *
from tf.transformations import *
import tf_conversions.posemath as pm
from tf2_msgs.msg import *
import PyKDL
import math
import numpy
import numpy as np
import copy
import matplotlib.pyplot as plt
import thread
import random
import openravepy
from openravepy import *
from optparse import OptionParser
from openravepy.misc import OpenRAVEGlobalArguments
import itertools
import operator
import rospkg
import velmautils
import openraveinstance
class TestOrURDF:
"""
"""
def KDLToOpenrave(self, T):
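# Convert a PyKDL.Frame (rotation matrix T.M and translation T.p) into the
# 4x4 homogeneous transform (as a numpy array) that OpenRAVE expects.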
ret = numpy.array([
[T.M[0,0], T.M[0,1], T.M[0,2], T.p.x()],
[T.M[1,0], T.M[1,1], T.M[1,2], T.p.y()],
[T.M[2,0], T.M[2,1], T.M[2,2], T.p.z()],
[0, 0, 0, 1]])
return ret
def __init__(self):
self.pub_marker = velmautils.MarkerPublisher()
def spin(self):
#
# Initialise Openrave
#
# parser = OptionParser(description='Openrave Velma interface')
# OpenRAVEGlobalArguments.addOptions(parser)
# (options, leftargs) = parser.parse_args()
# options._collision = "fcl"
# env = OpenRAVEGlobalArguments.parseAndCreate(options,defaultviewer=True)
# xacro_uri = "package://velma_description/robots/velma.urdf.xacro"
# srdf_uri = "package://velma_description/robots/velma.srdf"
rospack = rospkg.RosPack()
openrave = openraveinstance.OpenraveInstance()
openrave.startOpenraveURDF()#env_file=rospack.get_path('velma_scripts')+"/data/romoco/romoco.env.xml")#, collision='fcl')
openrave.readRobot(xacro_uri=rospack.get_path('velma_description') + '/robots/velma.urdf.xacro', srdf_uri=rospack.get_path('velma_description') + '/robots/velma.srdf')
# openrave.startOpenrave(rospack.get_path('velma_scripts')+"/data/romoco/romoco_robot.env.xml")
# print "geometry group:", openrave.env.GetCollisionChecker().GetGeometryGroup()
# openrave.env.GetCollisionChecker().SetGeometryGroup("spheres")
# print "geometry group:", openrave.env.GetCollisionChecker().GetGeometryGroup()
# urdf_module = RaveCreateModule(env, 'urdf')
# name = urdf_module.SendCommand('load ' + urdf_uri + ' ' + srdf_uri)
# robot_rave = env.GetRobot(name)
# for man in robot_rave.GetManipulators():
# print "manipulator", man
# print "gripper", man.GetEndEffector()
# for joint in openrave.robot_rave.GetJoints():
# print joint
# TEST: wrist joints collision
if False:
tab2=[
[-0.397855401039,-2.90307354927],
[2.12894010544,-2.90307354927],
[2.12043237686,-1.87363839149],
[1.92475450039,-1.43123674393],
[0.77621114254,-1.39720571041],
[0.350824713707,-1.00585031509],
[0.401871085167,-0.571956157684],
[0.810242056847,0.414940297604],
[1.34622907639,0.942419290543],
[2.11192464828,1.01898884773],
[2.12894010544,2.8906891346],
[-0.814733862877,2.8906891346],
[-1.22310483456,2.27813267708],
[-2.21850919724,2.29514837265],
[-2.22701668739,-1.32063627243],
[-1.81013822556,-1.66945314407],
[-0.814733862877,-1.73751521111],
[-0.423378348351,-2.09483933449],
]
m_id = 0
for pt in tab2:
m_id = self.pub_marker.publishSinglePointMarker(PyKDL.Vector(pt[0],pt[1],0.1), m_id, r=0, g=0, b=1, namespace='default', frame_id='torso_base', m_type=Marker.CUBE, scale=Vector3(0.1, 0.1, 0.1), T=None)
lim5_lo, lim5_up = openrave.robot_rave.GetJoint("right_arm_5_joint").GetLimits()
lim6_lo, lim6_up = openrave.robot_rave.GetJoint("right_arm_6_joint").GetLimits()
for q5 in np.linspace(lim5_lo[0], lim5_up[0], 20):
for q6 in np.linspace(lim6_lo[0], lim6_up[0], 20):
conf = {
"right_arm_5_joint":q5,
"right_arm_6_joint":q6,
}
openrave.updateRobotConfigurationRos(conf)
openrave.env.UpdatePublishedBodies()
report = CollisionReport()
if openrave.robot_rave.CheckSelfCollision(report):
m_id = self.pub_marker.publishSinglePointMarker(PyKDL.Vector(q5,q6,0), m_id, r=1, g=0, b=0, namespace='default', frame_id='torso_base', m_type=Marker.CUBE, scale=Vector3(0.1, 0.1, 0.1), T=None)
else:
m_id = self.pub_marker.publishSinglePointMarker(PyKDL.Vector(q5,q6,0), m_id, r=0, g=1, b=0, namespace='default', frame_id='torso_base', m_type=Marker.CUBE, scale=Vector3(0.1, 0.1, 0.1), T=None)
rospy.sleep(0.01)
raw_input(".")
exit(0)
if False:
for link in openrave.robot_rave.GetLinks():
geoms = link.GetGeometries()
print "geoms:", len(geoms)
col_geoms = link.GetGroupNumGeometries("collision")
print "col_geoms:", col_geoms
vis_geoms = link.GetGroupNumGeometries("visual")
print "vis_geoms:", vis_geoms
print link, link.GetCollisionData()
for g in geoms:
info = g.GetInfo()
print " geom", g.GetType()
print " mesh_collision", info._meshcollision
if len(info._meshcollision.vertices) > 0:
x = info._meshcollision.vertices[0][0]
y = info._meshcollision.vertices[0][1]
z = info._meshcollision.vertices[0][2]
print " mesh_collision", math.sqrt(x*x+y*y+z*z)
print " modifable", info._bModifiable
print " render", info._filenamerender
print " coll", info._filenamecollision
# TEST: collision
if False:
sphere = RaveCreateKinBody(openrave.env,'')
sphere.SetName("sphere")
sphere.InitFromSpheres(numpy.array([[0,0,0,0.1]]),True)
openrave.env.Add(sphere,True)
x = -0.3
while True:
tr = self.KDLToOpenrave(PyKDL.Frame(PyKDL.Vector(x,0.8,1.9)))
sphere.SetTransform(tr)
openrave.env.UpdatePublishedBodies()
report = CollisionReport()
ret = openrave.env.CheckCollision(sphere, report)
if report.plink1 == None:
print None
else:
print report.plink1.GetParent().GetName(), report.plink2.GetName()
# print " ", report.vLinkColliding
for link1, link2 in report.vLinkColliding:
print " ", link1.GetParent().GetName(), link2.GetName()
# print report.plink1.GetParent().GetName(), report.plink2.GetParent().GetName()
raw_input(".")
x += 0.005
exit(0)
# CollisionOptions:
# CO_Distance = 1, ///< Compute distance measurements, this is usually slow and not all checkers support it.
# CO_UseTolerance = 2, ///< not used
# CO_Contacts = 4, ///< Return the contact points of the collision in the \ref CollisionReport. Note that this takes longer to compute.
# CO_RayAnyHit = 8, ///< When performing collision with rays, if this is set, algorithm just returns any hit instead of the closest (can be faster)
# Allows planners to greatly reduce redundant collision checks.
# If set and the target object is a robot, then only the links controlled by the currently set active DOFs and their attached bodies will be checked for collisions.
# The things that **will not be** checked for collision are:
# - links that do not move with respect to each other as a result of moving the active dofs.
# CO_ActiveDOFs = 0x10,
# CO_AllLinkCollisions = 0x20, ///< if set then all the link collisions will be returned inside CollisionReport::vLinkColliding. Collision is slower because more pairs have to be checked.
# CO_AllGeometryContacts = 0x40, ///< if set, then will return the contacts of all the colliding geometries. This option can be very slow.
if True:
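# 4 == CO_Contacts (see the flag list above): ask the checker to fill report.contacts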
openrave.env.GetCollisionChecker().SetCollisionOptions(4)
box = RaveCreateKinBody(openrave.env,'')
box.SetName("box")
box.InitFromBoxes(numpy.array([[0,0,0,0.1,0.1,0.1]]),True)
openrave.env.Add(box,True)
sphere = RaveCreateKinBody(openrave.env,'')
sphere.SetName("sphere")
sphere.InitFromSpheres(numpy.array([[0,0,0,0.1]]),True)
openrave.env.Add(sphere,True)
transforms = [
self.KDLToOpenrave(PyKDL.Frame(PyKDL.Vector(0,0,0.198))),
self.KDLToOpenrave(PyKDL.Frame(PyKDL.Vector(0,0,-0.198))),
self.KDLToOpenrave(PyKDL.Frame(PyKDL.Vector(0,0.198,0))),
self.KDLToOpenrave(PyKDL.Frame(PyKDL.Vector(0,-0.198,0))),
self.KDLToOpenrave(PyKDL.Frame(PyKDL.Vector(0.198,0,0))),
self.KDLToOpenrave(PyKDL.Frame(PyKDL.Vector(-0.198,0,0)))
]
for tr in transforms:
print "transform", tr
sphere.SetTransform(tr)
openrave.env.UpdatePublishedBodies()
report = CollisionReport()
ret = openrave.env.CheckCollision(box, report)
print report.plink1.GetParent().GetName(), report.plink2.GetParent().GetName()
print report.contacts[0]
ret = openrave.env.CheckCollision(sphere, report)
# print ret
# print report
print report.plink1.GetParent().GetName(), report.plink2.GetParent().GetName()
print report.contacts[0]
raw_input(".")
exit(0)
if __name__ == '__main__':
rospy.init_node('test_or_urdf')
task = TestOrURDF()
rospy.sleep(1)
task.spin()
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test suites for 'common' code used throughout the OpenStack HTTP API.
"""
from lxml import etree
import webob
import webob.exc
import xml.dom.minidom as minidom
from nova import exception
from nova import test
from nova.api.openstack import common
from nova.api.openstack import xmlutil
NS = "{http://docs.openstack.org/compute/api/v1.1}"
ATOMNS = "{http://www.w3.org/2005/Atom}"
class LimiterTest(test.TestCase):
"""
Unit tests for the `nova.api.openstack.common.limited` method which takes
in a list of items and, depending on the 'offset' and 'limit' GET params,
returns a subset or complete set of the given items.
"""
def setUp(self):
""" Run before each test. """
super(LimiterTest, self).setUp()
self.tiny = range(1)
self.small = range(10)
self.medium = range(1000)
self.large = range(10000)
def test_limiter_offset_zero(self):
""" Test offset key works with 0. """
req = webob.Request.blank('/?offset=0')
self.assertEqual(common.limited(self.tiny, req), self.tiny)
self.assertEqual(common.limited(self.small, req), self.small)
self.assertEqual(common.limited(self.medium, req), self.medium)
self.assertEqual(common.limited(self.large, req), self.large[:1000])
def test_limiter_offset_medium(self):
""" Test offset key works with a medium sized number. """
req = webob.Request.blank('/?offset=10')
self.assertEqual(common.limited(self.tiny, req), [])
self.assertEqual(common.limited(self.small, req), self.small[10:])
self.assertEqual(common.limited(self.medium, req), self.medium[10:])
self.assertEqual(common.limited(self.large, req), self.large[10:1010])
def test_limiter_offset_over_max(self):
""" Test offset key works with a number over 1000 (max_limit). """
req = webob.Request.blank('/?offset=1001')
self.assertEqual(common.limited(self.tiny, req), [])
self.assertEqual(common.limited(self.small, req), [])
self.assertEqual(common.limited(self.medium, req), [])
self.assertEqual(
common.limited(self.large, req), self.large[1001:2001])
def test_limiter_offset_blank(self):
""" Test offset key works with a blank offset. """
req = webob.Request.blank('/?offset=')
self.assertRaises(
webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
def test_limiter_offset_bad(self):
""" Test offset key works with a BAD offset. """
req = webob.Request.blank(u'/?offset=\u0020aa')
self.assertRaises(
webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
def test_limiter_nothing(self):
""" Test request with no offset or limit """
req = webob.Request.blank('/')
self.assertEqual(common.limited(self.tiny, req), self.tiny)
self.assertEqual(common.limited(self.small, req), self.small)
self.assertEqual(common.limited(self.medium, req), self.medium)
self.assertEqual(common.limited(self.large, req), self.large[:1000])
def test_limiter_limit_zero(self):
""" Test limit of zero. """
req = webob.Request.blank('/?limit=0')
self.assertEqual(common.limited(self.tiny, req), self.tiny)
self.assertEqual(common.limited(self.small, req), self.small)
self.assertEqual(common.limited(self.medium, req), self.medium)
self.assertEqual(common.limited(self.large, req), self.large[:1000])
def test_limiter_limit_medium(self):
""" Test limit of 10. """
req = webob.Request.blank('/?limit=10')
self.assertEqual(common.limited(self.tiny, req), self.tiny)
self.assertEqual(common.limited(self.small, req), self.small)
self.assertEqual(common.limited(self.medium, req), self.medium[:10])
self.assertEqual(common.limited(self.large, req), self.large[:10])
def test_limiter_limit_over_max(self):
""" Test limit of 3000. """
req = webob.Request.blank('/?limit=3000')
self.assertEqual(common.limited(self.tiny, req), self.tiny)
self.assertEqual(common.limited(self.small, req), self.small)
self.assertEqual(common.limited(self.medium, req), self.medium)
self.assertEqual(common.limited(self.large, req), self.large[:1000])
def test_limiter_limit_and_offset(self):
""" Test request with both limit and offset. """
items = range(2000)
req = webob.Request.blank('/?offset=1&limit=3')
self.assertEqual(common.limited(items, req), items[1:4])
req = webob.Request.blank('/?offset=3&limit=0')
self.assertEqual(common.limited(items, req), items[3:1003])
req = webob.Request.blank('/?offset=3&limit=1500')
self.assertEqual(common.limited(items, req), items[3:1003])
req = webob.Request.blank('/?offset=3000&limit=10')
self.assertEqual(common.limited(items, req), [])
def test_limiter_custom_max_limit(self):
""" Test a max_limit other than 1000. """
items = range(2000)
req = webob.Request.blank('/?offset=1&limit=3')
self.assertEqual(
common.limited(items, req, max_limit=2000), items[1:4])
req = webob.Request.blank('/?offset=3&limit=0')
self.assertEqual(
common.limited(items, req, max_limit=2000), items[3:])
req = webob.Request.blank('/?offset=3&limit=2500')
self.assertEqual(
common.limited(items, req, max_limit=2000), items[3:])
req = webob.Request.blank('/?offset=3000&limit=10')
self.assertEqual(common.limited(items, req, max_limit=2000), [])
def test_limiter_negative_limit(self):
""" Test a negative limit. """
req = webob.Request.blank('/?limit=-3000')
self.assertRaises(
webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
def test_limiter_negative_offset(self):
""" Test a negative offset. """
req = webob.Request.blank('/?offset=-30')
self.assertRaises(
webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
class PaginationParamsTest(test.TestCase):
"""
Unit tests for the `nova.api.openstack.common.get_pagination_params`
method which takes in a request object and returns 'marker' and 'limit'
GET params.
"""
def test_no_params(self):
""" Test no params. """
req = webob.Request.blank('/')
self.assertEqual(common.get_pagination_params(req), {})
def test_valid_marker(self):
""" Test valid marker param. """
req = webob.Request.blank(
'/?marker=263abb28-1de6-412f-b00b-f0ee0c4333c2')
self.assertEqual(common.get_pagination_params(req),
{'marker': '263abb28-1de6-412f-b00b-f0ee0c4333c2'})
def test_valid_limit(self):
""" Test valid limit param. """
req = webob.Request.blank('/?limit=10')
self.assertEqual(common.get_pagination_params(req), {'limit': 10})
def test_invalid_limit(self):
""" Test invalid limit param. """
req = webob.Request.blank('/?limit=-2')
self.assertRaises(
webob.exc.HTTPBadRequest, common.get_pagination_params, req)
def test_valid_limit_and_marker(self):
""" Test valid limit and marker parameters. """
marker = '263abb28-1de6-412f-b00b-f0ee0c4333c2'
req = webob.Request.blank('/?limit=20&marker=%s' % marker)
self.assertEqual(common.get_pagination_params(req),
{'marker': marker, 'limit': 20})
class MiscFunctionsTest(test.TestCase):
def test_remove_major_version_from_href(self):
fixture = 'http://www.testsite.com/v1/images'
expected = 'http://www.testsite.com/images'
actual = common.remove_version_from_href(fixture)
self.assertEqual(actual, expected)
def test_remove_version_from_href(self):
fixture = 'http://www.testsite.com/v1.1/images'
expected = 'http://www.testsite.com/images'
actual = common.remove_version_from_href(fixture)
self.assertEqual(actual, expected)
def test_remove_version_from_href_2(self):
fixture = 'http://www.testsite.com/v1.1/'
expected = 'http://www.testsite.com/'
actual = common.remove_version_from_href(fixture)
self.assertEqual(actual, expected)
def test_remove_version_from_href_3(self):
fixture = 'http://www.testsite.com/v10.10'
expected = 'http://www.testsite.com'
actual = common.remove_version_from_href(fixture)
self.assertEqual(actual, expected)
def test_remove_version_from_href_4(self):
fixture = 'http://www.testsite.com/v1.1/images/v10.5'
expected = 'http://www.testsite.com/images/v10.5'
actual = common.remove_version_from_href(fixture)
self.assertEqual(actual, expected)
def test_remove_version_from_href_bad_request(self):
fixture = 'http://www.testsite.com/1.1/images'
self.assertRaises(ValueError,
common.remove_version_from_href,
fixture)
def test_remove_version_from_href_bad_request_2(self):
fixture = 'http://www.testsite.com/v/images'
self.assertRaises(ValueError,
common.remove_version_from_href,
fixture)
def test_remove_version_from_href_bad_request_3(self):
fixture = 'http://www.testsite.com/v1.1images'
self.assertRaises(ValueError,
common.remove_version_from_href,
fixture)
def test_get_id_from_href_with_int_url(self):
fixture = 'http://www.testsite.com/dir/45'
actual = common.get_id_from_href(fixture)
expected = '45'
self.assertEqual(actual, expected)
def test_get_id_from_href_with_int(self):
fixture = '45'
actual = common.get_id_from_href(fixture)
expected = '45'
self.assertEqual(actual, expected)
def test_get_id_from_href_with_int_url_query(self):
fixture = 'http://www.testsite.com/dir/45?asdf=jkl'
actual = common.get_id_from_href(fixture)
expected = '45'
self.assertEqual(actual, expected)
def test_get_id_from_href_with_uuid_url(self):
fixture = 'http://www.testsite.com/dir/abc123'
actual = common.get_id_from_href(fixture)
expected = "abc123"
self.assertEqual(actual, expected)
def test_get_id_from_href_with_uuid_url_query(self):
fixture = 'http://www.testsite.com/dir/abc123?asdf=jkl'
actual = common.get_id_from_href(fixture)
expected = "abc123"
self.assertEqual(actual, expected)
def test_get_id_from_href_with_uuid(self):
fixture = 'abc123'
actual = common.get_id_from_href(fixture)
expected = 'abc123'
self.assertEqual(actual, expected)
def test_get_version_from_href(self):
fixture = 'http://www.testsite.com/v1.1/images'
expected = '1.1'
actual = common.get_version_from_href(fixture)
self.assertEqual(actual, expected)
def test_get_version_from_href_2(self):
fixture = 'http://www.testsite.com/v1.1'
expected = '1.1'
actual = common.get_version_from_href(fixture)
self.assertEqual(actual, expected)
def test_get_version_from_href_default(self):
fixture = 'http://www.testsite.com/images'
expected = '2'
actual = common.get_version_from_href(fixture)
self.assertEqual(actual, expected)
def test_raise_http_conflict_for_instance_invalid_state(self):
# Correct args
exc = exception.InstanceInvalidState(attr='fake_attr',
state='fake_state', method='fake_method')
try:
common.raise_http_conflict_for_instance_invalid_state(exc,
'meow')
except Exception, e:
self.assertTrue(isinstance(e, webob.exc.HTTPConflict))
msg = str(e)
self.assertEqual(msg,
"Cannot 'meow' while instance is in fake_attr fake_state")
else:
self.fail("webob.exc.HTTPConflict was not raised")
# Incorrect args
exc = exception.InstanceInvalidState()
try:
common.raise_http_conflict_for_instance_invalid_state(exc,
'meow')
except Exception, e:
self.assertTrue(isinstance(e, webob.exc.HTTPConflict))
msg = str(e)
self.assertEqual(msg,
"Instance is in an invalid state for 'meow'")
else:
self.fail("webob.exc.HTTPConflict was not raised")
class MetadataXMLDeserializationTest(test.TestCase):
deserializer = common.MetadataXMLDeserializer()
def test_create(self):
request_body = """
<metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
<meta key='123'>asdf</meta>
<meta key='567'>jkl;</meta>
</metadata>"""
output = self.deserializer.deserialize(request_body, 'create')
expected = {"body": {"metadata": {"123": "asdf", "567": "jkl;"}}}
self.assertEquals(output, expected)
def test_create_empty(self):
request_body = """
<metadata xmlns="http://docs.openstack.org/compute/api/v1.1"/>"""
output = self.deserializer.deserialize(request_body, 'create')
expected = {"body": {"metadata": {}}}
self.assertEquals(output, expected)
def test_update_all(self):
request_body = """
<metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
<meta key='123'>asdf</meta>
<meta key='567'>jkl;</meta>
</metadata>"""
output = self.deserializer.deserialize(request_body, 'update_all')
expected = {"body": {"metadata": {"123": "asdf", "567": "jkl;"}}}
self.assertEquals(output, expected)
def test_update(self):
request_body = """
<meta xmlns="http://docs.openstack.org/compute/api/v1.1"
key='123'>asdf</meta>"""
output = self.deserializer.deserialize(request_body, 'update')
expected = {"body": {"meta": {"123": "asdf"}}}
self.assertEquals(output, expected)
class MetadataXMLSerializationTest(test.TestCase):
def test_xml_declaration(self):
serializer = common.MetadataTemplate()
fixture = {
'metadata': {
'one': 'two',
'three': 'four',
},
}
output = serializer.serialize(fixture)
print output
has_dec = output.startswith("<?xml version='1.0' encoding='UTF-8'?>")
self.assertTrue(has_dec)
def test_index(self):
serializer = common.MetadataTemplate()
fixture = {
'metadata': {
'one': 'two',
'three': 'four',
},
}
output = serializer.serialize(fixture)
print output
root = etree.XML(output)
xmlutil.validate_schema(root, 'metadata')
metadata_dict = fixture['metadata']
metadata_elems = root.findall('{0}meta'.format(NS))
self.assertEqual(len(metadata_elems), 2)
for i, metadata_elem in enumerate(metadata_elems):
(meta_key, meta_value) = metadata_dict.items()[i]
self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
self.assertEqual(str(metadata_elem.text).strip(), str(meta_value))
def test_index_null(self):
serializer = common.MetadataTemplate()
fixture = {
'metadata': {
None: None,
},
}
output = serializer.serialize(fixture)
print output
root = etree.XML(output)
xmlutil.validate_schema(root, 'metadata')
metadata_dict = fixture['metadata']
metadata_elems = root.findall('{0}meta'.format(NS))
self.assertEqual(len(metadata_elems), 1)
for i, metadata_elem in enumerate(metadata_elems):
(meta_key, meta_value) = metadata_dict.items()[i]
self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
self.assertEqual(str(metadata_elem.text).strip(), str(meta_value))
def test_index_unicode(self):
serializer = common.MetadataTemplate()
fixture = {
'metadata': {
u'three': u'Jos\xe9',
},
}
output = serializer.serialize(fixture)
print output
root = etree.XML(output)
xmlutil.validate_schema(root, 'metadata')
metadata_dict = fixture['metadata']
metadata_elems = root.findall('{0}meta'.format(NS))
self.assertEqual(len(metadata_elems), 1)
for i, metadata_elem in enumerate(metadata_elems):
(meta_key, meta_value) = metadata_dict.items()[i]
self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
self.assertEqual(metadata_elem.text.strip(), meta_value)
def test_show(self):
serializer = common.MetaItemTemplate()
fixture = {
'meta': {
'one': 'two',
},
}
output = serializer.serialize(fixture)
print output
root = etree.XML(output)
meta_dict = fixture['meta']
(meta_key, meta_value) = meta_dict.items()[0]
self.assertEqual(str(root.get('key')), str(meta_key))
self.assertEqual(root.text.strip(), meta_value)
def test_update_all(self):
serializer = common.MetadataTemplate()
fixture = {
'metadata': {
'key6': 'value6',
'key4': 'value4',
},
}
output = serializer.serialize(fixture)
print output
root = etree.XML(output)
xmlutil.validate_schema(root, 'metadata')
metadata_dict = fixture['metadata']
metadata_elems = root.findall('{0}meta'.format(NS))
self.assertEqual(len(metadata_elems), 2)
for i, metadata_elem in enumerate(metadata_elems):
(meta_key, meta_value) = metadata_dict.items()[i]
self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
self.assertEqual(str(metadata_elem.text).strip(), str(meta_value))
def test_update_item(self):
serializer = common.MetaItemTemplate()
fixture = {
'meta': {
'one': 'two',
},
}
output = serializer.serialize(fixture)
print output
root = etree.XML(output)
meta_dict = fixture['meta']
(meta_key, meta_value) = meta_dict.items()[0]
self.assertEqual(str(root.get('key')), str(meta_key))
self.assertEqual(root.text.strip(), meta_value)
def test_create(self):
serializer = common.MetadataTemplate()
fixture = {
'metadata': {
'key9': 'value9',
'key2': 'value2',
'key1': 'value1',
},
}
output = serializer.serialize(fixture)
print output
root = etree.XML(output)
xmlutil.validate_schema(root, 'metadata')
metadata_dict = fixture['metadata']
metadata_elems = root.findall('{0}meta'.format(NS))
self.assertEqual(len(metadata_elems), 3)
for i, metadata_elem in enumerate(metadata_elems):
(meta_key, meta_value) = metadata_dict.items()[i]
self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
self.assertEqual(str(metadata_elem.text).strip(), str(meta_value))
actual = minidom.parseString(output.replace(" ", ""))
expected = minidom.parseString("""
<metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
<meta key="key2">value2</meta>
<meta key="key9">value9</meta>
<meta key="key1">value1</meta>
</metadata>
""".replace(" ", "").replace("\n", ""))
self.assertEqual(expected.toxml(), actual.toxml())
|
|
# Imports
from django.conf import settings
from django.contrib.auth.models import User
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render
from django.views import generic
import requests
from rest_framework import viewsets
#import pandas as pd
#import numpy as np
import matplotlib
# fix dependency issue in Heroku: doesn't support Tk
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg
from project.apicc.forms import SubmitCryptoasset, PortfolioForm
from project.apicc.serializers import (UserSerializer,
CryptoassetSerializer,
CryptoassetV1Serializer,
)
from .models import Cryptoasset, CryptoassetVersionOne, Portfolio
from .fetch_coinlist import(mpl_plot_fig_basic,
df_data_to_float,
get_portfolio,
)
# Create your views here.
#==============================================================================
# #class IndexView(LoginRequiredMixin, generic.ListView):
# class IndexView(generic.ListView):
# login_url = '../../accounts/login/'
# redirect_field_name = 'redirect_to'
# template_name = 'dcai/index.html'
# context_object_name = 'cryptoasset_list'
#
# def get_queryset(self):
# """return list of cryptoassets in the database"""
# return Cryptoasset.objects.filter().order_by('pk')
#==============================================================================
def index(request):
template_name = 'index.html'
return render(request, template_name, {})
#==============================================================================
# def save_cryptoasset(request):
# def delete_all_item():
# print("clear all entries in the DB")
# Cryptoasset.objects.all().delete()
#
# if request.method == "POST":
# delete_all_item()
# print("POST")
# form = SubmitCryptoasset(request.POST)
# if form.is_valid():
# url = form.cleaned_data['url']
# r = requests.get(url)
# json = r.json()
# data = json['Data']
#
# for num, key in enumerate(data):
# serializer = CryptoassetSerializer(data=data[key])
# if serializer.is_valid():
# print("serializer is valid")
# embed = serializer.save()
# print(serializer.errors)
# else:
#                print("serializer is invalid")
# print(serializer.errors)
# serializer.is_valid()
# if num==len(data):
# return render(request, 'embeds.html', {'embed': embed})
#
# else:
# print("render form")
# form = SubmitCryptoasset()
#
# return render(request, 'index.html', {'form': form})
#==============================================================================
# Global variable declarations
TOP_BY_MARKETCAP = 500
# Function based view declarations
#==============================================================================
# def draw_mpl(request):
# #def draw_mpl(request, portfolio=0):
# # size = request.GET.get('size')
# # return render(request, 'dcai/draw_mpl.html', {'size':size})
# #TODO make pythonesque
# #TODO figure out why GET request doesn't take strings?
# if request.GET.get('portfolio')==None:
# print("no get request")
# portfolio = 0
# else:
# print("get GET")
# portfolio = request.GET.get('portfolio')
#
# return render(request, 'apicc/draw_mpl.html', {'portfolio':portfolio})
#==============================================================================
def draw_mpl(request):
if request.GET.get('pname')==None:
print("no get request")
pname = 0
else:
print("get GET")
pname = request.GET.get('pname')
return render(request, 'apicc/draw_mpl.html', {'pname':pname})
def draw_mpl_model(request):
#TODO write dynamic title -_-'
#TODO make pythonesque
print("draw_mpl_model")
form = PortfolioForm()
if request.GET.get('pname')==None:
print("no get request")
pname = 0
print("print: {}".format(pname))
else:
pname = request.GET.get('pname')
print("get POST: {}".format(pname))
print("let's render", pname)
return render(request, 'apicc/draw_mpl_model.html', {'pname': pname, 'form':form})
def momentum_plot(request, pname):
#def momentum_plot(rquest, size=1,):
# portfolio = name
print("\n\nmomentum_plot\n\n")
# print(type(portfolio), portfolio)
if pname=='0':
print("No input: return empty plot")
f = plt.figure()
ax = f.add_subplot(111)
# ax.plot([1,2,3,4], color='b')
ax.axis('off')
else:
# data = CryptoassetVersionOne.objects.values_list('symbol', flat=True).filter(portfolio__pname__startswith='Anonymous').distinct()
data = CryptoassetVersionOne.objects.values_list('symbol', flat=True).filter(portfolio__id=pname).distinct()
print("got ticker data")
data_list = list(data)
print("list of retrieved data: \n{}".format(data_list))
#
portfolio_description = Portfolio.objects.values_list('pdescription', flat=True).filter(id=pname).distinct()
portfolio_description = list(portfolio_description)[0]
        print('portfolio_description: {}\ntype: {}'.format(portfolio_description, type(portfolio_description)))
f = mpl_plot_fig_basic(data_list, portfolio_description)
# f = plt.figure()
# ax = f.add_subplot(111)
# ax.plot([1,2,3,4], color='r')
# ax.axis('off')
def output_mlp_quantprofile(fig, portfolio):
'''
Export figure to file
'''
print('export figure to file')
#TODO: to prevent clutter: save outputfiles to ./image/folder
# filename = tempfile.NamedTemporaryFile(
## dir='static/temp',
# dir='dcai/static/dcai/',
# suffix='.png', delete=False)
#TODO rewrite static folder reference
filename = "apicc/static/dcai/momentum_{}.png".format(portfolio)
plotPng = filename.split('/')[-1]
dpi = 250
fig.savefig(filename, dpi=dpi, facecolor='w', edgecolor='w',
# fig.savefig(filename, dpi=250, facecolor='0.7', edgecolor='r',
orientation='landscape', papertype=None, format=None,
transparent=False, bbox_inches='tight', pad_inches=0,
frameon=None)
# filename.close()
# plotPng = filename.name.split('/')[-1]
print(plotPng)
#TODO destroy tempfile after use
return plotPng
# plotPng = output_mlp_quantprofile(f, portfolio)
canvas = FigureCanvasAgg(f)
response = HttpResponse(content_type='image/png')
canvas.print_png(response)
matplotlib.pyplot.close(f)
#TODO find out how to catch temp file name
print("give response")
return response
# return response, plotPng
# else:
# print("not right argument")
def save_cryptoasset(request):
def delete_all_item():
print("clear all entries in the DB")
CryptoassetVersionOne.objects.all().delete()
# delete_all_item()
if request.method == "POST":
print("POST")
# delete_all_item()
form = SubmitCryptoasset(request.POST)
if form.is_valid():
url = form.cleaned_data['url']
url = 'https://api.coinmarketcap.com/v1/ticker/'
r = requests.get(url)
data = r.json()
top_x = TOP_BY_MARKETCAP
data = data[:top_x]
print("Looping through all items.")
#TODO check if exists, if so update, if not create
# check on last updated field
for num, key in enumerate(data):
# for num, key in enumerate(data[:1]):
print(key['rank'], key['symbol'])
# serializer = CryptoassetV1Serializer(data=data[key])
serializer = CryptoassetV1Serializer(data=key)
if serializer.is_valid():
# print("{}. serializer is valid".format(num))
embed = serializer.save()
else:
print("{}. serializer in invalid. Printing errors:".format(num))
print(serializer.errors)
# serializer.is_valid()
                # enumerate() yields 0..len(data)-1, so compare against len(data)-1
                # to return once the final item has been processed.
                if num == len(data) - 1:
                    return render(request, 'embeds.html', {'embed': embed})
else:
print("render form")
form = SubmitCryptoasset()
return render(request, 'apicc/update.html', {'form': form})
# Class based view declarations
class UserViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows users to be viewed or edited.
"""
queryset = User.objects.all()
serializer_class = UserSerializer
|
|
"""Unit tests for the application"""
import io
import os
import sys
import unittest
import colorful
from prettytable import PrettyTable
from src.dojo import Dojo
from src.helpers import get_residents, remove_person, find_room, find_person
class TestSpaceAllocator(unittest.TestCase):
""" Tests"""
def setUp(self):
""" Initial test setup"""
self.dojo = Dojo()
self.testoffice = self.dojo.create_room("office", "testoffice")
self.testlivingspace = self.dojo.create_room(
"living_space", "testlivingspace")
def test_create_room(self):
"""Tests that a room is created successfully"""
initial_room_count = len(self.dojo.rooms)
blue_office = self.dojo.create_room("office", "Blue")
self.assertTrue(blue_office)
new_room_count = len(self.dojo.rooms)
self.assertEqual(new_room_count - initial_room_count, 1)
def test_create_room_multiple(self):
"""Tests that multiple rooms are created at a single time successfully"""
initial_room_count = len(self.dojo.rooms)
offices = self.dojo.create_room("office", "Blue", "Black", "Brown")
self.assertTrue(offices)
new_room_count = len(self.dojo.rooms)
self.assertEqual(new_room_count - initial_room_count, 3)
def test_create_room_duplicate(self):
"""Tests that duplicate rooms are not created"""
        initial_room_count = len(self.dojo.rooms)
        self.testoffice = self.dojo.create_room("office", "testoffice")
        new_room_count = len(self.dojo.rooms)
self.assertEqual(new_room_count - initial_room_count, 0)
def test_add_person(self):
"""Test that person is added to the system"""
initial_person_count = len(self.dojo.people)
person = self.dojo.add_person("Neil", "Armstrong", "Staff")
self.assertTrue(person)
new_person_count = len(self.dojo.people)
self.assertEqual(new_person_count - initial_person_count, 1)
    def test_add_person_has_office(self):
"""Test that a person is assigned an office"""
person = self.dojo.add_person("Neil", "Armstrong", "Staff")
self.assertTrue(person)
self.assertTrue(self.dojo.people[-1].has_office)
def test_add_person_has_living_space(self):
"""Test that person is assigned a living space"""
person = self.dojo.add_person("Eden", "Hazard", "Fellow", "Y")
self.assertTrue(person)
self.assertTrue(self.dojo.people[-1].has_living_space)
def test_add_person_return_type(self):
"""Tests the return type of method add_person"""
person = self.dojo.add_person("Eden", "Hazard", "Fellow", "Y")
self.assertEqual(
{'Person': 'Eden Hazard', 'Rooms': [{'office': 'testoffice'},
{'living_space': 'testlivingspace'}]}, person)
def test_add_person_maximum(self):
"""Tests that the maximum number of people is not exceeded"""
self.dojo.add_person("Neil", "Armstrong", "Staff", "Y")
self.dojo.add_person("Harry", "Kane", "Fellow", "Y")
self.dojo.add_person("Eden", "Hazard", "Staff", "Y")
self.dojo.add_person("Ngolo", "Kante", "Staff", "Y")
self.dojo.add_person("Eric", "Dier", "Staff", "Y")
self.dojo.add_person("Dele", "Ali", "Fellow", "Y")
self.dojo.add_person("Diego", "Costa", "Fellow", "Y")
self.dojo.add_person("Willian", "Borges", "Staff", "Y")
self.dojo.add_person("Tibaut", "Courtois", "Fellow", "Y")
self.assertEqual(len(self.testoffice.residents), 6)
def test_print_room(self):
"""Tests the output of print_room"""
self.dojo.add_person("Neil", "Armstrong", "Staff", "Y")
self.dojo.add_person("Harry", "Kane", "Fellow", "Y")
self.dojo.add_person("Eden", "Hazard", "Staff", "Y")
self.dojo.add_person("Ngolo", "Kante", "Staff", "Y")
self.dojo.add_person("Eric", "Dier", "Staff", "Y")
self.dojo.add_person("Dele", "Ali", "Fellow", "Y")
self.dojo.add_person("Diego", "Costa", "Fellow", "Y")
self.dojo.add_person("Willian", "Borges", "Staff", "Y")
self.dojo.add_person("Tibaut", "Courtois", "Fellow", "Y")
result = self.dojo.print_room("testoffice")
non_existent_room = self.dojo.print_room("test room")
self.assertEqual(
['Neil Armstrong', 'Harry Kane', 'Eden Hazard', 'Ngolo Kante',
'Eric Dier', 'Dele Ali'], result)
self.assertFalse(non_existent_room)
def test_reallocate_person(self):
"""Tests that correct information is printed on print_allocations"""
dojo = Dojo()
test_office = dojo.create_room("office", "testoffice")
another_test_office = dojo.create_room("office", "orange")
dojo.add_person("Neil", "Armstrong", "Staff", "Y")
person = dojo.people[0]
old_office = [elem['office']
for elem in person.rooms_occupied if 'office' in elem]
result1 = dojo.print_room(old_office[0])
self.assertIn("Neil Armstrong", result1)
un_occupied_room = test_office if not test_office.residents else another_test_office
print(un_occupied_room.name)
dojo.reallocate_person(1, un_occupied_room.name)
result2 = dojo.print_room(old_office[0])
self.assertNotIn("Neil Armstrong", result2)
def test_print_allocations(self):
"""Tests the output of print_allocations"""
self.dojo.add_person("Dele", "Ali", "Fellow", "Y")
result = self.dojo.print_allocations()
print(result)
self.assertEqual([{'testoffice': ['DELE ALI']}, {
'testlivingspace': ['DELE ALI']}], result)
def test_print_allocations_on_file(self):
"""Tests that correct output is written to the file
"""
self.dojo.add_person("Dele", "Ali", "Fellow", "Y")
result = self.dojo.print_allocations("allocations.txt", "N")
file = open("allocations.txt").read()
self.assertTrue("Room: testoffice" in file)
self.assertTrue("DELE ALI" in file)
self.assertTrue("Room: testlivingspace" in file)
self.assertEqual([{'testoffice': ['DELE ALI']}, {
'testlivingspace': ['DELE ALI']}], result)
def test_print_allocations_tabular_view(self):
"""Tests that tabular data is output on print_allocations"""
# Create StringIO object and redirect output
self.dojo.add_person("Dele", "Ali", "Fellow", "Y")
program_captured_output = io.StringIO()
sys.stdout = program_captured_output
self.dojo.print_allocations("", "Y")
sys.stdout = sys.__stdout__
table = PrettyTable(['Name', 'Type', 'Office', 'Living Space'])
table.add_row(["Dele Ali", "fellow", "testoffice", "testlivingspace"])
captured_output = io.StringIO()
sys.stdout = captured_output
print(colorful.blue("List showing people with space and their respective rooms"))
print(colorful.blue(table))
sys.stdout = sys.__stdout__
print(program_captured_output.getvalue().strip())
print(captured_output.getvalue())
self.assertTrue(captured_output.getvalue().strip()
in program_captured_output.getvalue().strip())
def test_print_unallocated_tabular(self):
"""Tests that tabular data is output on test_tabular_output_on_print_unallocated"""
dojo = Dojo()
dojo.add_person("Kylian", "Mbappe", "Fellow", "Y")
dojo.add_person("Gianluggi", "Buffon", "Fellow", "N")
dojo.create_room("office", "red")
dojo.add_person("Timoue", "Bakayoko", "Fellow", "Y")
program_captured_output = io.StringIO()
sys.stdout = program_captured_output
dojo.print_unallocated()
sys.stdout = sys.__stdout__
table = PrettyTable(['Name', 'Person id', 'Missing'])
table.add_row(["Kylian Mbappe", "1", "Office and Living Space"])
table.add_row(["Gianluggi Buffon", "2", "Office"])
table.add_row(["Timoue Bakayoko", "3", "Living Space"])
captured_output = io.StringIO()
sys.stdout = captured_output
print(colorful.blue("Table showing people along with missing rooms"))
print(colorful.blue(table))
sys.stdout = sys.__stdout__
print(program_captured_output.getvalue().strip())
print(captured_output.getvalue())
self.assertTrue(captured_output.getvalue().strip()
in program_captured_output.getvalue().strip())
def test_reallocate_person_no_office(self):
"""Tests reallocate if person had no office"""
dojo = Dojo()
dojo.add_person("John", "Ashaba", "Staff", "Y")
dojo.create_room("office", "orange")
dojo.reallocate_person(1, "orange")
target_room = find_room(dojo.rooms, "orange")
person = find_person(dojo.people, 1)
self.assertIn(person, target_room.residents)
def test_reallocate_person_no_living_space(self):
"""Tests reallocate if person had no living space"""
self.dojo.add_person("John", "Ashaba", "Staff", "Y")
self.dojo.create_room("living_space", "gorrilla")
self.dojo.reallocate_person(1, "gorrilla")
target_room = find_room(self.dojo.rooms, "gorrilla")
person = find_person(self.dojo.people, 1)
self.assertIn(person, target_room.residents)
def test_load_people(self):
"""Tests that person exists after load_people"""
self.dojo.load_people("resources/people.txt")
last_person = find_person(self.dojo.people, 7)
self.assertIn(last_person, self.dojo.people)
def test_if_person_exists_in_target_room_after_reallocation(self):
"""Tests that person exists after reallocation"""
self.dojo.create_room("office", "orange")
self.dojo.create_room("living_space", "lion")
self.dojo.add_person("John", "Ashaba", "Fellow", "Y")
person = self.dojo.people[0]
old_office = [elem['office']
for elem in person.rooms_occupied if 'office' in elem]
old_living_space = [
elem['living_space']
for elem in person.rooms_occupied if 'living_space' in elem]
result1 = self.dojo.print_room(old_office[0])
result2 = self.dojo.print_room(old_living_space[0])
self.assertIn("John Ashaba", result1)
self.assertIn("John Ashaba", result2)
self.dojo.reallocate_person(1, "orange")
self.dojo.reallocate_person(1, "lion")
target_office_room = find_room(self.dojo.rooms, "orange")
        target_living_room = find_room(self.dojo.rooms, "lion")
person = find_person(self.dojo.people, 1)
self.assertIn(person, target_office_room.residents)
self.assertIn(person, target_living_room.residents)
def test_persists_data(self):
"""Tests that the application persists data"""
dojo1 = Dojo()
dojo1.create_room("office", "orange")
dojo1.add_person("John", "Ashaba", "Staff", "Y")
if os.path.exists("resources/testdb.db"):
os.remove("resources/testdb.db")
dojo1.save_state("testdb.db")
dojo2 = Dojo()
dojo2.load_state("resources/testdb.db")
room = find_room(dojo2.rooms, "orange")
self.assertIn(room, dojo2.rooms)
|
|
from functools import wraps
from flask import \
render_template, redirect, url_for, \
abort, flash, request, current_app, \
make_response
from flask_login import \
login_required, current_user, \
login_user, logout_user
from sqlalchemy.orm import lazyload
from datetime import date, datetime, timedelta, time
from calendar import Calendar
from urllib.parse import quote
from .models import *
from .forms import *
################################################################################
print('Initializing views')
# fix_month_year :: Month -> Year -> (Month, Year)
def fix_month_year(m, y):
while m > 12:
m -= 12
y += 1
while m < 1:
m += 12
y -= 1
return (m, y)
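# e.g. fix_month_year(13, 2016) == (1, 2017) and fix_month_year(0, 2016) == (12, 2015)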
# Returns the date of the first day of the next month
def get_next_month(this_month):
(m, y) = fix_month_year(this_month.month + 1, this_month.year)
return date(y, m, 1)
# Returns the date of the first day of the prev month
def get_prev_month(this_month):
(m, y) = fix_month_year(this_month.month - 1, this_month.year)
return date(y, m, 1)
#TODO fix validity checker
valid_paths = ['/', '/login', '/logout', '/register']
def next_is_valid(next):
for i in valid_paths:
if next == i:
return True
return False
def logout_required(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if current_user.is_anonymous:
return f(*args, **kwargs)
return redirect(url_for('calendar_route'))
return decorated_function
def parse_date_from_args(args):
today = date.today()
try:
        s_year = int(args.get('year') or today.year)
        s_month = int(args.get('month') or today.month)
        s_day = int(args.get('day') or today.day)
return date(s_year, s_month, s_day)
    except (TypeError, ValueError):
return today
################################################################################
# / redirects to calendar
@current_app.route('/')
def root():
return redirect(url_for('calendar_route'))
################################################################################
# Login form
# A GET request displays the login form
# A POST request checks the user's credentials, logs the user in and redirects to the calendar
@current_app.route('/login', methods=['GET', 'POST'])
@logout_required
def login_route():
form = LoginForm()
if form.validate_on_submit():
username = request.form['username']
password = request.form['password']
user = User.query.filter_by(username=username).first()
if user is None or not user.verify_password(password):
flash('Invalid username/password pair')
return render_template('login.html', title='Sign In', form=form)
login_user(user)
flash('Logged in as {}'.format(username))
next = request.args.get('next')
if next_is_valid(next):
return redirect(next)
return redirect(url_for('calendar_route'))
return render_template('login.html', title='Sign In', form=form)
################################################################################
# Logs out the user
# Redirects to calendar
@current_app.route('/logout')
@login_required
def logout_route():
logout_user()
flash('Logged out')
return redirect(url_for('calendar_route'))
################################################################################
# On GET displays the registration form
# On POST checks whether the username is already taken,
# adds a user entry and redirects to login
@current_app.route('/register', methods=['GET','POST'])
@logout_required
def register_route():
form = RegisterForm()
if form.validate_on_submit():
username = request.form['username']
pw = request.form['password']
pw_retype = request.form['retype_password']
same_count = User.query.filter_by(username=username).count()
if same_count != 0:
flash('Username {} already taken'.format(username))
elif pw != pw_retype:
flash('Passwords do not match')
else:
new_user = User(username, pw)
db.session.add(new_user)
db.session.commit()
flash('Successfully registered')
return redirect(url_for('login_route'))
return render_template('register.html', title='Register', form=form)
################################################################################
# get_month_events :: Year -> Month -> [[Day]]
# type Day = (Date, [Event])
def get_month_events(year, month):
# Get the day-dates of the current month
cal = Calendar(0) # default replace by user db? (starting day)
the_month = cal.monthdatescalendar(year, month)
# First day of first week
begin = the_month[0][0]
# Last day of last week
end = the_month[-1][-1]
events = Event.query.filter(
Event.event_date > begin.strftime('%Y-%m-%d'),
Event.event_date < end.strftime('%Y-%m-%d')) \
.options(lazyload('creator')).all()
# Load the days for the calendar
def per_day(day):
# Get the interval bounds of that day
day_start = datetime.combine(day, time())
day_end = day_start + timedelta(days = 1)
# Run through all events
day_events = []
for e in events:
if e.event_date >= day_start and e.event_date < day_end:
day_events.append(e)
return (day, day_events)
def per_week(week):
return [per_day(d) for d in week]
def per_month(month):
return [per_week(w) for w in month]
return per_month(the_month)
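# The result is a list of week rows, each a list of 7 (date, [Event]) pairs, e.g.
#   weeks = get_month_events(2016, 2)
#   weeks[0][0]  ->  (date(2016, 2, 1), [<events on that day>])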
# Load the event ids of all your subscribed events.
# Anon users are not subscribed to anything.
def get_subscribed_event_ids():
if not current_user.is_anonymous:
your_subscriptions = db.session.query(Subscription.event_id) \
.filter(Subscription.user_id == current_user.id).all()
return [x for (x,) in your_subscriptions]
return []
# Returns a list of the next #limit events.
# If only_yours is True, only your events are listed.
# If start_date is None, today is chosen.
# If end_date is set, only events up to that date are queried.
def get_next_events(limit, start_date = None, end_date = None, only_yours = True):
# Load the events for the event sidebar
start_date = start_date or datetime.today()
query = db.session.query(Event)
if only_yours and not current_user.is_anonymous:
query = query.join(Subscription) \
.filter(Subscription.user_id == current_user.id)
query = query.filter(Event.event_date > start_date)
if end_date:
query = query.filter(Event.event_date < end_date)
query = query \
.options(lazyload('creator')) \
.order_by(Event.event_date)
if limit:
query = query.limit(limit)
return query.all()
# get_day_events :: [Event]
def get_day_events(year, month, day):
today = date(year,month,day)
tomorrow = today + timedelta(days=1)
events = Event.query.filter(
Event.event_date > today.strftime('%Y-%m-%d'),
Event.event_date < tomorrow.strftime('%Y-%m-%d')).all()
return events
################################################################################
# Route to subscribe for events. Requires the 'subscribe'-field.
# If the current user is not already subscriber for that event, a subscription
# with 'Yes' and an empty comment is added.
# This route redirects to the submitted 'next' address, or back to the calendar-view.
@current_app.route('/subscribe', methods=['POST'])
@login_required
def subscribe_route():
subscribe = request.form.get('subscribe')
if subscribe is not None:
# Integrity checks
optionally_redundant_subscriptions = Subscription.query\
.filter(Subscription.event_id == int(subscribe))\
.filter(Subscription.user_id == current_user.id).all()
if not optionally_redundant_subscriptions:
s = Subscription(current_user.id, int(subscribe))
db.session.add(s)
db.session.commit()
flash('Subscribed to event')
else:
flash('Already subscribed to that event')
next = request.form.get('next') or url_for('calendar_route')
return redirect(next)
################################################################################
# displays the calendar.
# If year and month are submitted, displays a month view
# If the day is also submitted displays a day-view
@current_app.route('/calendar', methods=['GET'])
def calendar_route():
now = date.today()
year = int(request.args.get('year') or now.year)
month = int(request.args.get('month') or now.month)
day = request.args.get('day')
# Get the current (localized) month name.
at_month = date(year, month, 1)
month_name = at_month.strftime("%B")
# Month view
if day is None:
day_events = get_month_events(year, month)
your_subscriptions = get_subscribed_event_ids()
next_events = get_next_events(limit = 5)
return render_template(
'calendar_month.html',
title='Calendar',
day_events = day_events,
your_subscriptions = your_subscriptions,
next_events = next_events,
month_name=month_name)
# Day view
day = int(day)
day_and_events = get_day_events(year, month, day)
#day_and_events = [(e,e.creator) for e in day_and_events]
return render_template(
'calendar_day.html',
title='Calendar',
year=year,
month=month,
day=day,
month_name=month_name,
this_path=quote(request.path),
day_and_events=day_and_events)
################################################################################
# Route for /event
# Handles GET requests, which display the event. Event owners are presented with an edit form
# The right side of this view contains the list of subscribers, who can edit their own subscriptions.
@current_app.route('/event', methods=['GET'])
def event_route():
event_id = request.args.get('id')
if event_id is None:
flash('Event id required for "/event"')
return redirect(url_for('calendar_route'))
event = Event.query.filter(Event.id == event_id).first()
if event is None:
flash('Event with id ' + event_id + ' not found')
return redirect(url_for('calendar_route'))
# Helper function to create a subscription_form on the fly.
def make_subscription_form(subscr):
subscr_form = SubscriptionForm()
subscr_form.subscriptionid.data = subscr.id
subscr_form.comment.data = subscr.comment
subscr_form.commitment.data = subscr.commitment
return subscr_form
event_form = EditForm()
event_form.id.data = event.id
event_form.name.data = event.name
event_form.date.data = event.event_date.date()
event_form.time.data = event.event_date.time()
event_form.description.data = event.description
# Additional fields
event_form.timeleft = event.remaining_time
event_form.creatorid = event.creator_id
cuser_is_subscribed = False
if not current_user.is_anonymous:
        current_user_subscribed = Subscription.query \
            .filter(Subscription.event_id == event.id) \
            .filter(Subscription.user_id == current_user.id).first()
        cuser_is_subscribed = current_user_subscribed is not None
return render_template(
'event.html',
title = 'Event',
is_subscribed = cuser_is_subscribed,
subscriptions = event.subscriptions,
make_subscription_form = make_subscription_form,
event_form = event_form)
################################################################################
# Handles POST requests, which are used to edit the event.
# Redirects to /event?id={{request.form['eventid']}}
@current_app.route('/edit_event', methods=['POST'])
def edit_event_route():
id = request.form['id']
event_form = EditForm()
if event_form.validate_on_submit():
event = Event.query.filter(Event.id == int(id)).one()
if event.creator_id == current_user.id:
event.name = request.form['name']
date = event_form.date.data
time = event_form.time.data
event.event_date = datetime.combine(date, time)
event.description = request.form['description']
db.session.commit()
flash('Event updated')
else:
flash('You cannot edit this event')
return redirect(url_for('event_route', id=id))
################################################################################
@current_app.route('/edit_subscription', methods=['POST'])
def edit_subscription_route():
form = SubscriptionForm()
if form.validate_on_submit():
subscription = Subscription.query \
.filter(Subscription.id == request.form.get('subscriptionid')) \
.options(lazyload('user')) \
.options(lazyload('event')).one()
subscription.comment = request.form.get('comment')
subscription.commitment = request.form.get('commitment')
db.session.commit()
flash('Subscription updated')
return redirect(url_for('event_route', id=subscription.event_id))
################################################################################
@current_app.route('/subscriptions', methods=['GET'])
@login_required
def subscriptions_route():
# events = current_user.subscriptions
events = db.session.query(Event).join(Subscription) \
.filter(Subscription.user_id==current_user.id) \
.order_by(Event.event_date).all()
return render_template(
'subscriptions.html',
title='Your subscriptions',
events=events)
################################################################################
# The get request may contain a predefined year, month, day.
# If these parameters are omitted, the current ones are chosen.
@current_app.route('/add', methods=['GET', 'POST'])
@login_required
def add_route():
form = EventForm()
if form.validate_on_submit():
event_name = request.form['name']
date = form.date.data
time = form.time.data
event_date = datetime.combine(date, time)
event_descr = request.form['description']
e = Event(event_date, event_name, event_descr, current_user.id)
db.session.add(e)
db.session.commit()
s = Subscription(current_user.id, e.id)
db.session.add(s)
db.session.commit()
return redirect(url_for('event_route', id=e.id))
form.date.data = parse_date_from_args(request.args)
#form.time.data = parse_date_from_args(request.args)
return render_template(
'add_event.html',
form=form,
title='Add Event')
################################################################################
@current_app.route('/<other>', methods=['GET', 'POST'])
def not_found(other = None):
flash('Invalid path: {}'.format(other))
return redirect(url_for('calendar_route'))
################################################################################
"""
# Returns the localized month name
# Assuming date(2016, month, 1) returns the first day of month=month
def month_to_name(month):
at_month = date(2016, month, 1)
return at_month.strftime("%B")
# Returns a list of weeks where each week contains a list of days
# Hardcoding Monday to be the first day of the week
def date_to_weeks(year, month):
start_date = date(year, month, 1)
day_of_week = start_date.weekday()
one_day = timedelta(1)
start_date -= day_of_week * one_day
weeks = []
while start_date.month <= month:
week = []
for i in range(0,7):
week.append(start_date.strftime("%A"))
start_date += one_day
weeks.append(week)
return weeks
# Can be performed once! TODO
# Returns a list of localized weekday names
# Hardcoding Monday to be the first day of the week
def week_to_days():
now = date.today()
one_day = timedelta(1)
monday = now - now.weekday() * one_day
weekdays = []
for i in range(0,7):
weekdays.append((monday + timedelta(i)).strftime("%A"))
return weekdays
"""
|
|
# byteplay - Python bytecode assembler/disassembler.
# Copyright (C) 2006-2010 Noam Yorav-Raphael
# Homepage: http://code.google.com/p/byteplay
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# Many thanks to Greg X for adding support for Python 2.6 and 2.7!
__version__ = '0.2'
__all__ = ['opmap', 'opname', 'opcodes',
'cmp_op', 'hasarg', 'hasname', 'hasjrel', 'hasjabs',
'hasjump', 'haslocal', 'hascompare', 'hasfree', 'hascode',
'hasflow', 'getse',
'Opcode', 'SetLineno', 'Label', 'isopcode', 'Code',
'CodeList', 'printcodelist']
import opcode
from dis import findlabels
import types
from array import array
import operator
import itertools
import sys
import warnings
from cStringIO import StringIO
######################################################################
# Define opcodes and information about them
python_version = '.'.join(str(x) for x in sys.version_info[:2])
if python_version not in ('2.4', '2.5', '2.6', '2.7'):
warnings.warn("byteplay doesn't support Python version "+python_version)
class Opcode(int):
"""An int which represents an opcode - has a nicer repr."""
def __repr__(self):
return opname[self]
__str__ = __repr__
class CodeList(list):
"""A list for storing opcode tuples - has a nicer __str__."""
def __str__(self):
f = StringIO()
printcodelist(self, f)
return f.getvalue()
opmap = dict((name.replace('+', '_'), Opcode(code))
for name, code in opcode.opmap.iteritems()
if name != 'EXTENDED_ARG')
opname = dict((code, name) for name, code in opmap.iteritems())
opcodes = set(opname)
def globalize_opcodes():
for name, code in opmap.iteritems():
globals()[name] = code
__all__.append(name)
globalize_opcodes()
cmp_op = opcode.cmp_op
hasarg = set(x for x in opcodes if x >= opcode.HAVE_ARGUMENT)
hasconst = set(Opcode(x) for x in opcode.hasconst)
hasname = set(Opcode(x) for x in opcode.hasname)
hasjrel = set(Opcode(x) for x in opcode.hasjrel)
hasjabs = set(Opcode(x) for x in opcode.hasjabs)
hasjump = hasjrel.union(hasjabs)
haslocal = set(Opcode(x) for x in opcode.haslocal)
hascompare = set(Opcode(x) for x in opcode.hascompare)
hasfree = set(Opcode(x) for x in opcode.hasfree)
hascode = set([MAKE_FUNCTION, MAKE_CLOSURE])
class _se:
"""Quick way of defining static stack effects of opcodes"""
# Taken from assembler.py by Phillip J. Eby
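    # Each attribute is a (pop, push) pair, e.g. BINARY_ADD = 2,1 below means the
    # opcode pops two operands and pushes one result.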
NOP = 0,0
POP_TOP = 1,0
ROT_TWO = 2,2
ROT_THREE = 3,3
ROT_FOUR = 4,4
DUP_TOP = 1,2
UNARY_POSITIVE = UNARY_NEGATIVE = UNARY_NOT = UNARY_CONVERT = \
UNARY_INVERT = GET_ITER = LOAD_ATTR = 1,1
IMPORT_FROM = 1,2
BINARY_POWER = BINARY_MULTIPLY = BINARY_DIVIDE = BINARY_FLOOR_DIVIDE = \
BINARY_TRUE_DIVIDE = BINARY_MODULO = BINARY_ADD = BINARY_SUBTRACT = \
BINARY_SUBSCR = BINARY_LSHIFT = BINARY_RSHIFT = BINARY_AND = \
BINARY_XOR = BINARY_OR = COMPARE_OP = 2,1
INPLACE_POWER = INPLACE_MULTIPLY = INPLACE_DIVIDE = \
INPLACE_FLOOR_DIVIDE = INPLACE_TRUE_DIVIDE = INPLACE_MODULO = \
INPLACE_ADD = INPLACE_SUBTRACT = INPLACE_LSHIFT = INPLACE_RSHIFT = \
INPLACE_AND = INPLACE_XOR = INPLACE_OR = 2,1
SLICE_0, SLICE_1, SLICE_2, SLICE_3 = \
(1,1),(2,1),(2,1),(3,1)
STORE_SLICE_0, STORE_SLICE_1, STORE_SLICE_2, STORE_SLICE_3 = \
(2,0),(3,0),(3,0),(4,0)
DELETE_SLICE_0, DELETE_SLICE_1, DELETE_SLICE_2, DELETE_SLICE_3 = \
(1,0),(2,0),(2,0),(3,0)
STORE_SUBSCR = 3,0
DELETE_SUBSCR = STORE_ATTR = 2,0
DELETE_ATTR = STORE_DEREF = 1,0
PRINT_NEWLINE = 0,0
PRINT_EXPR = PRINT_ITEM = PRINT_NEWLINE_TO = IMPORT_STAR = 1,0
STORE_NAME = STORE_GLOBAL = STORE_FAST = 1,0
PRINT_ITEM_TO = 2,0
LOAD_LOCALS = LOAD_CONST = LOAD_NAME = LOAD_GLOBAL = LOAD_FAST = \
LOAD_CLOSURE = LOAD_DEREF = BUILD_MAP = 0,1
DELETE_FAST = DELETE_GLOBAL = DELETE_NAME = 0,0
EXEC_STMT = 3,0
BUILD_CLASS = 3,1
STORE_MAP = MAP_ADD = 2,0
SET_ADD = 1,0
if python_version == '2.4':
YIELD_VALUE = 1,0
IMPORT_NAME = 1,1
LIST_APPEND = 2,0
elif python_version == '2.5':
YIELD_VALUE = 1,1
IMPORT_NAME = 2,1
LIST_APPEND = 2,0
elif python_version == '2.6':
YIELD_VALUE = 1,1
IMPORT_NAME = 2,1
LIST_APPEND = 2,0
elif python_version == '2.7':
YIELD_VALUE = 1,1
IMPORT_NAME = 2,1
LIST_APPEND = 1,0
_se = dict((op, getattr(_se, opname[op]))
for op in opcodes
if hasattr(_se, opname[op]))
hasflow = opcodes - set(_se) - \
set([CALL_FUNCTION, CALL_FUNCTION_VAR, CALL_FUNCTION_KW,
CALL_FUNCTION_VAR_KW, BUILD_TUPLE, BUILD_LIST,
UNPACK_SEQUENCE, BUILD_SLICE, DUP_TOPX,
RAISE_VARARGS, MAKE_FUNCTION, MAKE_CLOSURE])
if python_version == '2.7':
hasflow = hasflow - set([BUILD_SET])
def getse(op, arg=None):
"""Get the stack effect of an opcode, as a (pop, push) tuple.
If an arg is needed and is not given, a ValueError is raised.
If op isn't a simple opcode, that is, the flow doesn't always continue
to the next opcode, a ValueError is raised.
"""
try:
return _se[op]
except KeyError:
# Continue to opcodes with an effect that depends on arg
pass
if arg is None:
raise ValueError("Opcode stack behaviour depends on arg")
def get_func_tup(arg, nextra):
if arg > 0xFFFF:
raise ValueError("Can only split a two-byte argument")
return (nextra + 1 + (arg & 0xFF) + 2*((arg >> 8) & 0xFF),
1)
if op == CALL_FUNCTION:
return get_func_tup(arg, 0)
elif op == CALL_FUNCTION_VAR:
return get_func_tup(arg, 1)
elif op == CALL_FUNCTION_KW:
return get_func_tup(arg, 1)
elif op == CALL_FUNCTION_VAR_KW:
return get_func_tup(arg, 2)
elif op == BUILD_TUPLE:
return arg, 1
elif op == BUILD_LIST:
return arg, 1
elif python_version == '2.7' and op == BUILD_SET:
return arg, 1
elif op == UNPACK_SEQUENCE:
return 1, arg
elif op == BUILD_SLICE:
return arg, 1
elif op == DUP_TOPX:
return arg, arg*2
elif op == RAISE_VARARGS:
return 1+arg, 1
elif op == MAKE_FUNCTION:
return 1+arg, 1
elif op == MAKE_CLOSURE:
if python_version == '2.4':
raise ValueError("The stack effect of MAKE_CLOSURE depends on TOS")
else:
return 2+arg, 1
else:
raise ValueError("The opcode %r isn't recognized or has a special "
"flow control" % op)
class SetLinenoType(object):
def __repr__(self):
return 'SetLineno'
SetLineno = SetLinenoType()
class Label(object):
pass
def isopcode(obj):
"""Return whether obj is an opcode - not SetLineno or Label"""
return obj is not SetLineno and not isinstance(obj, Label)
# Flags from code.h
CO_OPTIMIZED = 0x0001 # use LOAD/STORE_FAST instead of _NAME
CO_NEWLOCALS = 0x0002 # only cleared for module/exec code
CO_VARARGS = 0x0004
CO_VARKEYWORDS = 0x0008
CO_NESTED = 0x0010 # ???
CO_GENERATOR = 0x0020
CO_NOFREE = 0x0040 # set if no free or cell vars
CO_GENERATOR_ALLOWED = 0x1000 # unused
# The future flags are only used on code generation, so we can ignore them.
# (It does cause some warnings, though.)
CO_FUTURE_DIVISION = 0x2000
CO_FUTURE_ABSOLUTE_IMPORT = 0x4000
CO_FUTURE_WITH_STATEMENT = 0x8000
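# e.g. the code object of a simple generator function (fast locals only, no free or
# cell vars) typically carries CO_OPTIMIZED | CO_NEWLOCALS | CO_GENERATOR | CO_NOFREE == 0x63.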
######################################################################
# Define the Code class
class Code(object):
"""An object which holds all the information which a Python code object
holds, but in an easy-to-play-with representation.
The attributes are:
Affecting action
----------------
code - list of 2-tuples: the code
freevars - list of strings: the free vars of the code (those are names
of variables created in outer functions and used in the function)
args - list of strings: the arguments of the code
varargs - boolean: Does args end with a '*args' argument
varkwargs - boolean: Does args end with a '**kwargs' argument
newlocals - boolean: Should a new local namespace be created.
(True in functions, False for module and exec code)
Not affecting action
--------------------
name - string: the name of the code (co_name)
filename - string: the file name of the code (co_filename)
firstlineno - int: the first line number (co_firstlineno)
docstring - string or None: the docstring (the first item of co_consts,
if it's str or unicode)
code is a list of 2-tuples. The first item is an opcode, or SetLineno, or a
Label instance. The second item is the argument, if applicable, or None.
code can be a CodeList instance, which will produce nicer output when
being printed.
"""
def __init__(self, code, freevars, args, varargs, varkwargs, newlocals,
name, filename, firstlineno, docstring):
self.code = code
self.freevars = freevars
self.args = args
self.varargs = varargs
self.varkwargs = varkwargs
self.newlocals = newlocals
self.name = name
self.filename = filename
self.firstlineno = firstlineno
self.docstring = docstring
@staticmethod
def _findlinestarts(code):
"""Find the offsets in a byte code which are start of lines in the
source.
Generate pairs (offset, lineno) as described in Python/compile.c.
This is a modified version of dis.findlinestarts, which allows multiple
"line starts" with the same line number.
"""
byte_increments = [ord(c) for c in code.co_lnotab[0::2]]
line_increments = [ord(c) for c in code.co_lnotab[1::2]]
lineno = code.co_firstlineno
addr = 0
for byte_incr, line_incr in zip(byte_increments, line_increments):
if byte_incr:
yield (addr, lineno)
addr += byte_incr
lineno += line_incr
yield (addr, lineno)
@classmethod
def from_code(cls, co):
"""Disassemble a Python code object into a Code object."""
co_code = co.co_code
labels = dict((addr, Label()) for addr in findlabels(co_code))
linestarts = dict(cls._findlinestarts(co))
cellfree = co.co_cellvars + co.co_freevars
code = CodeList()
n = len(co_code)
i = 0
extended_arg = 0
while i < n:
op = Opcode(ord(co_code[i]))
if i in labels:
code.append((labels[i], None))
if i in linestarts:
code.append((SetLineno, linestarts[i]))
i += 1
if op in hascode:
lastop, lastarg = code[-1]
if lastop != LOAD_CONST:
raise ValueError(
"%s should be preceded by LOAD_CONST code" % op)
code[-1] = (LOAD_CONST, Code.from_code(lastarg))
if op not in hasarg:
code.append((op, None))
else:
arg = ord(co_code[i]) + ord(co_code[i+1])*256 + extended_arg
extended_arg = 0
i += 2
if op == opcode.EXTENDED_ARG:
extended_arg = arg << 16
elif op in hasconst:
code.append((op, co.co_consts[arg]))
elif op in hasname:
code.append((op, co.co_names[arg]))
elif op in hasjabs:
code.append((op, labels[arg]))
elif op in hasjrel:
code.append((op, labels[i + arg]))
elif op in haslocal:
code.append((op, co.co_varnames[arg]))
elif op in hascompare:
code.append((op, cmp_op[arg]))
elif op in hasfree:
code.append((op, cellfree[arg]))
else:
code.append((op, arg))
varargs = bool(co.co_flags & CO_VARARGS)
varkwargs = bool(co.co_flags & CO_VARKEYWORDS)
newlocals = bool(co.co_flags & CO_NEWLOCALS)
args = co.co_varnames[:co.co_argcount + varargs + varkwargs]
if co.co_consts and isinstance(co.co_consts[0], basestring):
docstring = co.co_consts[0]
else:
docstring = None
return cls(code = code,
freevars = co.co_freevars,
args = args,
varargs = varargs,
varkwargs = varkwargs,
newlocals = newlocals,
name = co.co_name,
filename = co.co_filename,
firstlineno = co.co_firstlineno,
docstring = docstring,
)
def __eq__(self, other):
if (self.freevars != other.freevars or
self.args != other.args or
self.varargs != other.varargs or
self.varkwargs != other.varkwargs or
self.newlocals != other.newlocals or
self.name != other.name or
self.filename != other.filename or
self.firstlineno != other.firstlineno or
self.docstring != other.docstring or
len(self.code) != len(other.code)
):
return False
# Compare code. This isn't trivial because labels need to correspond to
# each other, not compare equal.
labelmapping = {}
for (op1, arg1), (op2, arg2) in itertools.izip(self.code, other.code):
if isinstance(op1, Label):
if labelmapping.setdefault(op1, op2) is not op2:
return False
else:
if op1 != op2:
return False
if op1 in hasjump:
if labelmapping.setdefault(arg1, arg2) is not arg2:
return False
elif op1 in hasarg:
if arg1 != arg2:
return False
return True
def _compute_flags(self):
opcodes = set(op for op, arg in self.code if isopcode(op))
optimized = (STORE_NAME not in opcodes and
LOAD_NAME not in opcodes and
DELETE_NAME not in opcodes)
generator = (YIELD_VALUE in opcodes)
nofree = not (opcodes.intersection(hasfree))
flags = 0
if optimized: flags |= CO_OPTIMIZED
if self.newlocals: flags |= CO_NEWLOCALS
if self.varargs: flags |= CO_VARARGS
if self.varkwargs: flags |= CO_VARKEYWORDS
if generator: flags |= CO_GENERATOR
if nofree: flags |= CO_NOFREE
return flags
def _compute_stacksize(self):
"""Get a code list, compute its maximal stack usage."""
# This is done by scanning the code, and computing for each opcode
# the stack state at the opcode.
code = self.code
# A mapping from labels to their positions in the code list
label_pos = dict((op, pos)
for pos, (op, arg) in enumerate(code)
if isinstance(op, Label))
# sf_targets are the targets of SETUP_FINALLY opcodes. They are recorded
# because they have special stack behaviour. If an exception was raised
# in the block pushed by a SETUP_FINALLY opcode, the block is popped
# and 3 objects are pushed. On return or continue, the block is popped
# and 2 objects are pushed. If nothing happened, the block is popped by
# a POP_BLOCK opcode and 1 object is pushed by a (LOAD_CONST, None)
# operation.
#
# Our solution is to record the stack state of SETUP_FINALLY targets
# as having 3 objects pushed, which is the maximum. However, to make
# stack recording consistent, the get_next_stacks function will always
# yield the stack state of the target as if 1 object was pushed, but
# this will be corrected in the actual stack recording.
sf_targets = set(label_pos[arg]
for op, arg in code
if op == SETUP_FINALLY)
# What we compute - for each opcode, its stack state, as an n-tuple.
# n is the number of blocks pushed. For each block, we record the number
# of objects pushed.
stacks = [None] * len(code)
def get_next_stacks(pos, curstack):
"""Get a code position and the stack state before the operation
was done, and yield pairs (pos, curstack) for the next positions
to be explored - those are the positions to which you can get
from the given (pos, curstack).
If the given position was already explored, nothing will be yielded.
"""
op, arg = code[pos]
if isinstance(op, Label):
# We should check if we already reached a node only if it is
# a label.
if pos in sf_targets:
curstack = curstack[:-1] + (curstack[-1] + 2,)
if stacks[pos] is None:
stacks[pos] = curstack
else:
if stacks[pos] != curstack:
raise ValueError("Inconsistent code")
return
def newstack(n):
# Return a new stack, modified by adding n elements to the last
# block
if curstack[-1] + n < 0:
raise ValueError("Popped a non-existing element")
return curstack[:-1] + (curstack[-1]+n,)
if not isopcode(op):
# label or SetLineno - just continue to next line
yield pos+1, curstack
elif op in (STOP_CODE, RETURN_VALUE, RAISE_VARARGS):
# No place in particular to continue to
pass
elif op == MAKE_CLOSURE and python_version == '2.4':
# This is only relevant in Python 2.4 - in Python 2.5 the stack
# effect of MAKE_CLOSURE can be calculated from the arg.
# In Python 2.4, it depends on the number of freevars of TOS,
# which should be a code object.
if pos == 0:
raise ValueError("MAKE_CLOSURE can't be the first opcode")
lastop, lastarg = code[pos-1]
if lastop != LOAD_CONST:
raise ValueError(
"MAKE_CLOSURE should come after a LOAD_CONST op")
try:
nextrapops = len(lastarg.freevars)
except AttributeError:
try:
nextrapops = len(lastarg.co_freevars)
except AttributeError:
raise ValueError(
"MAKE_CLOSURE preceding const should "
"be a code or a Code object")
yield pos+1, newstack(-arg-nextrapops)
elif op not in hasflow:
# Simple change of stack
pop, push = getse(op, arg)
yield pos+1, newstack(push - pop)
elif op in (JUMP_FORWARD, JUMP_ABSOLUTE):
# One possibility for a jump
yield label_pos[arg], curstack
elif python_version < '2.7' and op in (JUMP_IF_FALSE, JUMP_IF_TRUE):
# Two possibilities for a jump
yield label_pos[arg], curstack
yield pos+1, curstack
elif python_version >= '2.7' and op in (POP_JUMP_IF_FALSE, POP_JUMP_IF_TRUE):
# Two possibilities for a jump
yield label_pos[arg], newstack(-1)
yield pos+1, newstack(-1)
elif python_version >= '2.7' and op in (JUMP_IF_TRUE_OR_POP, JUMP_IF_FALSE_OR_POP):
# Two possibilities for a jump
yield label_pos[arg], curstack
yield pos+1, newstack(-1)
elif op == FOR_ITER:
# FOR_ITER pushes next(TOS) on success, and pops TOS and jumps
# on failure
yield label_pos[arg], newstack(-1)
yield pos+1, newstack(1)
elif op == BREAK_LOOP:
# BREAK_LOOP jumps to a place specified on block creation, so
# it is ignored here
pass
elif op == CONTINUE_LOOP:
# CONTINUE_LOOP jumps to the beginning of a loop which should
# already have been discovered, but we verify anyway.
# It pops a block.
if python_version == '2.6':
pos, stack = label_pos[arg], curstack[:-1]
if stacks[pos] != stack: #this could be a loop with a 'with' inside
yield pos, stack[:-1] + (stack[-1]-1,)
else:
yield pos, stack
else:
yield label_pos[arg], curstack[:-1]
elif op == SETUP_LOOP:
# We continue with a new block.
# On break, we jump to the label and return to current stack
# state.
yield label_pos[arg], curstack
yield pos+1, curstack + (0,)
elif op == SETUP_EXCEPT:
# We continue with a new block.
# On exception, we jump to the label with 3 extra objects on
# stack
yield label_pos[arg], newstack(3)
yield pos+1, curstack + (0,)
elif op == SETUP_FINALLY:
# We continue with a new block.
# On exception, we jump to the label with 3 extra objects on
# stack, but to keep stack recording consistent, we behave as
# if we add only 1 object. Extra 2 will be added to the actual
# recording.
yield label_pos[arg], newstack(1)
yield pos+1, curstack + (0,)
elif python_version == '2.7' and op == SETUP_WITH:
yield label_pos[arg], curstack
yield pos+1, newstack(-1) + (1,)
elif op == POP_BLOCK:
# Just pop the block
yield pos+1, curstack[:-1]
elif op == END_FINALLY:
# Since stack recording of SETUP_FINALLY targets is of 3 pushed
# objects (as when an exception is raised), we pop 3 objects.
yield pos+1, newstack(-3)
elif op == WITH_CLEANUP:
# Since WITH_CLEANUP is always found after SETUP_FINALLY
# targets, and the stack recording is that of a raised
# exception, we can simply pop 1 object and let END_FINALLY
# pop the remaining 3.
if python_version == '2.7':
yield pos+1, newstack(2)
else:
yield pos+1, newstack(-1)
else:
assert False, "Unhandled opcode: %r" % op
# Now comes the calculation: open_positions holds positions which are
# yet to be explored. In each step we take one open position, and
# explore it by adding the positions to which you can get from it, to
# open_positions. On the way, we update maxsize.
# open_positions is a list of tuples: (pos, stack state)
maxsize = 0
open_positions = [(0, (0,))]
while open_positions:
pos, curstack = open_positions.pop()
maxsize = max(maxsize, sum(curstack))
open_positions.extend(get_next_stacks(pos, curstack))
return maxsize
def to_code(self):
"""Assemble a Python code object from a Code object."""
co_argcount = len(self.args) - self.varargs - self.varkwargs
co_stacksize = self._compute_stacksize()
co_flags = self._compute_flags()
co_consts = [self.docstring]
co_names = []
co_varnames = list(self.args)
co_freevars = tuple(self.freevars)
# We find all cellvars beforehand, for two reasons:
# 1. We need the number of them to construct the numeric argument
# for ops in "hasfree".
# 2. We need to put arguments which are cell vars in the beginning
# of co_cellvars
cellvars = set(arg for op, arg in self.code
if isopcode(op) and op in hasfree
and arg not in co_freevars)
co_cellvars = [x for x in self.args if x in cellvars]
def index(seq, item, eq=operator.eq, can_append=True):
"""Find the index of item in a sequence and return it.
If it is not found in the sequence, and can_append is True,
it is appended to the sequence.
eq is the equality operator to use.
"""
for i, x in enumerate(seq):
if eq(x, item):
return i
else:
if can_append:
seq.append(item)
return len(seq) - 1
else:
raise IndexError("Item not found")
# List of tuples (pos, label) to be filled later
jumps = []
# A mapping from a label to its position
label_pos = {}
# Last SetLineno
lastlineno = self.firstlineno
lastlinepos = 0
co_code = array('B')
co_lnotab = array('B')
for i, (op, arg) in enumerate(self.code):
if isinstance(op, Label):
label_pos[op] = len(co_code)
elif op is SetLineno:
incr_lineno = arg - lastlineno
incr_pos = len(co_code) - lastlinepos
lastlineno = arg
lastlinepos = len(co_code)
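# Emit (bytecode-offset delta, line-number delta) pairs into co_lnotab;
# deltas larger than 255 are split across several pairs, mirroring what
# CPython's compiler does when building the line number table.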
if incr_lineno == 0 and incr_pos == 0:
co_lnotab.append(0)
co_lnotab.append(0)
else:
while incr_pos > 255:
co_lnotab.append(255)
co_lnotab.append(0)
incr_pos -= 255
while incr_lineno > 255:
co_lnotab.append(incr_pos)
co_lnotab.append(255)
incr_pos = 0
incr_lineno -= 255
if incr_pos or incr_lineno:
co_lnotab.append(incr_pos)
co_lnotab.append(incr_lineno)
elif op == opcode.EXTENDED_ARG:
raise ValueError("EXTENDED_ARG not supported in Code objects")
elif not op in hasarg:
co_code.append(op)
else:
if op in hasconst:
if isinstance(arg, Code) and i < len(self.code)-1 and \
self.code[i+1][0] in hascode:
arg = arg.to_code()
arg = index(co_consts, arg, operator.is_)
elif op in hasname:
arg = index(co_names, arg)
elif op in hasjump:
# arg will be filled later
jumps.append((len(co_code), arg))
arg = 0
elif op in haslocal:
arg = index(co_varnames, arg)
elif op in hascompare:
arg = index(cmp_op, arg, can_append=False)
elif op in hasfree:
try:
arg = index(co_freevars, arg, can_append=False) \
+ len(cellvars)
except IndexError:
arg = index(co_cellvars, arg)
else:
# arg is ok
pass
if arg > 0xFFFF:
co_code.append(opcode.EXTENDED_ARG)
co_code.append((arg >> 16) & 0xFF)
co_code.append((arg >> 24) & 0xFF)
co_code.append(op)
co_code.append(arg & 0xFF)
co_code.append((arg >> 8) & 0xFF)
for pos, label in jumps:
jump = label_pos[label]
if co_code[pos] in hasjrel:
jump -= pos+3
if jump > 0xFFFF:
raise NotImplementedError("Extended jumps not implemented")
co_code[pos+1] = jump & 0xFF
co_code[pos+2] = (jump >> 8) & 0xFF
co_code = co_code.tostring()
co_lnotab = co_lnotab.tostring()
co_consts = tuple(co_consts)
co_names = tuple(co_names)
co_varnames = tuple(co_varnames)
co_nlocals = len(co_varnames)
co_cellvars = tuple(co_cellvars)
return types.CodeType(co_argcount, co_nlocals, co_stacksize, co_flags,
co_code, co_consts, co_names, co_varnames,
self.filename, self.name, self.firstlineno, co_lnotab,
co_freevars, co_cellvars)
def printcodelist(codelist, to=sys.stdout):
"""Get a code list. Print it nicely."""
labeldict = {}
pendinglabels = []
for i, (op, arg) in enumerate(codelist):
if isinstance(op, Label):
pendinglabels.append(op)
elif op is SetLineno:
pass
else:
while pendinglabels:
labeldict[pendinglabels.pop()] = i
lineno = None
islabel = False
for i, (op, arg) in enumerate(codelist):
if op is SetLineno:
lineno = arg
print >> to
continue
if isinstance(op, Label):
islabel = True
continue
if lineno is None:
linenostr = ''
else:
linenostr = str(lineno)
lineno = None
if islabel:
islabelstr = '>>'
islabel = False
else:
islabelstr = ''
if op in hasconst:
argstr = repr(arg)
elif op in hasjump:
try:
argstr = 'to ' + str(labeldict[arg])
except KeyError:
argstr = repr(arg)
elif op in hasarg:
argstr = str(arg)
else:
argstr = ''
print >> to, '%3s %2s %4d %-20s %s' % (
linenostr,
islabelstr,
i,
op,
argstr)
def recompile(filename):
"""Create a .pyc by disassembling the file and assembling it again, printing
a message that the reassembled file was loaded."""
# Most of the code here based on the compile.py module.
import os
import imp
import marshal
import struct
f = open(filename, 'U')
try:
timestamp = long(os.fstat(f.fileno()).st_mtime)
except AttributeError:
timestamp = long(os.stat(filename).st_mtime)
codestring = f.read()
f.close()
if codestring and codestring[-1] != '\n':
codestring = codestring + '\n'
try:
codeobject = compile(codestring, filename, 'exec')
except SyntaxError:
print >> sys.stderr, "Skipping %s - syntax error." % filename
return
cod = Code.from_code(codeobject)
message = "reassembled %r imported.\n" % filename
cod.code[:0] = [ # __import__('sys').stderr.write(message)
(LOAD_GLOBAL, '__import__'),
(LOAD_CONST, 'sys'),
(CALL_FUNCTION, 1),
(LOAD_ATTR, 'stderr'),
(LOAD_ATTR, 'write'),
(LOAD_CONST, message),
(CALL_FUNCTION, 1),
(POP_TOP, None),
]
codeobject2 = cod.to_code()
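# Write a zeroed-out magic number first so a partially written .pyc cannot
# be imported; the real magic is written at the end, once the marshal dump
# has completed successfully.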
fc = open(filename+'c', 'wb')
fc.write('\0\0\0\0')
fc.write(struct.pack('<l', timestamp))
marshal.dump(codeobject2, fc)
fc.flush()
fc.seek(0, 0)
fc.write(imp.get_magic())
fc.close()
def recompile_all(path):
"""recursively recompile all .py files in the directory"""
import os
if os.path.isdir(path):
for root, dirs, files in os.walk(path):
for name in files:
if name.endswith('.py'):
filename = os.path.abspath(os.path.join(root, name))
print >> sys.stderr, filename
recompile(filename)
else:
filename = os.path.abspath(path)
recompile(filename)
def main():
import os
if len(sys.argv) != 2 or not os.path.exists(sys.argv[1]):
print("""\
Usage: %s dir
Search recursively for *.py in the given directory, disassemble and assemble
them, adding a note when each file is imported.
Use it to test byteplay like this:
> byteplay.py Lib
> make test
Some FutureWarnings may be raised, but that's expected.
Tip: before doing this, check to see which tests fail even without reassembling
them...
""" % sys.argv[0])
sys.exit(1)
recompile_all(sys.argv[1])
if __name__ == '__main__':
main()
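# Illustrative sketch (not part of byteplay): a minimal round-trip through the
# Code class documented above, assuming a CPython 2.x interpreter since the
# module targets 2.x bytecode. from_code() disassembles a code object into the
# (opcode, arg) representation and to_code() assembles it back.
def _roundtrip_example():
    def add(a, b):
        return a + b
    c = Code.from_code(add.func_code)   # disassemble into a Code object
    printcodelist(c.code)               # pretty-print the (opcode, arg) list
    add.func_code = c.to_code()         # reassemble and patch the function
    assert add(2, 3) == 5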
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Create threads to run multiple enqueue ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import weakref
from tensorflow.core.protobuf import queue_runner_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
class QueueRunner(object):
"""Holds a list of enqueue operations for a queue, each to be run in a thread.
Queues are a convenient TensorFlow mechanism to compute tensors
asynchronously using multiple threads. For example in the canonical 'Input
Reader' setup one set of threads generates filenames in a queue; a second set
of threads reads records from the files, processes them, and enqueues tensors
on a second queue; a third set of threads dequeues these input records to
construct batches and runs them through training operations.
There are several delicate issues when running multiple threads that way:
closing the queues in sequence as the input is exhausted, correctly catching
and reporting exceptions, etc.
The `QueueRunner`, combined with the `Coordinator`, helps handle these issues.
"""
def __init__(self, queue=None, enqueue_ops=None, close_op=None,
cancel_op=None, queue_closed_exception_types=None,
queue_runner_def=None, import_scope=None):
"""Create a QueueRunner.
On construction the `QueueRunner` adds an op to close the queue. That op
will be run if the enqueue ops raise exceptions.
When you later call the `create_threads()` method, the `QueueRunner` will
create one thread for each op in `enqueue_ops`. Each thread will run its
enqueue op in parallel with the other threads. The enqueue ops do not have
to all be the same op, but it is expected that they all enqueue tensors in
`queue`.
Args:
queue: A `Queue`.
enqueue_ops: List of enqueue ops to run in threads later.
close_op: Op to close the queue. Pending enqueue ops are preserved.
cancel_op: Op to close the queue and cancel pending enqueue ops.
queue_closed_exception_types: Optional tuple of Exception types that
indicate that the queue has been closed when raised during an enqueue
operation. Defaults to `(tf.errors.OutOfRangeError,)`. Another common
case includes `(tf.errors.OutOfRangeError, tf.errors.CancelledError)`,
when some of the enqueue ops may dequeue from other Queues.
queue_runner_def: Optional `QueueRunnerDef` protocol buffer. If specified,
recreates the QueueRunner from its contents. `queue_runner_def` and the
other arguments are mutually exclusive.
import_scope: Optional `string`. Name scope to add. Only used when
initializing from protocol buffer.
Raises:
ValueError: If both `queue_runner_def` and `queue` are specified.
ValueError: If `queue` or `enqueue_ops` are not provided when not
restoring from `queue_runner_def`.
"""
if queue_runner_def:
if queue or enqueue_ops:
raise ValueError("queue_runner_def and queue are mutually exclusive.")
self._init_from_proto(queue_runner_def,
import_scope=import_scope)
else:
self._init_from_args(
queue=queue, enqueue_ops=enqueue_ops,
close_op=close_op, cancel_op=cancel_op,
queue_closed_exception_types=queue_closed_exception_types)
# Protect the count of runs to wait for.
self._lock = threading.Lock()
# A map from a session object to the number of outstanding queue runner
# threads for that session.
self._runs_per_session = weakref.WeakKeyDictionary()
# List of exceptions raised by the running threads.
self._exceptions_raised = []
def _init_from_args(self, queue=None, enqueue_ops=None, close_op=None,
cancel_op=None, queue_closed_exception_types=None):
"""Create a QueueRunner from arguments.
Args:
queue: A `Queue`.
enqueue_ops: List of enqueue ops to run in threads later.
close_op: Op to close the queue. Pending enqueue ops are preserved.
cancel_op: Op to close the queue and cancel pending enqueue ops.
queue_closed_exception_types: Tuple of exception types, which indicate
the queue has been safely closed.
Raises:
ValueError: If `queue` or `enqueue_ops` are not provided when not
restoring from `queue_runner_def`.
TypeError: If `queue_closed_exception_types` is provided, but is not
a non-empty tuple of error types (subclasses of `tf.errors.OpError`).
"""
if not queue or not enqueue_ops:
raise ValueError("Must provide queue and enqueue_ops.")
self._queue = queue
self._enqueue_ops = enqueue_ops
self._close_op = close_op
self._cancel_op = cancel_op
if queue_closed_exception_types is not None:
if (not isinstance(queue_closed_exception_types, tuple)
or not queue_closed_exception_types
or not all(issubclass(t, errors.OpError)
for t in queue_closed_exception_types)):
raise TypeError(
"queue_closed_exception_types, when provided, "
"must be a tuple of tf.error types, but saw: %s"
% queue_closed_exception_types)
self._queue_closed_exception_types = queue_closed_exception_types
# Close when no more will be produced, but pending enqueues should be
# preserved.
if self._close_op is None:
self._close_op = self._queue.close()
# Close and cancel pending enqueues since there was an error and we want
# to unblock everything so we can cleanly exit.
if self._cancel_op is None:
self._cancel_op = self._queue.close(cancel_pending_enqueues=True)
if not self._queue_closed_exception_types:
self._queue_closed_exception_types = (errors.OutOfRangeError,)
else:
self._queue_closed_exception_types = tuple(
self._queue_closed_exception_types)
def _init_from_proto(self, queue_runner_def, import_scope=None):
"""Create a QueueRunner from `QueueRunnerDef`.
Args:
queue_runner_def: Optional `QueueRunnerDef` protocol buffer.
import_scope: Optional `string`. Name scope to add.
"""
assert isinstance(queue_runner_def, queue_runner_pb2.QueueRunnerDef)
g = ops.get_default_graph()
self._queue = g.as_graph_element(
ops.prepend_name_scope(queue_runner_def.queue_name, import_scope))
self._enqueue_ops = [g.as_graph_element(
ops.prepend_name_scope(op, import_scope))
for op in queue_runner_def.enqueue_op_name]
self._close_op = g.as_graph_element(ops.prepend_name_scope(
queue_runner_def.close_op_name, import_scope))
self._cancel_op = g.as_graph_element(ops.prepend_name_scope(
queue_runner_def.cancel_op_name, import_scope))
self._queue_closed_exception_types = tuple(
errors.exception_type_from_error_code(code)
for code in queue_runner_def.queue_closed_exception_types)
# Legacy support for old QueueRunnerDefs created before this field
# was added.
if not self._queue_closed_exception_types:
self._queue_closed_exception_types = (errors.OutOfRangeError,)
@property
def queue(self):
return self._queue
@property
def enqueue_ops(self):
return self._enqueue_ops
@property
def close_op(self):
return self._close_op
@property
def cancel_op(self):
return self._cancel_op
@property
def queue_closed_exception_types(self):
return self._queue_closed_exception_types
@property
def exceptions_raised(self):
"""Exceptions raised but not handled by the `QueueRunner` threads.
Exceptions raised in queue runner threads are handled in one of two ways
depending on whether or not a `Coordinator` was passed to
`create_threads()`:
* With a `Coordinator`, exceptions are reported to the coordinator and
forgotten by the `QueueRunner`.
* Without a `Coordinator`, exceptions are captured by the `QueueRunner` and
made available in this `exceptions_raised` property.
Returns:
A list of Python `Exception` objects. The list is empty if no exception
was captured. (No exceptions are captured when using a Coordinator.)
"""
return self._exceptions_raised
@property
def name(self):
"""The string name of the underlying Queue."""
return self._queue.name
# pylint: disable=broad-except
def _run(self, sess, enqueue_op, coord=None):
"""Execute the enqueue op in a loop, close the queue in case of error.
Args:
sess: A Session.
enqueue_op: The Operation to run.
coord: Optional Coordinator object for reporting errors and checking
for stop conditions.
"""
decremented = False
try:
# Make a cached callable from the `enqueue_op` to decrease the
# Python overhead in the queue-runner loop.
enqueue_callable = sess.make_callable(enqueue_op)
while True:
if coord and coord.should_stop():
break
try:
enqueue_callable()
except self._queue_closed_exception_types: # pylint: disable=catching-non-exception
# This exception indicates that a queue was closed.
with self._lock:
self._runs_per_session[sess] -= 1
decremented = True
if self._runs_per_session[sess] == 0:
try:
sess.run(self._close_op)
except Exception as e:
# Intentionally ignore errors from close_op.
logging.vlog(1, "Ignored exception: %s", str(e))
return
except Exception as e:
# This catches all other exceptions.
if coord:
coord.request_stop(e)
else:
logging.error("Exception in QueueRunner: %s", str(e))
with self._lock:
self._exceptions_raised.append(e)
raise
finally:
# Make sure we account for all terminations: normal or errors.
if not decremented:
with self._lock:
self._runs_per_session[sess] -= 1
def _close_on_stop(self, sess, cancel_op, coord):
"""Close the queue when the Coordinator requests stop.
Args:
sess: A Session.
cancel_op: The Operation to run.
coord: Coordinator.
"""
coord.wait_for_stop()
try:
sess.run(cancel_op)
except Exception as e:
# Intentionally ignore errors from cancel_op.
logging.vlog(1, "Ignored exception: %s", str(e))
# pylint: enable=broad-except
def create_threads(self, sess, coord=None, daemon=False, start=False):
"""Create threads to run the enqueue ops for the given session.
This method requires a session in which the graph was launched. It creates
a list of threads, optionally starting them. There is one thread for each
op passed in `enqueue_ops`.
The `coord` argument is an optional coordinator that the threads will use
to terminate together and report exceptions. If a coordinator is given,
this method starts an additional thread to close the queue when the
coordinator requests a stop.
If previously created threads for the given session are still running, no
new threads will be created.
Args:
sess: A `Session`.
coord: Optional `Coordinator` object for reporting errors and checking
stop conditions.
daemon: Boolean. If `True` make the threads daemon threads.
start: Boolean. If `True` starts the threads. If `False` the
caller must call the `start()` method of the returned threads.
Returns:
A list of threads.
"""
with self._lock:
try:
if self._runs_per_session[sess] > 0:
# Already started: no new threads to return.
return []
except KeyError:
# We haven't seen this session yet.
pass
self._runs_per_session[sess] = len(self._enqueue_ops)
self._exceptions_raised = []
ret_threads = []
for op in self._enqueue_ops:
name = "QueueRunnerThread-{}-{}".format(self.name, op.name)
ret_threads.append(threading.Thread(target=self._run,
args=(sess, op, coord),
name=name))
if coord:
name = "QueueRunnerThread-{}-close_on_stop".format(self.name)
ret_threads.append(threading.Thread(target=self._close_on_stop,
args=(sess, self._cancel_op, coord),
name=name))
for t in ret_threads:
if coord:
coord.register_thread(t)
if daemon:
t.daemon = True
if start:
t.start()
return ret_threads
def to_proto(self, export_scope=None):
"""Converts this `QueueRunner` to a `QueueRunnerDef` protocol buffer.
Args:
export_scope: Optional `string`. Name scope to remove.
Returns:
A `QueueRunnerDef` protocol buffer, or `None` if the `Variable` is not in
the specified name scope.
"""
if (export_scope is None or
self.queue.name.startswith(export_scope)):
queue_runner_def = queue_runner_pb2.QueueRunnerDef()
queue_runner_def.queue_name = ops.strip_name_scope(
self.queue.name, export_scope)
for enqueue_op in self.enqueue_ops:
queue_runner_def.enqueue_op_name.append(
ops.strip_name_scope(enqueue_op.name, export_scope))
queue_runner_def.close_op_name = ops.strip_name_scope(
self.close_op.name, export_scope)
queue_runner_def.cancel_op_name = ops.strip_name_scope(
self.cancel_op.name, export_scope)
queue_runner_def.queue_closed_exception_types.extend([
errors.error_code_from_exception_type(cls)
for cls in self._queue_closed_exception_types])
return queue_runner_def
else:
return None
@staticmethod
def from_proto(queue_runner_def, import_scope=None):
"""Returns a `QueueRunner` object created from `queue_runner_def`."""
return QueueRunner(queue_runner_def=queue_runner_def,
import_scope=import_scope)
def add_queue_runner(qr, collection=ops.GraphKeys.QUEUE_RUNNERS):
"""Adds a `QueueRunner` to a collection in the graph.
When building a complex model that uses many queues it is often difficult to
gather all the queue runners that need to be run. This convenience function
allows you to add a queue runner to a well known collection in the graph.
The companion method `start_queue_runners()` can be used to start threads for
all the collected queue runners.
Args:
qr: A `QueueRunner`.
collection: A `GraphKey` specifying the graph collection to add
the queue runner to. Defaults to `GraphKeys.QUEUE_RUNNERS`.
"""
ops.add_to_collection(collection, qr)
def start_queue_runners(sess=None, coord=None, daemon=True, start=True,
collection=ops.GraphKeys.QUEUE_RUNNERS):
"""Starts all queue runners collected in the graph.
This is a companion method to `add_queue_runner()`. It just starts
threads for all queue runners collected in the graph. It returns
the list of all threads.
Args:
sess: `Session` used to run the queue ops. Defaults to the
default session.
coord: Optional `Coordinator` for coordinating the started threads.
daemon: Whether the threads should be marked as `daemons`, meaning
they don't block program exit.
start: Set to `False` to only create the threads, not start them.
collection: A `GraphKey` specifying the graph collection to
get the queue runners from. Defaults to `GraphKeys.QUEUE_RUNNERS`.
Raises:
ValueError: if `sess` is None and there isn't any default session.
TypeError: if `sess` is not a `tf.Session` object.
Returns:
A list of threads.
"""
if sess is None:
sess = ops.get_default_session()
if not sess:
raise ValueError("Cannot start queue runners: No default session is "
"registered. Use `with sess.as_default()` or pass an "
"explicit session to tf.start_queue_runners(sess=sess)")
if not isinstance(sess, session.SessionInterface):
# Following check is due to backward compatibility. (b/62061352)
if sess.__class__.__name__ in [
"MonitoredSession", "SingularMonitoredSession"]:
return []
raise TypeError("sess must be a `tf.Session` object. "
"Given class: {}".format(sess.__class__))
with sess.graph.as_default():
threads = []
for qr in ops.get_collection(collection):
threads.extend(qr.create_threads(sess, coord=coord, daemon=daemon,
start=start))
return threads
ops.register_proto_function(ops.GraphKeys.QUEUE_RUNNERS,
proto_type=queue_runner_pb2.QueueRunnerDef,
to_proto=QueueRunner.to_proto,
from_proto=QueueRunner.from_proto)
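# Illustrative sketch (not part of this module): the canonical wiring of the
# classes above. QueueRunner/add_queue_runner/start_queue_runners come from
# this file; the tf.* names (FIFOQueue, random_normal, Session,
# train.Coordinator) are assumed from the TF 1.x public API.
def _example_queue_runner_usage():
    import tensorflow as tf
    queue = tf.FIFOQueue(capacity=32, dtypes=[tf.float32])
    enqueue_op = queue.enqueue([tf.random_normal([4])])
    qr = QueueRunner(queue, [enqueue_op] * 2)   # two threads running the same op
    add_queue_runner(qr)
    with tf.Session() as sess:
        coord = tf.train.Coordinator()
        threads = start_queue_runners(sess=sess, coord=coord)
        print(sess.run(queue.dequeue()))        # consume one enqueued tensor
        coord.request_stop()
        coord.join(threads)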
|
|
"""The ``lxml.isoschematron`` package implements ISO Schematron support on top
of the pure-xslt 'skeleton' implementation.
"""
import sys
import os.path
from lxml import etree as _etree # due to validator __init__ signature
# some compat stuff, borrowed from lxml.html
try:
bytes = __builtins__["bytes"]
except (KeyError, NameError):
# Python < 2.6
bytes = str
try:
unicode = __builtins__["unicode"]
except (KeyError, NameError):
# Python 3
unicode = str
try:
basestring = __builtins__["basestring"]
except (KeyError, NameError):
# Python 3
basestring = str
__all__ = ['extract_xsd', 'extract_rng', 'iso_dsdl_include',
'iso_abstract_expand', 'iso_svrl_for_xslt1',
'svrl_validation_errors', 'schematron_schema_valid',
'stylesheet_params', 'Schematron']
# some namespaces
#FIXME: Maybe lxml should provide a dedicated place for common namespace
#FIXME: definitions?
XML_SCHEMA_NS = "http://www.w3.org/2001/XMLSchema"
RELAXNG_NS = "http://relaxng.org/ns/structure/1.0"
SCHEMATRON_NS = "http://purl.oclc.org/dsdl/schematron"
SVRL_NS = "http://purl.oclc.org/dsdl/svrl"
# some helpers
_schematron_root = '{%s}schema' % SCHEMATRON_NS
_xml_schema_root = '{%s}schema' % XML_SCHEMA_NS
_resources_dir = os.path.join(os.path.dirname(__file__), 'resources')
# the iso-schematron skeleton implementation steps aka xsl transformations
extract_xsd = _etree.XSLT(_etree.parse(
os.path.join(_resources_dir, 'xsl', 'XSD2Schtrn.xsl')))
extract_rng = _etree.XSLT(_etree.parse(
os.path.join(_resources_dir, 'xsl', 'RNG2Schtrn.xsl')))
iso_dsdl_include = _etree.XSLT(_etree.parse(
os.path.join(_resources_dir, 'xsl', 'iso-schematron-xslt1',
'iso_dsdl_include.xsl')))
iso_abstract_expand = _etree.XSLT(_etree.parse(
os.path.join(_resources_dir, 'xsl', 'iso-schematron-xslt1',
'iso_abstract_expand.xsl')))
iso_svrl_for_xslt1 = _etree.XSLT(_etree.parse(
os.path.join(_resources_dir,
'xsl', 'iso-schematron-xslt1', 'iso_svrl_for_xslt1.xsl')))
# svrl result accessors
svrl_validation_errors = _etree.XPath(
'//svrl:failed-assert', namespaces={'svrl': SVRL_NS})
# RelaxNG validator for schematron schemas
schematron_schema_valid = _etree.RelaxNG(_etree.parse(
os.path.join(_resources_dir, 'rng', 'iso-schematron.rng')))
def stylesheet_params(**kwargs):
"""Convert keyword args to a dictionary of stylesheet parameters.
XSL stylesheet parameters must be XPath expressions, i.e.:
* string expressions, like "'5'"
* simple (number) expressions, like "5"
* valid XPath expressions, like "/a/b/text()"
This function converts native Python keyword arguments to stylesheet
parameters following these rules:
If an arg is a string wrap it with XSLT.strparam().
If an arg is an XPath object use its path string.
If arg is None raise TypeError.
Else convert arg to string.
"""
result = {}
for key, val in kwargs.items():
if isinstance(val, basestring):
val = _etree.XSLT.strparam(val)
elif val is None:
raise TypeError('None not allowed as a stylesheet parameter')
elif not isinstance(val, _etree.XPath):
val = unicode(val)
result[key] = val
return result
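# Illustrative sketch (not part of lxml): how the conversion rules above play
# out for a couple of typical, arbitrarily named parameters.
def _stylesheet_params_example():
    params = stylesheet_params(phase="#ALL", allow_foreign=True)
    # "#ALL" is a string, so it was wrapped with XSLT.strparam();
    # True is neither a string, None nor an XPath, so it became unicode(True).
    assert params['allow_foreign'] == unicode(True)
    return params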
# helper function for use in Schematron __init__
def _stylesheet_param_dict(paramsDict, kwargsDict):
"""Return a copy of paramsDict, updated with kwargsDict entries, wrapped as
stylesheet arguments.
kwargsDict entries with a value of None are ignored.
"""
# beware of changing mutable default arg
paramsDict = dict(paramsDict)
for k, v in kwargsDict.items():
if v is not None: # None values do not override
paramsDict[k] = v
paramsDict = stylesheet_params(**paramsDict)
return paramsDict
class Schematron(_etree._Validator):
"""An ISO Schematron validator.
Pass a root Element or an ElementTree to turn it into a validator.
Alternatively, pass a filename as keyword argument 'file' to parse from
the file system.
Built on the Schematron language 'reference' skeleton pure-xslt
implementation, the validator is created as an XSLT 1.0 stylesheet using
these steps:
0) (Extract from XML Schema or RelaxNG schema)
1) Process inclusions
2) Process abstract patterns
3) Compile the schematron schema to XSLT
The ``include`` and ``expand`` keyword arguments can be used to switch off
steps 1) and 2).
To set parameters for steps 1), 2) and 3) hand parameter dictionaries to the
keyword arguments ``include_params``, ``expand_params`` or
``compile_params``.
For convenience, the compile-step parameter ``phase`` is also exposed as a
keyword argument ``phase``. This takes precedence if the parameter is also
given in the parameter dictionary.
If ``store_schematron`` is set to True, the (included-and-expanded)
schematron document tree is stored and available through the ``schematron``
property.
If ``store_xslt`` is set to True, the validation XSLT document tree will be
stored and can be retrieved through the ``validator_xslt`` property.
With ``store_report`` set to True (default: False), the resulting validation
report document gets stored and can be accessed as the ``validation_report``
property.
Schematron is a less well known, but very powerful schema language. The main
idea is to use the capabilities of XPath to put restrictions on the structure
and the content of XML documents. Here is a simple example::
>>> from lxml import isoschematron
>>> schematron = isoschematron.Schematron(etree.XML('''
... <schema xmlns="http://purl.oclc.org/dsdl/schematron" >
... <pattern id="id_only_attribute">
... <title>id is the only permitted attribute name</title>
... <rule context="*">
... <report test="@*[not(name()='id')]">Attribute
... <name path="@*[not(name()='id')]"/> is forbidden<name/>
... </report>
... </rule>
... </pattern>
... </schema>
... '''))
>>> xml = etree.XML('''
... <AAA name="aaa">
... <BBB id="bbb"/>
... <CCC color="ccc"/>
... </AAA>
... ''')
>>> schematron.validate(xml)
0
>>> xml = etree.XML('''
... <AAA id="aaa">
... <BBB id="bbb"/>
... <CCC/>
... </AAA>
... ''')
>>> schematron.validate(xml)
1
"""
# libxml2 error categorization for validation errors
_domain = _etree.ErrorDomains.SCHEMATRONV
_level = _etree.ErrorLevels.ERROR
_error_type = _etree.ErrorTypes.SCHEMATRONV_ASSERT
def _extract(self, element):
"""Extract embedded schematron schema from non-schematron host schema.
This method will only be called by __init__ if the given schema document
is not a schematron schema by itself.
Must return a schematron schema document tree or None.
"""
schematron = None
if element.tag == _xml_schema_root:
schematron = self._extract_xsd(element)
elif element.nsmap[element.prefix] == RELAXNG_NS:
# RelaxNG does not have a single unique root element
schematron = self._extract_rng(element)
return schematron
# customization points
# etree.XSLT objects that provide the extract, include, expand, compile
# steps
_extract_xsd = extract_xsd
_extract_rng = extract_rng
_include = iso_dsdl_include
_expand = iso_abstract_expand
_compile = iso_svrl_for_xslt1
# etree.XPath object that determines input document validity when applied to
# the svrl result report; must return a list of result elements (empty if
# valid)
_validation_errors = svrl_validation_errors
def __init__(self, etree=None, file=None, include=True, expand=True,
include_params={}, expand_params={}, compile_params={},
store_schematron=False, store_xslt=False, store_report=False,
phase=None):
super(Schematron, self).__init__()
self._store_report = store_report
self._schematron = None
self._validator_xslt = None
self._validation_report = None
# parse schema document, may be a schematron schema or an XML Schema or
# a RelaxNG schema with embedded schematron rules
root = None
try:
if etree is not None:
if isinstance(etree, _etree._Element):
root = etree
else:
root = etree.getroot()
elif file is not None:
root = _etree.parse(file).getroot()
except Exception:
raise _etree.SchematronParseError(
"No tree or file given: %s" % sys.exc_info()[1])
if root is None:
raise ValueError("Empty tree")
if root.tag == _schematron_root:
schematron = root
else:
schematron = self._extract(root)
if schematron is None:
raise _etree.SchematronParseError(
"Document is not a schematron schema or schematron-extractable")
# perform the iso-schematron skeleton implementation steps to get a
# validating xslt
if include:
schematron = self._include(schematron, **include_params)
if expand:
schematron = self._expand(schematron, **expand_params)
if not schematron_schema_valid(schematron):
raise _etree.SchematronParseError(
"invalid schematron schema: %s" %
schematron_schema_valid.error_log)
if store_schematron:
self._schematron = schematron
# add new compile keyword args here if exposing them
compile_kwargs = {'phase': phase}
compile_params = _stylesheet_param_dict(compile_params, compile_kwargs)
validator_xslt = self._compile(schematron, **compile_params)
if store_xslt:
self._validator_xslt = validator_xslt
self._validator = _etree.XSLT(validator_xslt)
def __call__(self, etree):
"""Validate doc using Schematron.
Returns true if document is valid, false if not.
"""
self._clear_error_log()
result = self._validator(etree)
if self._store_report:
self._validation_report = result
errors = self._validation_errors(result)
if errors:
if isinstance(etree, _etree._Element):
fname = etree.getroottree().docinfo.URL or '<file>'
else:
fname = etree.docinfo.URL or '<file>'
for error in errors:
# Does svrl report the line number, anywhere? Don't think so.
self._append_log_message(
domain=self._domain, type=self._error_type,
level=self._level, line=0, message=_etree.tounicode(error),
filename=fname)
return False
return True
def schematron(self):
"""ISO-schematron schema document (None if object has been initialized
with store_schematron=False).
"""
return self._schematron
schematron = property(schematron, doc=schematron.__doc__)
def validator_xslt(self):
"""ISO-schematron skeleton implementation XSLT validator document (None
if object has been initialized with store_xslt=False).
"""
return self._validator_xslt
validator_xslt = property(validator_xslt, doc=validator_xslt.__doc__)
def validation_report(self):
"""ISO-schematron validation result report (None if result-storing has
been turned off).
"""
return self._validation_report
validation_report = property(validation_report, doc=validation_report.__doc__)
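# Illustrative sketch (not part of lxml): using the storage flags described in
# the Schematron docstring. With store_report=True the SVRL report of the last
# validation run remains accessible; ``schema`` and ``xml`` are assumed to be
# element trees built as in the doctest above.
def _schematron_report_example(schema, xml):
    validator = Schematron(schema, store_report=True)
    if not validator.validate(xml):
        for entry in validator.error_log:   # parsed SVRL failed-asserts
            print(entry.message)
        return validator.validation_report  # the raw SVRL result tree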
|
|
import logging
import sys
from contextlib import contextmanager
from sqlalchemy import MetaData, Table, Column, String, literal_column,\
PrimaryKeyConstraint
from sqlalchemy.engine.strategies import MockEngineStrategy
from sqlalchemy.engine import url as sqla_url
from sqlalchemy.engine import Connection
from ..util.compat import callable, EncodedIO
from .. import ddl, util
log = logging.getLogger(__name__)
class MigrationContext(object):
"""Represent the database state made available to a migration
script.
:class:`.MigrationContext` is the front end to an actual
database connection, or alternatively a string output
stream given a particular database dialect,
from an Alembic perspective.
When inside the ``env.py`` script, the :class:`.MigrationContext`
is available via the
:meth:`.EnvironmentContext.get_context` method,
which is available at ``alembic.context``::
# from within env.py script
from alembic import context
migration_context = context.get_context()
For usage outside of an ``env.py`` script, such as for
utility routines that want to check the current version
in the database, the :meth:`.MigrationContext.configure`
method may be used to create new :class:`.MigrationContext` objects.
For example, to get at the current revision in the
database using :meth:`.MigrationContext.get_current_revision`::
# in any application, outside of an env.py script
from alembic.migration import MigrationContext
from sqlalchemy import create_engine
engine = create_engine("postgresql://mydatabase")
conn = engine.connect()
context = MigrationContext.configure(conn)
current_rev = context.get_current_revision()
The above context can also be used to produce
Alembic migration operations with an :class:`.Operations`
instance::
# in any application, outside of the normal Alembic environment
from alembic.operations import Operations
op = Operations(context)
op.alter_column("mytable", "somecolumn", nullable=True)
"""
def __init__(self, dialect, connection, opts, environment_context=None):
self.environment_context = environment_context
self.opts = opts
self.dialect = dialect
self.script = opts.get('script')
as_sql = opts.get('as_sql', False)
transactional_ddl = opts.get("transactional_ddl")
self._transaction_per_migration = opts.get(
"transaction_per_migration", False)
self.on_version_apply_callbacks = opts.get('on_version_apply', ())
if as_sql:
self.connection = self._stdout_connection(connection)
assert self.connection is not None
else:
self.connection = connection
self._migrations_fn = opts.get('fn')
self.as_sql = as_sql
if "output_encoding" in opts:
self.output_buffer = EncodedIO(
opts.get("output_buffer") or sys.stdout,
opts['output_encoding']
)
else:
self.output_buffer = opts.get("output_buffer", sys.stdout)
self._user_compare_type = opts.get('compare_type', False)
self._user_compare_server_default = opts.get(
'compare_server_default',
False)
self.version_table = version_table = opts.get(
'version_table', 'alembic_version')
self.version_table_schema = version_table_schema = \
opts.get('version_table_schema', None)
self._version = Table(
version_table, MetaData(),
Column('version_num', String(32), nullable=False),
schema=version_table_schema)
if opts.get("version_table_pk", True):
self._version.append_constraint(
PrimaryKeyConstraint(
'version_num', name="%s_pkc" % version_table
)
)
self._start_from_rev = opts.get("starting_rev")
self.impl = ddl.DefaultImpl.get_by_dialect(dialect)(
dialect, self.connection, self.as_sql,
transactional_ddl,
self.output_buffer,
opts
)
log.info("Context impl %s.", self.impl.__class__.__name__)
if self.as_sql:
log.info("Generating static SQL")
log.info("Will assume %s DDL.",
"transactional" if self.impl.transactional_ddl
else "non-transactional")
@classmethod
def configure(cls,
connection=None,
url=None,
dialect_name=None,
dialect=None,
environment_context=None,
opts=None,
):
"""Create a new :class:`.MigrationContext`.
This is a factory method usually called
by :meth:`.EnvironmentContext.configure`.
:param connection: a :class:`~sqlalchemy.engine.Connection`
to use for SQL execution in "online" mode. When present,
is also used to determine the type of dialect in use.
:param url: a string database url, or a
:class:`sqlalchemy.engine.url.URL` object.
The type of dialect to be used will be derived from this if
``connection`` is not passed.
:param dialect_name: string name of a dialect, such as
"postgresql", "mssql", etc. The type of dialect to be used will be
derived from this if ``connection`` and ``url`` are not passed.
:param opts: dictionary of options. Most other options
accepted by :meth:`.EnvironmentContext.configure` are passed via
this dictionary.
"""
if opts is None:
opts = {}
if connection:
if not isinstance(connection, Connection):
util.warn(
"'connection' argument to configure() is expected "
"to be a sqlalchemy.engine.Connection instance, "
"got %r" % connection)
dialect = connection.dialect
elif url:
url = sqla_url.make_url(url)
dialect = url.get_dialect()()
elif dialect_name:
url = sqla_url.make_url("%s://" % dialect_name)
dialect = url.get_dialect()()
elif not dialect:
raise Exception("Connection, url, or dialect_name is required.")
return MigrationContext(dialect, connection, opts, environment_context)
def begin_transaction(self, _per_migration=False):
transaction_now = _per_migration == self._transaction_per_migration
if not transaction_now:
@contextmanager
def do_nothing():
yield
return do_nothing()
elif not self.impl.transactional_ddl:
@contextmanager
def do_nothing():
yield
return do_nothing()
elif self.as_sql:
@contextmanager
def begin_commit():
self.impl.emit_begin()
yield
self.impl.emit_commit()
return begin_commit()
else:
return self.bind.begin()
def get_current_revision(self):
"""Return the current revision, usually that which is present
in the ``alembic_version`` table in the database.
This method is intended to be used only for a migration stream that
does not contain unmerged branches in the target database;
if there are multiple branches present, an exception is raised.
The :meth:`.MigrationContext.get_current_heads` should be preferred
over this method going forward in order to be compatible with
branch migration support.
If this :class:`.MigrationContext` was configured in "offline"
mode, that is with ``as_sql=True``, the ``starting_rev``
parameter is returned instead, if any.
"""
heads = self.get_current_heads()
if len(heads) == 0:
return None
elif len(heads) > 1:
raise util.CommandError(
"Version table '%s' has more than one head present; "
"please use get_current_heads()" % self.version_table)
else:
return heads[0]
def get_current_heads(self):
"""Return a tuple of the current 'head versions' that are represented
in the target database.
For a migration stream without branches, this will be a single
value, synonymous with that of
:meth:`.MigrationContext.get_current_revision`. However when multiple
unmerged branches exist within the target database, the returned tuple
will contain a value for each head.
If this :class:`.MigrationContext` was configured in "offline"
mode, that is with ``as_sql=True``, the ``starting_rev``
parameter is returned in a one-length tuple.
If no version table is present, or if there are no revisions
present, an empty tuple is returned.
.. versionadded:: 0.7.0
"""
if self.as_sql:
start_from_rev = self._start_from_rev
if start_from_rev == 'base':
start_from_rev = None
elif start_from_rev is not None and self.script:
start_from_rev = \
self.script.get_revision(start_from_rev).revision
return util.to_tuple(start_from_rev, default=())
else:
if self._start_from_rev:
raise util.CommandError(
"Can't specify current_rev to context "
"when using a database connection")
if not self._has_version_table():
return ()
return tuple(
row[0] for row in self.connection.execute(self._version.select())
)
def _ensure_version_table(self):
self._version.create(self.connection, checkfirst=True)
def _has_version_table(self):
return self.connection.dialect.has_table(
self.connection, self.version_table, self.version_table_schema)
def stamp(self, script_directory, revision):
"""Stamp the version table with a specific revision.
This method calculates those branches to which the given revision
can apply, and updates those branches as though they were migrated
towards that revision (either up or down). If no current branches
include the revision, it is added as a new branch head.
.. versionadded:: 0.7.0
"""
heads = self.get_current_heads()
if not self.as_sql and not heads:
self._ensure_version_table()
head_maintainer = HeadMaintainer(self, heads)
for step in script_directory._stamp_revs(revision, heads):
head_maintainer.update_to_step(step)
def run_migrations(self, **kw):
r"""Run the migration scripts established for this
:class:`.MigrationContext`, if any.
The commands in :mod:`alembic.command` will set up a function
that is ultimately passed to the :class:`.MigrationContext`
as the ``fn`` argument. This function represents the "work"
that will be done when :meth:`.MigrationContext.run_migrations`
is called, typically from within the ``env.py`` script of the
migration environment. The "work function" then provides an iterable
of version callables and other version information which
in the case of the ``upgrade`` or ``downgrade`` commands are the
list of version scripts to invoke. Other commands, such as ``current``
or ``stamp``, yield nothing; they instead run some other operation
against the database.
:param \**kw: keyword arguments here will be passed to each
migration callable, that is the ``upgrade()`` or ``downgrade()``
method within revision scripts.
"""
self.impl.start_migrations()
heads = self.get_current_heads()
if not self.as_sql and not heads:
self._ensure_version_table()
head_maintainer = HeadMaintainer(self, heads)
starting_in_transaction = not self.as_sql and \
self._in_connection_transaction()
for step in self._migrations_fn(heads, self):
with self.begin_transaction(_per_migration=True):
if self.as_sql and not head_maintainer.heads:
# for offline mode, include a CREATE TABLE from
# the base
self._version.create(self.connection)
log.info("Running %s", step)
if self.as_sql:
self.impl.static_output("-- Running %s" % (step.short_log,))
step.migration_fn(**kw)
# previously, we wouldn't stamp per migration
# if we were in a transaction, however given the more
# complex model that involves any number of inserts
# and row-targeted updates and deletes, it's simpler for now
# just to run the operations on every version
head_maintainer.update_to_step(step)
for callback in self.on_version_apply_callbacks:
callback(ctx=self,
step=step.info,
heads=set(head_maintainer.heads),
run_args=kw)
if not starting_in_transaction and not self.as_sql and \
not self.impl.transactional_ddl and \
self._in_connection_transaction():
raise util.CommandError(
"Migration \"%s\" has left an uncommitted "
"transaction opened; transactional_ddl is False so "
"Alembic is not committing transactions"
% step)
if self.as_sql and not head_maintainer.heads:
self._version.drop(self.connection)
def _in_connection_transaction(self):
try:
meth = self.connection.in_transaction
except AttributeError:
return False
else:
return meth()
def execute(self, sql, execution_options=None):
"""Execute a SQL construct or string statement.
The underlying execution mechanics are used, that is
if this is "offline mode" the SQL is written to the
output buffer, otherwise the SQL is emitted on
the current SQLAlchemy connection.
"""
self.impl._exec(sql, execution_options)
def _stdout_connection(self, connection):
def dump(construct, *multiparams, **params):
self.impl._exec(construct)
return MockEngineStrategy.MockConnection(self.dialect, dump)
@property
def bind(self):
"""Return the current "bind".
In online mode, this is an instance of
:class:`sqlalchemy.engine.Connection`, and is suitable
for ad-hoc execution of any kind of usage described
in :ref:`sqlexpression_toplevel` as well as
for usage with the :meth:`sqlalchemy.schema.Table.create`
and :meth:`sqlalchemy.schema.MetaData.create_all` methods
of :class:`~sqlalchemy.schema.Table`,
:class:`~sqlalchemy.schema.MetaData`.
Note that when "standard output" mode is enabled,
this bind will be a "mock" connection handler that cannot
return results and is only appropriate for a very limited
subset of commands.
"""
return self.connection
@property
def config(self):
"""Return the :class:`.Config` used by the current environment, if any.
.. versionadded:: 0.6.6
"""
if self.environment_context:
return self.environment_context.config
else:
return None
def _compare_type(self, inspector_column, metadata_column):
if self._user_compare_type is False:
return False
if callable(self._user_compare_type):
user_value = self._user_compare_type(
self,
inspector_column,
metadata_column,
inspector_column.type,
metadata_column.type
)
if user_value is not None:
return user_value
return self.impl.compare_type(
inspector_column,
metadata_column)
def _compare_server_default(self, inspector_column,
metadata_column,
rendered_metadata_default,
rendered_column_default):
if self._user_compare_server_default is False:
return False
if callable(self._user_compare_server_default):
user_value = self._user_compare_server_default(
self,
inspector_column,
metadata_column,
rendered_column_default,
metadata_column.server_default,
rendered_metadata_default
)
if user_value is not None:
return user_value
return self.impl.compare_server_default(
inspector_column,
metadata_column,
rendered_metadata_default,
rendered_column_default)
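# Illustrative sketch (not part of Alembic): driving a MigrationContext in
# "offline" (as_sql) mode outside of an env.py script, complementing the
# execute() docstring above. The URL and statement are placeholders; with
# as_sql=True the SQL is written to the output buffer (stdout by default)
# rather than executed on a connection.
def _offline_context_example():
    ctx = MigrationContext.configure(
        url="sqlite://", opts={"as_sql": True})
    ctx.execute("UPDATE mytable SET x = 5")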
class HeadMaintainer(object):
def __init__(self, context, heads):
self.context = context
self.heads = set(heads)
def _insert_version(self, version):
assert version not in self.heads
self.heads.add(version)
self.context.impl._exec(
self.context._version.insert().
values(
version_num=literal_column("'%s'" % version)
)
)
def _delete_version(self, version):
self.heads.remove(version)
ret = self.context.impl._exec(
self.context._version.delete().where(
self.context._version.c.version_num ==
literal_column("'%s'" % version)))
if not self.context.as_sql and ret.rowcount != 1:
raise util.CommandError(
"Online migration expected to match one "
"row when deleting '%s' in '%s'; "
"%d found"
% (version,
self.context.version_table, ret.rowcount))
def _update_version(self, from_, to_):
assert to_ not in self.heads
self.heads.remove(from_)
self.heads.add(to_)
ret = self.context.impl._exec(
self.context._version.update().
values(version_num=literal_column("'%s'" % to_)).where(
self.context._version.c.version_num
== literal_column("'%s'" % from_))
)
if not self.context.as_sql and ret.rowcount != 1:
raise util.CommandError(
"Online migration expected to match one "
"row when updating '%s' to '%s' in '%s'; "
"%d found"
% (from_, to_, self.context.version_table, ret.rowcount))
def update_to_step(self, step):
if step.should_delete_branch(self.heads):
vers = step.delete_version_num
log.debug("branch delete %s", vers)
self._delete_version(vers)
elif step.should_create_branch(self.heads):
vers = step.insert_version_num
log.debug("new branch insert %s", vers)
self._insert_version(vers)
elif step.should_merge_branches(self.heads):
# delete revs, update from rev, update to rev
(delete_revs, update_from_rev,
update_to_rev) = step.merge_branch_idents(self.heads)
log.debug(
"merge, delete %s, update %s to %s",
delete_revs, update_from_rev, update_to_rev)
for delrev in delete_revs:
self._delete_version(delrev)
self._update_version(update_from_rev, update_to_rev)
elif step.should_unmerge_branches(self.heads):
(update_from_rev, update_to_rev,
insert_revs) = step.unmerge_branch_idents(self.heads)
log.debug(
"unmerge, insert %s, update %s to %s",
insert_revs, update_from_rev, update_to_rev)
for insrev in insert_revs:
self._insert_version(insrev)
self._update_version(update_from_rev, update_to_rev)
else:
from_, to_ = step.update_version_num(self.heads)
log.debug("update %s to %s", from_, to_)
self._update_version(from_, to_)
class MigrationInfo(object):
"""Exposes information about a migration step to a callback listener.
The :class:`.MigrationInfo` object is available exclusively for the
benefit of the :paramref:`.EnvironmentContext.on_version_apply`
callback hook.
.. versionadded:: 0.9.3
"""
is_upgrade = None
"""True/False: indicates whether this operation ascends or descends the
version tree."""
is_stamp = None
"""True/False: indicates whether this operation is a stamp (i.e. whether
it results in any actual database operations)."""
up_revision_id = None
"""Version string corresponding to :attr:`.Revision.revision`.
In the case of a stamp operation, it is advised to use the
:attr:`.MigrationInfo.up_revision_ids` tuple as a stamp operation can
make a single movement from one or more branches down to a single
branchpoint, in which case there will be multiple "up" revisions.
.. seealso::
:attr:`.MigrationInfo.up_revision_ids`
"""
up_revision_ids = None
"""Tuple of version strings corresponding to :attr:`.Revision.revision`.
In the majority of cases, this tuple will be a single value, synonymous
with the scalar value of :attr:`.MigrationInfo.up_revision_id`.
It can be multiple revision identifiers only in the case of an
``alembic stamp`` operation which is moving downwards from multiple
branches down to their common branch point.
.. versionadded:: 0.9.4
"""
down_revision_ids = None
"""Tuple of strings representing the base revisions of this migration step.
If empty, this represents a root revision; otherwise, the first item
corresponds to :attr:`.Revision.down_revision`, and the rest are inferred
from dependencies.
"""
revision_map = None
"""The revision map inside of which this operation occurs."""
def __init__(self, revision_map, is_upgrade, is_stamp, up_revisions,
down_revisions):
self.revision_map = revision_map
self.is_upgrade = is_upgrade
self.is_stamp = is_stamp
self.up_revision_ids = util.to_tuple(up_revisions, default=())
if self.up_revision_ids:
self.up_revision_id = self.up_revision_ids[0]
else:
# this should never be the case with
# "upgrade", "downgrade", or "stamp" as we are always
# measuring movement in terms of at least one upgrade version
self.up_revision_id = None
self.down_revision_ids = util.to_tuple(down_revisions, default=())
@property
def is_migration(self):
"""True/False: indicates whether this operation is a migration.
At present this is true if and only if the migration is not a stamp.
If other operation types are added in the future, both this attribute
and :attr:`~.MigrationInfo.is_stamp` will be false.
"""
return not self.is_stamp
@property
def source_revision_ids(self):
"""Active revisions before this migration step is applied."""
return self.down_revision_ids if self.is_upgrade \
else self.up_revision_ids
@property
def destination_revision_ids(self):
"""Active revisions after this migration step is applied."""
return self.up_revision_ids if self.is_upgrade \
else self.down_revision_ids
@property
def up_revision(self):
"""Get :attr:`~.MigrationInfo.up_revision_id` as a :class:`.Revision`."""
return self.revision_map.get_revision(self.up_revision_id)
@property
def up_revisions(self):
"""Get :attr:`~.MigrationInfo.up_revision_ids` as a :class:`.Revision`.
.. versionadded:: 0.9.4
"""
return self.revision_map.get_revisions(self.up_revision_ids)
@property
def down_revisions(self):
"""Get :attr:`~.MigrationInfo.down_revision_ids` as a tuple of
:class:`Revisions <.Revision>`."""
return self.revision_map.get_revisions(self.down_revision_ids)
@property
def source_revisions(self):
"""Get :attr:`~MigrationInfo.source_revision_ids` as a tuple of
:class:`Revisions <.Revision>`."""
return self.revision_map.get_revisions(self.source_revision_ids)
@property
def destination_revisions(self):
"""Get :attr:`~MigrationInfo.destination_revision_ids` as a tuple of
:class:`Revisions <.Revision>`."""
return self.revision_map.get_revisions(self.destination_revision_ids)
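# --- Hedged usage sketch (added for illustration; not part of this module) ---
# A MigrationInfo instance is what an ``on_version_apply`` callback receives
# when configured via ``EnvironmentContext.configure()`` in env.py.  The
# keyword names ``ctx``/``step``/``heads``/``run_args`` follow the documented
# hook, but treat the exact signature as an assumption.
def _example_on_version_apply(*, ctx, step, heads, run_args, **kw):
    # ``step`` is a MigrationInfo; report which revisions become active.
    direction = "upgrade" if step.is_upgrade else "downgrade"
    log.info("%s applied: now at %s (was %s)",
             direction, step.destination_revision_ids, step.source_revision_ids)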
class MigrationStep(object):
@property
def name(self):
return self.migration_fn.__name__
@classmethod
def upgrade_from_script(cls, revision_map, script):
return RevisionStep(revision_map, script, True)
@classmethod
def downgrade_from_script(cls, revision_map, script):
return RevisionStep(revision_map, script, False)
@property
def is_downgrade(self):
return not self.is_upgrade
@property
def short_log(self):
return "%s %s -> %s" % (
self.name,
util.format_as_comma(self.from_revisions_no_deps),
util.format_as_comma(self.to_revisions_no_deps)
)
def __str__(self):
if self.doc:
return "%s %s -> %s, %s" % (
self.name,
util.format_as_comma(self.from_revisions_no_deps),
util.format_as_comma(self.to_revisions_no_deps),
self.doc
)
else:
return self.short_log
class RevisionStep(MigrationStep):
def __init__(self, revision_map, revision, is_upgrade):
self.revision_map = revision_map
self.revision = revision
self.is_upgrade = is_upgrade
if is_upgrade:
self.migration_fn = revision.module.upgrade
else:
self.migration_fn = revision.module.downgrade
def __repr__(self):
return "RevisionStep(%r, is_upgrade=%r)" % (
self.revision.revision, self.is_upgrade
)
def __eq__(self, other):
return isinstance(other, RevisionStep) and \
other.revision == self.revision and \
self.is_upgrade == other.is_upgrade
@property
def doc(self):
return self.revision.doc
@property
def from_revisions(self):
if self.is_upgrade:
return self.revision._all_down_revisions
else:
return (self.revision.revision, )
@property
def from_revisions_no_deps(self):
if self.is_upgrade:
return self.revision._versioned_down_revisions
else:
return (self.revision.revision, )
@property
def to_revisions(self):
if self.is_upgrade:
return (self.revision.revision, )
else:
return self.revision._all_down_revisions
@property
def to_revisions_no_deps(self):
if self.is_upgrade:
return (self.revision.revision, )
else:
return self.revision._versioned_down_revisions
@property
def _has_scalar_down_revision(self):
return len(self.revision._all_down_revisions) == 1
def should_delete_branch(self, heads):
"""A delete is when we are a. in a downgrade and b.
we are going to the "base" or we are going to a version that
is implied as a dependency on another version that is remaining.
"""
if not self.is_downgrade:
return False
if self.revision.revision not in heads:
return False
downrevs = self.revision._all_down_revisions
if not downrevs:
# is a base
return True
else:
# determine what the ultimate "to_revisions" for an
# unmerge would be. If there are none, then we're a delete.
to_revisions = self._unmerge_to_revisions(heads)
return not to_revisions
def merge_branch_idents(self, heads):
other_heads = set(heads).difference(self.from_revisions)
if other_heads:
ancestors = set(
r.revision for r in
self.revision_map._get_ancestor_nodes(
self.revision_map.get_revisions(other_heads),
check=False
)
)
from_revisions = list(
set(self.from_revisions).difference(ancestors))
else:
from_revisions = list(self.from_revisions)
return (
# delete revs, update from rev, update to rev
list(from_revisions[0:-1]), from_revisions[-1],
self.to_revisions[0]
)
def _unmerge_to_revisions(self, heads):
other_heads = set(heads).difference([self.revision.revision])
if other_heads:
ancestors = set(
r.revision for r in
self.revision_map._get_ancestor_nodes(
self.revision_map.get_revisions(other_heads),
check=False
)
)
return list(set(self.to_revisions).difference(ancestors))
else:
return self.to_revisions
def unmerge_branch_idents(self, heads):
to_revisions = self._unmerge_to_revisions(heads)
return (
# update from rev, update to rev, insert revs
self.from_revisions[0], to_revisions[-1],
to_revisions[0:-1]
)
def should_create_branch(self, heads):
if not self.is_upgrade:
return False
downrevs = self.revision._all_down_revisions
if not downrevs:
# is a base
return True
else:
# none of our downrevs are present, so we have to insert
# our version. This is true whether there is a single downrev
# or multiple downrevs (in the latter case, we're a merge point).
if not heads.intersection(downrevs):
return True
else:
return False
def should_merge_branches(self, heads):
if not self.is_upgrade:
return False
downrevs = self.revision._all_down_revisions
if len(downrevs) > 1 and \
len(heads.intersection(downrevs)) > 1:
return True
return False
def should_unmerge_branches(self, heads):
if not self.is_downgrade:
return False
downrevs = self.revision._all_down_revisions
if self.revision.revision in heads and len(downrevs) > 1:
return True
return False
def update_version_num(self, heads):
if not self._has_scalar_down_revision:
downrev = heads.intersection(self.revision._all_down_revisions)
assert len(downrev) == 1, \
"Can't do an UPDATE because downrevision is ambiguous"
down_revision = list(downrev)[0]
else:
down_revision = self.revision._all_down_revisions[0]
if self.is_upgrade:
return down_revision, self.revision.revision
else:
return self.revision.revision, down_revision
@property
def delete_version_num(self):
return self.revision.revision
@property
def insert_version_num(self):
return self.revision.revision
@property
def info(self):
return MigrationInfo(revision_map=self.revision_map,
up_revisions=self.revision.revision,
down_revisions=self.revision._all_down_revisions,
is_upgrade=self.is_upgrade, is_stamp=False)
class StampStep(MigrationStep):
def __init__(self, from_, to_, is_upgrade, branch_move, revision_map=None):
self.from_ = util.to_tuple(from_, default=())
self.to_ = util.to_tuple(to_, default=())
self.is_upgrade = is_upgrade
self.branch_move = branch_move
self.migration_fn = self.stamp_revision
self.revision_map = revision_map
doc = None
def stamp_revision(self, **kw):
return None
def __eq__(self, other):
return isinstance(other, StampStep) and \
other.from_revisions == self.from_revisions and \
other.to_revisions == self.to_revisions and \
other.branch_move == self.branch_move and \
self.is_upgrade == other.is_upgrade
@property
def from_revisions(self):
return self.from_
@property
def to_revisions(self):
return self.to_
@property
def from_revisions_no_deps(self):
return self.from_
@property
def to_revisions_no_deps(self):
return self.to_
@property
def delete_version_num(self):
assert len(self.from_) == 1
return self.from_[0]
@property
def insert_version_num(self):
assert len(self.to_) == 1
return self.to_[0]
def update_version_num(self, heads):
assert len(self.from_) == 1
assert len(self.to_) == 1
return self.from_[0], self.to_[0]
def merge_branch_idents(self, heads):
return (
# delete revs, update from rev, update to rev
list(self.from_[0:-1]), self.from_[-1],
self.to_[0]
)
def unmerge_branch_idents(self, heads):
return (
# update from rev, update to rev, insert revs
self.from_[0], self.to_[-1],
list(self.to_[0:-1])
)
def should_delete_branch(self, heads):
return self.is_downgrade and self.branch_move
def should_create_branch(self, heads):
return self.is_upgrade and self.branch_move
def should_merge_branches(self, heads):
return len(self.from_) > 1
def should_unmerge_branches(self, heads):
return len(self.to_) > 1
@property
def info(self):
up, down = (self.to_, self.from_) if self.is_upgrade \
else (self.from_, self.to_)
return MigrationInfo(revision_map=self.revision_map,
up_revisions=up,
down_revisions=down,
is_upgrade=self.is_upgrade,
is_stamp=True)
|
|
#!/usr/bin/env python3
"""
enable_mount_volumes - Creator and Configurator of MOUNT volumes
Features:
1. Creates GP2 SSD volumes for each private agent
2. Attaches volumes to each private agent
3. Formats volumes and configures fstab entries
4. Configures the Mesos agent and relaunches instances for the changes to take effect.
Note: Currently, enable_mount_volumes only works with AWS DC/OS clusters.
"""
import boto3
import botocore
import logging
import os
import os.path
#import pprint
import sys
import time
import uuid
from fabric.api import run, env
from fabric.tasks import execute
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO, format="%(message)s")
def tag_match(instance, key, value):
tags = instance.get('Tags')
if not tags:
return False
for tag in tags:
if tag.get('Key') == key and tag.get('Value') == value:
return True
return False
def filter_reservations_tags(reservations, filter_key, filter_value):
filtered_reservations = []
logger.info('Values for {} (searching for "{}"):'.format(filter_key, filter_value))
for reservation in reservations:
instances = reservation['Instances']
if tag_match(instances[0], filter_key, filter_value):
filtered_reservations.append(reservation)
return filtered_reservations
def filter_gateway_instance(instances):
for instance in instances:
if tag_match(instance, 'role', 'mesos-master'):
return instance
def enumerate_instances(reservations):
bucket = []
for reservation in reservations:
instances = reservation['Instances']
for instance in instances:
bucket.append(instance)
return bucket
# A private slave doesn't have a PublicDnsName
def filter_instances_private(instances):
return [instance for instance in instances if len(instance.get('PublicDnsName', '')) == 0]
def create_volume(client, zone):
response = client.create_volume(
Size=24,
AvailabilityZone=zone,
VolumeType='gp2',
Encrypted=False
)
logger.info('Create volume response: {}'.format(response))
return response
def attach_volume(client, volume_id, instance_id, device='/dev/xvdm'):
response = client.attach_volume(
VolumeId=volume_id,
InstanceId=instance_id,
Device=device)
logger.info('Attach volume response: {}'.format(response))
return response
def configure_delete_on_termination(client, volume_id, instance_id, device='/dev/xvdm'):
response = client.modify_instance_attribute(
InstanceId=instance_id,
BlockDeviceMappings=[
{
'DeviceName': device,
'Ebs': {
'VolumeId': volume_id,
'DeleteOnTermination': True
}
},
]
)
logger.info('Instance attribute modification response: {}'.format(response))
return response
def tag_volume(client, volume_id):
response = client.create_tags(
Resources=[volume_id],
Tags=[
{
'Key': 'ccm_volume_name',
'Value': 'infinity-' + str(uuid.uuid1())
}
]
)
return response
def detach_volume(client, volume_id, instance_id, device='/dev/xvdm'):
response = client.detach_volume(
VolumeId=volume_id,
InstanceId=instance_id,
Device=device)
logger.info('Volume detach response: {}'.format(response))
return response
def configure_partition(device, partition_index, start, end, stdout):
device_partition = '{}{}'.format(device, partition_index) # e.g. /dev/xvdm1
mount_location = '/dcos/volume{}'.format(partition_index - 1) # e.g. /dcos/volume0
run('sudo parted -s {} mkpart primary ext4 {} {}'.format(device, start, end),
stdout=stdout)
run('sudo mkfs -t ext4 {}'.format(device_partition), stdout=stdout)
run('sudo mkdir -p {}'.format(mount_location), stdout=stdout)
run('sudo mount {} {}'.format(device_partition, mount_location),
stdout=stdout)
run('sudo sh -c "echo \'{} {} ext4 defaults 0 2\' >> /etc/fstab"'.format(device_partition, mount_location),
stdout=stdout)
def configure_device(device='/dev/xvdm', stdout=sys.stdout):
"""
Formats the attached EBS volume as two MOUNT volumes and adds entries to fstab.
DC/OS will autodetect the '/dcos/volume#' volumes.
"""
device_name = os.path.basename(device)
run('until [[ "$(lsblk -o NAME -r | grep {} | wc -l)" -gt "0" ]]; do echo "Waiting for {}"; sleep 2; done'.format(device_name, device_name))
run('sudo parted -s {} mklabel gpt'.format(device))
configure_partition(device, 1, "0%", "50%", stdout=stdout)
configure_partition(device, 2, "50%", "100%", stdout=stdout)
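# Illustrative outcome (derived from configure_partition above): with the default
# device /dev/xvdm, the agent ends up with two mounted partitions and fstab lines
# equivalent to:
#   /dev/xvdm1 /dcos/volume0 ext4 defaults 0 2
#   /dev/xvdm2 /dcos/volume1 ext4 defaults 0 2
# which DC/OS detects as MOUNT volumes once the agent is restarted (see
# configure_mesos below).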
def configure_mesos(stdout):
"""
Configures the newly created EBS volume as a Mesos agent resource
"""
run("sudo systemctl stop dcos-mesos-slave", stdout=stdout)
run("sudo rm -f /var/lib/mesos/slave/meta/slaves/latest", stdout=stdout)
run("sudo rm -f /var/lib/dcos/mesos-resources", stdout=stdout)
run("sudo systemctl start dcos-mesos-slave", stdout=stdout)
def main(stack_id = '', stdout=sys.stdout):
# Read inputs from environment
aws_access_key = os.environ.get('AWS_ACCESS_KEY_ID', '')
aws_secret_key = os.environ.get('AWS_SECRET_ACCESS_KEY', '')
stack_id = str(os.environ.get('STACK_ID', stack_id))
if not aws_access_key or not aws_secret_key or not stack_id:
logger.error('AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, and STACK_ID envvars are required.')
return 1
region_name = os.environ.get('AWS_DEFAULT_REGION', 'us-west-2')
# Create EC2 client
ec2 = boto3.client('ec2',
aws_access_key_id=aws_access_key,
aws_secret_access_key=aws_secret_key,
region_name=region_name)
# Get all provisioned instances
instances = ec2.describe_instances()
#logger.info('Instances: {}'.format(pprint.pformat(instances)))
all_reservations = instances.get('Reservations')
#logger.info('Reservations: {}'.format(pprint.pformat(all_reservations)))
# Filter instances for the given stack-id
stack_id_key = 'aws:cloudformation:stack-id'
reservations = filter_reservations_tags(all_reservations, stack_id_key, stack_id)
if not reservations:
logger.error('Unable to find any reservations with {} = {}.'.format(stack_id_key, stack_id))
return 1
logger.info('Found {} reservations with {} = {}'.format(len(reservations), stack_id_key, stack_id))
# Extract all the instance objects
instances = enumerate_instances(reservations)
#logger.info('Reservation instances:\n{}'.format(pprint.pformat(instances)))
# Extract the public host from our list of instances
gateway_instance = filter_gateway_instance(instances)
#logger.info('Gateway instance:\n{}'.format(pprint.pformat(gateway_instance)))
# This gateway ip will be used as a jump host for SSH into private nodes
gateway_ip = gateway_instance.get('PublicIpAddress')
logger.info('Gateway IP: {}'.format(gateway_ip))
# Attach EBS volumes to private instances only
private_instances = filter_instances_private(instances)
for instance in private_instances:
# If an instance is not running, ignore it.
if instance.get('State').get('Name') != 'running':
logger.info('Ignoring instance that is not running: {}'.format(instance))
continue
instance_id = instance['InstanceId']
azone = instance['Placement']['AvailabilityZone']
# Create volume for the instance in the same AvailabilityZone
volume = create_volume(ec2, azone)
logger.info('Creating volume: {}'.format(volume))
volume_id = volume['VolumeId']
# Wait for volume to be available.
volume_waiter = ec2.get_waiter('volume_available')
attempts = 0
max_attempts = 16
wait_time = 1
while attempts < max_attempts:
attempts += 1
try:
volume_waiter.wait(VolumeIds=[volume_id])
logger.info('Volume: {} is now available'.format(volume_id))
break
except botocore.exceptions.WaiterError as e:
logger.error('Error occurred: {}'.format(e))
raise e
except botocore.exceptions.ClientError as e:
logger.error('Error occurred: {}'.format(e))
if e.response['Error']['Code'] == 'RequestLimitExceeded':
curr_wait_time = 2**attempts * wait_time
logger.error('Waiting {} seconds before retrying.'.format(curr_wait_time))
time.sleep(curr_wait_time)
else:
raise e
# Attach the volume to our instance.
att_res = attach_volume(ec2, volume_id=volume_id, instance_id=instance_id)
logger.info('Attaching volume: {}'.format(att_res))
# Wait for volume to attach.
volume_attach = ec2.get_waiter('volume_in_use')
attempts = 0
max_attempts = 16
wait_time = 1
while attempts < max_attempts:
attempts += 1
try:
volume_attach.wait(VolumeIds=[volume_id])
logger.info('Volume: {} is now attached to instance: {}'.format(volume_id, instance_id))
break
except botocore.exceptions.WaiterError as e:
logger.error('Error occurred: {}'.format(e))
raise e
except botocore.exceptions.ClientError as e:
logger.error('Error occurred: {}'.format(e))
if e.response['Error']['Code'] == 'RequestLimitExceeded':
curr_wait_time = 2**attempts * wait_time
logger.error('Waiting {} seconds before retrying.'.format(curr_wait_time))
time.sleep(curr_wait_time)
else:
raise e
conf_res = configure_delete_on_termination(ec2, volume_id=volume_id, instance_id=instance_id)
logger.info('Delete on termination: {}'.format(conf_res))
tag_res = tag_volume(ec2, volume_id=volume_id)
logger.info('Tag volume: {}'.format(tag_res))
private_ip = instance.get('PrivateIpAddress')
env.hosts = [private_ip]
env.gateway = gateway_ip
env.user = 'core'
logger.info('Creating partitions on agent: {}'.format(private_ip))
execute(configure_device, '/dev/xvdm', stdout)
logger.info('Restarting agent so that it sees the partitions: {}'.format(private_ip))
execute(configure_mesos, stdout)
logger.info('Mount volumes enabled. Exiting now...')
return 0
if __name__ == '__main__':
sys.exit(main())
|
|
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved
# Python
from __future__ import absolute_import
import os
import unittest2 as unittest
# Django
import django
from django.core.urlresolvers import reverse
# AWX
from awx.main.models import * # noqa
from awx.main.tests.job_base import BaseJobTestMixin
import yaml
__all__ = ['JobTemplateLaunchTest', 'JobTemplateLaunchPasswordsTest']
@unittest.skipIf(os.environ.get('SKIP_SLOW_TESTS', False), 'Skipping slow test')
class JobTemplateLaunchTest(BaseJobTestMixin, django.test.TransactionTestCase):
def setUp(self):
super(JobTemplateLaunchTest, self).setUp()
self.url = reverse('api:job_template_list')
self.data = dict(
name = 'launched job template',
job_type = PERM_INVENTORY_DEPLOY,
inventory = self.inv_eng.pk,
project = self.proj_dev.pk,
credential = self.cred_sue.pk,
playbook = self.proj_dev.playbooks[0],
ask_variables_on_launch = True,
ask_credential_on_launch = True,
)
self.data_no_cred = dict(
name = 'launched job template no credential',
job_type = PERM_INVENTORY_DEPLOY,
inventory = self.inv_eng.pk,
project = self.proj_dev.pk,
playbook = self.proj_dev.playbooks[0],
ask_credential_on_launch = True,
ask_variables_on_launch = True,
)
self.data_cred_ask = dict(self.data)
self.data_cred_ask['name'] = 'launched job template with ask passwords'
self.data_cred_ask['credential'] = self.cred_sue_ask.pk
with self.current_user(self.user_sue):
response = self.post(self.url, self.data, expect=201)
self.launch_url = reverse('api:job_template_launch',
args=(response['id'],))
def test_launch_job_template(self):
with self.current_user(self.user_sue):
self.data['name'] = 'something different'
response = self.post(self.url, self.data, expect=201)
detail_url = reverse('api:job_template_detail',
args=(response['id'],))
self.assertEquals(response['url'], detail_url)
def test_no_cred_update_template(self):
# You can still post the job template without a credential, just can't launch it without one
with self.current_user(self.user_sue):
response = self.post(self.url, self.data_no_cred, expect=201)
detail_url = reverse('api:job_template_detail',
args=(response['id'],))
self.assertEquals(response['url'], detail_url)
def test_invalid_auth_unauthorized(self):
# Invalid auth can't trigger the launch endpoint
self.check_invalid_auth(self.launch_url, {}, methods=('post',))
def test_credential_implicit(self):
# Implicit, attached credentials
with self.current_user(self.user_sue):
response = self.post(self.launch_url, {}, expect=201)
j = Job.objects.get(pk=response['job'])
self.assertTrue(j.status == 'new')
def test_launch_extra_vars_json(self):
# Sending extra_vars as a JSON string, implicit credentials
with self.current_user(self.user_sue):
data = dict(extra_vars = '{\"a\":3}')
response = self.post(self.launch_url, data, expect=201)
j = Job.objects.get(pk=response['job'])
ev_dict = yaml.load(j.extra_vars)
self.assertIn('a', ev_dict)
if 'a' in ev_dict:
self.assertEqual(ev_dict['a'], 3)
def test_launch_extra_vars_yaml(self):
# Sending extra_vars as a YAML string, implicit credentials
with self.current_user(self.user_sue):
data = dict(extra_vars = 'a: 3')
response = self.post(self.launch_url, data, expect=201)
j = Job.objects.get(pk=response['job'])
ev_dict = yaml.load(j.extra_vars)
self.assertIn('a', ev_dict)
if 'a' in ev_dict:
self.assertEqual(ev_dict['a'], 3)
def test_credential_explicit(self):
# Explicit, credential
with self.current_user(self.user_sue):
self.cred_sue.delete()
response = self.post(self.launch_url, {'credential': self.cred_doug.pk}, expect=201)
j = Job.objects.get(pk=response['job'])
self.assertEqual(j.status, 'new')
self.assertEqual(j.credential.pk, self.cred_doug.pk)
def test_credential_explicit_via_credential_id(self):
# Explicit, credential
with self.current_user(self.user_sue):
self.cred_sue.delete()
response = self.post(self.launch_url, {'credential_id': self.cred_doug.pk}, expect=201)
j = Job.objects.get(pk=response['job'])
self.assertEqual(j.status, 'new')
self.assertEqual(j.credential.pk, self.cred_doug.pk)
def test_credential_override(self):
# Explicit, credential
with self.current_user(self.user_sue):
response = self.post(self.launch_url, {'credential': self.cred_doug.pk}, expect=201)
j = Job.objects.get(pk=response['job'])
self.assertEqual(j.status, 'new')
self.assertEqual(j.credential.pk, self.cred_doug.pk)
def test_credential_override_via_credential_id(self):
# Explicit, credential
with self.current_user(self.user_sue):
response = self.post(self.launch_url, {'credential_id': self.cred_doug.pk}, expect=201)
j = Job.objects.get(pk=response['job'])
self.assertEqual(j.status, 'new')
self.assertEqual(j.credential.pk, self.cred_doug.pk)
def test_bad_credential_launch_fail(self):
# Can't launch a job template without a credential defined (or if we
# pass an invalid/inactive credential value).
with self.current_user(self.user_sue):
self.cred_sue.delete()
self.post(self.launch_url, {}, expect=400)
self.post(self.launch_url, {'credential': 0}, expect=400)
self.post(self.launch_url, {'credential_id': 0}, expect=400)
self.post(self.launch_url, {'credential': 'one'}, expect=400)
self.post(self.launch_url, {'credential_id': 'one'}, expect=400)
cred_doug_pk = self.cred_doug.pk
self.cred_doug.delete()
self.post(self.launch_url, {'credential': cred_doug_pk}, expect=400)
self.post(self.launch_url, {'credential_id': cred_doug_pk}, expect=400)
def test_explicit_unowned_cred(self):
# Explicitly specify a credential that we don't have access to
with self.current_user(self.user_juan):
launch_url = reverse('api:job_template_launch',
args=(self.jt_eng_run.pk,))
self.post(launch_url, {'credential_id': self.cred_sue.pk}, expect=403)
def test_no_project_fail(self):
# Job Templates without projects cannot be launched
with self.current_user(self.user_sue):
self.data['name'] = "missing proj"
response = self.post(self.url, self.data, expect=201)
jt = JobTemplate.objects.get(pk=response['id'])
jt.project = None
jt.save()
launch_url2 = reverse('api:job_template_launch',
args=(response['id'],))
self.post(launch_url2, {}, expect=400)
def test_no_inventory_fail(self):
# Job Templates without inventory cannot be launched
with self.current_user(self.user_sue):
self.data['name'] = "missing inv"
response = self.post(self.url, self.data, expect=201)
jt = JobTemplate.objects.get(pk=response['id'])
jt.inventory = None
jt.save()
launch_url3 = reverse('api:job_template_launch',
args=(response['id'],))
self.post(launch_url3, {}, expect=400)
def test_deleted_credential_fail(self):
# Job Templates with deleted credentials cannot be launched.
self.cred_sue.delete()
with self.current_user(self.user_sue):
self.post(self.launch_url, {}, expect=400)
@unittest.skipIf(os.environ.get('SKIP_SLOW_TESTS', False), 'Skipping slow test')
class JobTemplateLaunchPasswordsTest(BaseJobTestMixin, django.test.TransactionTestCase):
def setUp(self):
super(JobTemplateLaunchPasswordsTest, self).setUp()
self.url = reverse('api:job_template_list')
self.data = dict(
name = 'launched job template',
job_type = PERM_INVENTORY_DEPLOY,
inventory = self.inv_eng.pk,
project = self.proj_dev.pk,
credential = self.cred_sue_ask.pk,
playbook = self.proj_dev.playbooks[0],
ask_credential_on_launch = True,
)
with self.current_user(self.user_sue):
response = self.post(self.url, self.data, expect=201)
self.launch_url = reverse('api:job_template_launch',
args=(response['id'],))
# should return explicit credentials required passwords
def test_explicit_cred_with_ask_passwords_fail(self):
passwords_required = ['ssh_password', 'become_password', 'ssh_key_unlock']
# Explicitly passing a credential that asks for passwords should fail
# and report which passwords are needed to start.
with self.current_user(self.user_sue):
self.cred_sue_ask.delete()
response = self.post(self.launch_url, {'credential_id': self.cred_sue_ask_many.pk}, expect=400)
for p in passwords_required:
self.assertIn(p, response['passwords_needed_to_start'])
self.assertEqual(len(passwords_required), len(response['passwords_needed_to_start']))
def test_explicit_cred_with_ask_password(self):
with self.current_user(self.user_sue):
response = self.post(self.launch_url, {'ssh_password': 'whatever'}, expect=201)
j = Job.objects.get(pk=response['job'])
self.assertEqual(j.status, 'new')
def test_explicit_cred_with_ask_password_empty_string_fail(self):
with self.current_user(self.user_sue):
response = self.post(self.launch_url, {'ssh_password': ''}, expect=400)
self.assertIn('ssh_password', response['passwords_needed_to_start'])
|
|
from __future__ import unicode_literals
from datetime import date
from django import forms
from django.contrib.admin.models import LogEntry
from django.contrib.admin.options import (
HORIZONTAL, VERTICAL, ModelAdmin, TabularInline,
)
from django.contrib.admin.sites import AdminSite
from django.contrib.admin.widgets import AdminDateWidget, AdminRadioSelect
from django.contrib.auth.models import User
from django.forms.widgets import Select
from django.test import SimpleTestCase, TestCase
from .models import Band, Concert
class MockRequest(object):
pass
class MockSuperUser(object):
def has_perm(self, perm):
return True
request = MockRequest()
request.user = MockSuperUser()
class ModelAdminTests(TestCase):
def setUp(self):
self.band = Band.objects.create(
name='The Doors',
bio='',
sign_date=date(1965, 1, 1),
)
self.site = AdminSite()
# form/fields/fieldsets interaction ##############################
def test_default_fields(self):
ma = ModelAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['name', 'bio', 'sign_date'])
self.assertEqual(list(ma.get_fields(request)), ['name', 'bio', 'sign_date'])
self.assertEqual(list(ma.get_fields(request, self.band)), ['name', 'bio', 'sign_date'])
self.assertIsNone(ma.get_exclude(request, self.band))
def test_default_fieldsets(self):
# fieldsets_add and fieldsets_change should return a special data structure that
# is used in the templates. They should generate the "right thing" whether we
# have specified a custom form, the fields argument, or nothing at all.
#
# Here's the default case. There are no custom form_add/form_change methods,
# no fields argument, and no fieldsets argument.
ma = ModelAdmin(Band, self.site)
self.assertEqual(ma.get_fieldsets(request), [(None, {'fields': ['name', 'bio', 'sign_date']})])
self.assertEqual(ma.get_fieldsets(request, self.band), [(None, {'fields': ['name', 'bio', 'sign_date']})])
def test_get_fieldsets(self):
# get_fieldsets() is called when figuring out form fields (#18681).
class BandAdmin(ModelAdmin):
def get_fieldsets(self, request, obj=None):
return [(None, {'fields': ['name', 'bio']})]
ma = BandAdmin(Band, self.site)
form = ma.get_form(None)
self.assertEqual(form._meta.fields, ['name', 'bio'])
class InlineBandAdmin(TabularInline):
model = Concert
fk_name = 'main_band'
can_delete = False
def get_fieldsets(self, request, obj=None):
return [(None, {'fields': ['day', 'transport']})]
ma = InlineBandAdmin(Band, self.site)
form = ma.get_formset(None).form
self.assertEqual(form._meta.fields, ['day', 'transport'])
def test_lookup_allowed_allows_nonexistent_lookup(self):
"""
lookup_allowed() allows a parameter whose field lookup doesn't exist
(#21129).
"""
class BandAdmin(ModelAdmin):
fields = ['name']
ma = BandAdmin(Band, self.site)
self.assertTrue(ma.lookup_allowed('name__nonexistent', 'test_value'))
def test_field_arguments(self):
# If fields is specified, fieldsets_add and fieldsets_change should
# just stick the fields into a formsets structure and return it.
class BandAdmin(ModelAdmin):
fields = ['name']
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_fields(request)), ['name'])
self.assertEqual(list(ma.get_fields(request, self.band)), ['name'])
self.assertEqual(ma.get_fieldsets(request), [(None, {'fields': ['name']})])
self.assertEqual(ma.get_fieldsets(request, self.band), [(None, {'fields': ['name']})])
def test_field_arguments_restricted_on_form(self):
# If fields or fieldsets is specified, it should restrict fields on the
# Form class to the fields specified. This may cause errors to be
# raised in the db layer if required model fields aren't in fields/
# fieldsets, but that's preferable to ghost errors where a field in the
# Form class isn't being displayed because it's not in fields/fieldsets.
# Using `fields`.
class BandAdmin(ModelAdmin):
fields = ['name']
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['name'])
self.assertEqual(list(ma.get_form(request, self.band).base_fields), ['name'])
# Using `fieldsets`.
class BandAdmin(ModelAdmin):
fieldsets = [(None, {'fields': ['name']})]
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['name'])
self.assertEqual(list(ma.get_form(request, self.band).base_fields), ['name'])
# Using `exclude`.
class BandAdmin(ModelAdmin):
exclude = ['bio']
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['name', 'sign_date'])
# You can also pass a tuple to `exclude`.
class BandAdmin(ModelAdmin):
exclude = ('bio',)
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['name', 'sign_date'])
# Using `fields` and `exclude`.
class BandAdmin(ModelAdmin):
fields = ['name', 'bio']
exclude = ['bio']
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['name'])
def test_custom_form_meta_exclude_with_readonly(self):
"""
The custom ModelForm's `Meta.exclude` is respected when used in
conjunction with `ModelAdmin.readonly_fields` and when no
`ModelAdmin.exclude` is defined (#14496).
"""
# With ModelAdmin
class AdminBandForm(forms.ModelForm):
class Meta:
model = Band
exclude = ['bio']
class BandAdmin(ModelAdmin):
readonly_fields = ['name']
form = AdminBandForm
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['sign_date'])
# With InlineModelAdmin
class AdminConcertForm(forms.ModelForm):
class Meta:
model = Concert
exclude = ['day']
class ConcertInline(TabularInline):
readonly_fields = ['transport']
form = AdminConcertForm
fk_name = 'main_band'
model = Concert
class BandAdmin(ModelAdmin):
inlines = [ConcertInline]
ma = BandAdmin(Band, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
['main_band', 'opening_band', 'id', 'DELETE'])
def test_custom_formfield_override_readonly(self):
class AdminBandForm(forms.ModelForm):
name = forms.CharField()
class Meta:
exclude = tuple()
model = Band
class BandAdmin(ModelAdmin):
form = AdminBandForm
readonly_fields = ['name']
ma = BandAdmin(Band, self.site)
# `name` shouldn't appear in base_fields because it's part of
# readonly_fields.
self.assertEqual(
list(ma.get_form(request).base_fields),
['bio', 'sign_date']
)
# But it should appear in get_fields()/fieldsets() so it can be
# displayed as read-only.
self.assertEqual(
list(ma.get_fields(request)),
['bio', 'sign_date', 'name']
)
self.assertEqual(
list(ma.get_fieldsets(request)),
[(None, {'fields': ['bio', 'sign_date', 'name']})]
)
def test_custom_form_meta_exclude(self):
"""
The custom ModelForm's `Meta.exclude` is overridden if
`ModelAdmin.exclude` or `InlineModelAdmin.exclude` are defined (#14496).
"""
# With ModelAdmin
class AdminBandForm(forms.ModelForm):
class Meta:
model = Band
exclude = ['bio']
class BandAdmin(ModelAdmin):
exclude = ['name']
form = AdminBandForm
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['bio', 'sign_date'])
# With InlineModelAdmin
class AdminConcertForm(forms.ModelForm):
class Meta:
model = Concert
exclude = ['day']
class ConcertInline(TabularInline):
exclude = ['transport']
form = AdminConcertForm
fk_name = 'main_band'
model = Concert
class BandAdmin(ModelAdmin):
inlines = [ConcertInline]
ma = BandAdmin(Band, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
['main_band', 'opening_band', 'day', 'id', 'DELETE']
)
def test_overriding_get_exclude(self):
class BandAdmin(ModelAdmin):
def get_exclude(self, request, obj=None):
return ['name']
self.assertEqual(
list(BandAdmin(Band, self.site).get_form(request).base_fields),
['bio', 'sign_date']
)
def test_get_exclude_overrides_exclude(self):
class BandAdmin(ModelAdmin):
exclude = ['bio']
def get_exclude(self, request, obj=None):
return ['name']
self.assertEqual(
list(BandAdmin(Band, self.site).get_form(request).base_fields),
['bio', 'sign_date']
)
def test_get_exclude_takes_obj(self):
class BandAdmin(ModelAdmin):
def get_exclude(self, request, obj=None):
if obj:
return ['sign_date']
return ['name']
self.assertEqual(
list(BandAdmin(Band, self.site).get_form(request, self.band).base_fields),
['name', 'bio']
)
def test_custom_form_validation(self):
# If a form is specified, it should use it allowing custom validation
# to work properly. This won't break any of the admin widgets or media.
class AdminBandForm(forms.ModelForm):
delete = forms.BooleanField()
class BandAdmin(ModelAdmin):
form = AdminBandForm
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['name', 'bio', 'sign_date', 'delete'])
self.assertEqual(type(ma.get_form(request).base_fields['sign_date'].widget), AdminDateWidget)
def test_form_exclude_kwarg_override(self):
"""
The `exclude` kwarg passed to `ModelAdmin.get_form()` overrides all
other declarations (#8999).
"""
class AdminBandForm(forms.ModelForm):
class Meta:
model = Band
exclude = ['name']
class BandAdmin(ModelAdmin):
exclude = ['sign_date']
form = AdminBandForm
def get_form(self, request, obj=None, **kwargs):
kwargs['exclude'] = ['bio']
return super(BandAdmin, self).get_form(request, obj, **kwargs)
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['name', 'sign_date'])
def test_formset_exclude_kwarg_override(self):
"""
The `exclude` kwarg passed to `InlineModelAdmin.get_formset()`
overrides all other declarations (#8999).
"""
class AdminConcertForm(forms.ModelForm):
class Meta:
model = Concert
exclude = ['day']
class ConcertInline(TabularInline):
exclude = ['transport']
form = AdminConcertForm
fk_name = 'main_band'
model = Concert
def get_formset(self, request, obj=None, **kwargs):
kwargs['exclude'] = ['opening_band']
return super(ConcertInline, self).get_formset(request, obj, **kwargs)
class BandAdmin(ModelAdmin):
inlines = [ConcertInline]
ma = BandAdmin(Band, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
['main_band', 'day', 'transport', 'id', 'DELETE']
)
def test_formset_overriding_get_exclude_with_form_fields(self):
class AdminConcertForm(forms.ModelForm):
class Meta:
model = Concert
fields = ['main_band', 'opening_band', 'day', 'transport']
class ConcertInline(TabularInline):
form = AdminConcertForm
fk_name = 'main_band'
model = Concert
def get_exclude(self, request, obj=None):
return ['opening_band']
class BandAdmin(ModelAdmin):
inlines = [ConcertInline]
ma = BandAdmin(Band, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
['main_band', 'day', 'transport', 'id', 'DELETE']
)
def test_formset_overriding_get_exclude_with_form_exclude(self):
class AdminConcertForm(forms.ModelForm):
class Meta:
model = Concert
exclude = ['day']
class ConcertInline(TabularInline):
form = AdminConcertForm
fk_name = 'main_band'
model = Concert
def get_exclude(self, request, obj=None):
return ['opening_band']
class BandAdmin(ModelAdmin):
inlines = [ConcertInline]
ma = BandAdmin(Band, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
['main_band', 'day', 'transport', 'id', 'DELETE']
)
def test_queryset_override(self):
# If the queryset of a ModelChoiceField in a custom form is overridden,
# RelatedFieldWidgetWrapper doesn't mess that up.
band2 = Band.objects.create(name='The Beatles', bio='', sign_date=date(1962, 1, 1))
ma = ModelAdmin(Concert, self.site)
form = ma.get_form(request)()
self.assertHTMLEqual(
str(form["main_band"]),
'<div class="related-widget-wrapper">'
'<select name="main_band" id="id_main_band" required>'
'<option value="" selected>---------</option>'
'<option value="%d">The Beatles</option>'
'<option value="%d">The Doors</option>'
'</select></div>' % (band2.id, self.band.id)
)
class AdminConcertForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(AdminConcertForm, self).__init__(*args, **kwargs)
self.fields["main_band"].queryset = Band.objects.filter(name='The Doors')
class ConcertAdminWithForm(ModelAdmin):
form = AdminConcertForm
ma = ConcertAdminWithForm(Concert, self.site)
form = ma.get_form(request)()
self.assertHTMLEqual(
str(form["main_band"]),
'<div class="related-widget-wrapper">'
'<select name="main_band" id="id_main_band" required>'
'<option value="" selected>---------</option>'
'<option value="%d">The Doors</option>'
'</select></div>' % self.band.id
)
def test_regression_for_ticket_15820(self):
"""
`obj` is passed from `InlineModelAdmin.get_fieldsets()` to
`InlineModelAdmin.get_formset()`.
"""
class CustomConcertForm(forms.ModelForm):
class Meta:
model = Concert
fields = ['day']
class ConcertInline(TabularInline):
model = Concert
fk_name = 'main_band'
def get_formset(self, request, obj=None, **kwargs):
if obj:
kwargs['form'] = CustomConcertForm
return super(ConcertInline, self).get_formset(request, obj, **kwargs)
class BandAdmin(ModelAdmin):
inlines = [ConcertInline]
Concert.objects.create(main_band=self.band, opening_band=self.band, day=1)
ma = BandAdmin(Band, self.site)
inline_instances = ma.get_inline_instances(request)
fieldsets = list(inline_instances[0].get_fieldsets(request))
self.assertEqual(fieldsets[0][1]['fields'], ['main_band', 'opening_band', 'day', 'transport'])
fieldsets = list(inline_instances[0].get_fieldsets(request, inline_instances[0].model))
self.assertEqual(fieldsets[0][1]['fields'], ['day'])
# radio_fields behavior ###########################################
def test_default_foreign_key_widget(self):
# First, without any radio_fields specified, the widgets for ForeignKey
# and fields with choices specified ought to be a basic Select widget.
# ForeignKey widgets in the admin are wrapped with RelatedFieldWidgetWrapper so
# they need to be handled properly when type checking. For Select fields, all of
# the choices lists have a first entry of dashes.
cma = ModelAdmin(Concert, self.site)
cmafa = cma.get_form(request)
self.assertEqual(type(cmafa.base_fields['main_band'].widget.widget), Select)
self.assertEqual(
list(cmafa.base_fields['main_band'].widget.choices),
[('', '---------'), (self.band.id, 'The Doors')])
self.assertEqual(type(cmafa.base_fields['opening_band'].widget.widget), Select)
self.assertEqual(
list(cmafa.base_fields['opening_band'].widget.choices),
[('', '---------'), (self.band.id, 'The Doors')]
)
self.assertEqual(type(cmafa.base_fields['day'].widget), Select)
self.assertEqual(
list(cmafa.base_fields['day'].widget.choices),
[('', '---------'), (1, 'Fri'), (2, 'Sat')]
)
self.assertEqual(type(cmafa.base_fields['transport'].widget), Select)
self.assertEqual(
list(cmafa.base_fields['transport'].widget.choices),
[('', '---------'), (1, 'Plane'), (2, 'Train'), (3, 'Bus')])
def test_foreign_key_as_radio_field(self):
# Now specify all the fields as radio_fields. Widgets should now be
# RadioSelect, and the choices list should have a first entry of 'None' if
# blank=True for the model field. Finally, the widget should have the
# 'radiolist' attr, and 'inline' as well if the field is specified HORIZONTAL.
class ConcertAdmin(ModelAdmin):
radio_fields = {
'main_band': HORIZONTAL,
'opening_band': VERTICAL,
'day': VERTICAL,
'transport': HORIZONTAL,
}
cma = ConcertAdmin(Concert, self.site)
cmafa = cma.get_form(request)
self.assertEqual(type(cmafa.base_fields['main_band'].widget.widget), AdminRadioSelect)
self.assertEqual(cmafa.base_fields['main_band'].widget.attrs, {'class': 'radiolist inline'})
self.assertEqual(
list(cmafa.base_fields['main_band'].widget.choices),
[(self.band.id, 'The Doors')]
)
self.assertEqual(type(cmafa.base_fields['opening_band'].widget.widget), AdminRadioSelect)
self.assertEqual(cmafa.base_fields['opening_band'].widget.attrs, {'class': 'radiolist'})
self.assertEqual(
list(cmafa.base_fields['opening_band'].widget.choices),
[('', 'None'), (self.band.id, 'The Doors')]
)
self.assertEqual(type(cmafa.base_fields['day'].widget), AdminRadioSelect)
self.assertEqual(cmafa.base_fields['day'].widget.attrs, {'class': 'radiolist'})
self.assertEqual(list(cmafa.base_fields['day'].widget.choices), [(1, 'Fri'), (2, 'Sat')])
self.assertEqual(type(cmafa.base_fields['transport'].widget), AdminRadioSelect)
self.assertEqual(cmafa.base_fields['transport'].widget.attrs, {'class': 'radiolist inline'})
self.assertEqual(
list(cmafa.base_fields['transport'].widget.choices),
[('', 'None'), (1, 'Plane'), (2, 'Train'), (3, 'Bus')]
)
class AdminConcertForm(forms.ModelForm):
class Meta:
model = Concert
exclude = ('transport',)
class ConcertAdmin(ModelAdmin):
form = AdminConcertForm
ma = ConcertAdmin(Concert, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['main_band', 'opening_band', 'day'])
class AdminConcertForm(forms.ModelForm):
extra = forms.CharField()
class Meta:
model = Concert
fields = ['extra', 'transport']
class ConcertAdmin(ModelAdmin):
form = AdminConcertForm
ma = ConcertAdmin(Concert, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['extra', 'transport'])
class ConcertInline(TabularInline):
form = AdminConcertForm
model = Concert
fk_name = 'main_band'
can_delete = True
class BandAdmin(ModelAdmin):
inlines = [ConcertInline]
ma = BandAdmin(Band, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
['extra', 'transport', 'id', 'DELETE', 'main_band']
)
def test_log_actions(self):
ma = ModelAdmin(Band, self.site)
mock_request = MockRequest()
mock_request.user = User.objects.create(username='bill')
self.assertEqual(ma.log_addition(mock_request, self.band, 'added'), LogEntry.objects.latest('id'))
self.assertEqual(ma.log_change(mock_request, self.band, 'changed'), LogEntry.objects.latest('id'))
self.assertEqual(ma.log_deletion(mock_request, self.band, 'deleted'), LogEntry.objects.latest('id'))
class ModelAdminPermissionTests(SimpleTestCase):
class MockUser(object):
def has_module_perms(self, app_label):
if app_label == "modeladmin":
return True
return False
class MockAddUser(MockUser):
def has_perm(self, perm):
if perm == "modeladmin.add_band":
return True
return False
class MockChangeUser(MockUser):
def has_perm(self, perm):
if perm == "modeladmin.change_band":
return True
return False
class MockDeleteUser(MockUser):
def has_perm(self, perm):
if perm == "modeladmin.delete_band":
return True
return False
def test_has_add_permission(self):
"""
has_add_permission returns True for users who can add objects and
False for users who can't.
"""
ma = ModelAdmin(Band, AdminSite())
request = MockRequest()
request.user = self.MockAddUser()
self.assertTrue(ma.has_add_permission(request))
request.user = self.MockChangeUser()
self.assertFalse(ma.has_add_permission(request))
request.user = self.MockDeleteUser()
self.assertFalse(ma.has_add_permission(request))
def test_has_change_permission(self):
"""
has_change_permission returns True for users who can edit objects and
False for users who can't.
"""
ma = ModelAdmin(Band, AdminSite())
request = MockRequest()
request.user = self.MockAddUser()
self.assertFalse(ma.has_change_permission(request))
request.user = self.MockChangeUser()
self.assertTrue(ma.has_change_permission(request))
request.user = self.MockDeleteUser()
self.assertFalse(ma.has_change_permission(request))
def test_has_delete_permission(self):
"""
has_delete_permission returns True for users who can delete objects and
False for users who can't.
"""
ma = ModelAdmin(Band, AdminSite())
request = MockRequest()
request.user = self.MockAddUser()
self.assertFalse(ma.has_delete_permission(request))
request.user = self.MockChangeUser()
self.assertFalse(ma.has_delete_permission(request))
request.user = self.MockDeleteUser()
self.assertTrue(ma.has_delete_permission(request))
def test_has_module_permission(self):
"""
has_module_permission returns True for users who have any permission
for the module and False for users who don't.
"""
ma = ModelAdmin(Band, AdminSite())
request = MockRequest()
request.user = self.MockAddUser()
self.assertTrue(ma.has_module_permission(request))
request.user = self.MockChangeUser()
self.assertTrue(ma.has_module_permission(request))
request.user = self.MockDeleteUser()
self.assertTrue(ma.has_module_permission(request))
original_app_label = ma.opts.app_label
ma.opts.app_label = 'anotherapp'
try:
request.user = self.MockAddUser()
self.assertFalse(ma.has_module_permission(request))
request.user = self.MockChangeUser()
self.assertFalse(ma.has_module_permission(request))
request.user = self.MockDeleteUser()
self.assertFalse(ma.has_module_permission(request))
finally:
ma.opts.app_label = original_app_label
|
|
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
import os
import time
import novaclient.exceptions
from rally.common.i18n import _
from rally.common import log as logging
from rally.deployment.serverprovider import provider
from rally import exceptions
from rally import objects
from rally import osclients
from rally.task import utils
LOG = logging.getLogger(__name__)
SERVER_TYPE = "server"
KEYPAIR_TYPE = "keypair"
def _get_address(s):
if s.accessIPv4:
return s.accessIPv4
if s.accessIPv6:
return s.accessIPv6
for a in itertools.chain(s.addresses.get("public", []),
*s.addresses.values()):
return a["addr"]
raise RuntimeError("No address found for %s" % s)
@provider.configure(name="OpenStackProvider")
class OpenStackProvider(provider.ProviderFactory):
"""Provide VMs using an existing OpenStack cloud.
Sample configuration:
{
"type": "OpenStackProvider",
"amount": 42
"user": "admin",
"tenant": "admin",
"password": "secret",
"auth_url": "http://example.com/",
"flavor_id": 2,
"image": {
"checksum": "75846dd06e9fcfd2b184aba7fa2b2a8d",
"url": "http://example.com/disk1.img",
"name": "Ubuntu Precise(added by rally)",
"format": "qcow2",
"userdata": "#cloud-config\r\n disable_root: false"
}
}
"""
CONFIG_SCHEMA = {
"type": "object",
"properties": {
"type": {"type": "string"},
"deployment_name": {"type": "string"},
"amount": {"type": "integer"},
"user": {"type": "string"},
"nics": {"type": "array"},
"password": {"type": "string"},
"tenant": {"type": "string"},
"auth_url": {"type": "string"},
"region": {"type": "string"},
"config_drive": {"type": "boolean"},
"flavor_id": {"type": "string"},
"image": {
"type": "object",
"properties": {
"checksum": {"type": "string"},
"name": {"type": "string"},
"format": {"type": "string"},
"userdata": {"type": "string"},
"url": {"type": "string"},
"uuid": {"type": "string"},
},
"additionalProperties": False,
"anyOf": [
{
"title": "Create Image",
"required": ["name", "format", "url", "checksum"],
},
{
"title": "Existing image from checksum",
"required": ["checksum"]
},
{
"title": "Existing image from uuid",
"required": ["uuid"]
}
]
},
},
"additionalProperties": False,
"required": ["user", "password", "tenant", "deployment_name",
"auth_url", "flavor_id", "image"]
}
def __init__(self, deployment, config):
super(OpenStackProvider, self).__init__(deployment, config)
user_endpoint = objects.Endpoint(config["auth_url"], config["user"],
config["password"], config["tenant"],
region_name=config.get("region"))
clients = osclients.Clients(user_endpoint)
self.nova = clients.nova()
try:
self.glance = clients.glance()
except KeyError:
self.glance = None
LOG.warning(_("Glance endpoint not available in service catalog"
", only existing images can be used"))
def get_image_uuid(self):
"""Get image uuid. Download image if necessary."""
image_uuid = self.config["image"].get("uuid", None)
if image_uuid:
return image_uuid
else:
if not self.glance:
raise exceptions.InvalidConfigException(
"If glance is not available in the service catalog"
" obtained by the openstack server provider, then"
" images cannot be uploaded so the uuid of an"
" existing image must be specified in the"
" deployment config."
)
for image in self.glance.images.list():
if image.checksum == self.config["image"]["checksum"]:
LOG.info(_("Found image with appropriate checksum. Using it."))
return image.id
LOG.info(_("Downloading new image %s") % self.config["image"]["url"])
image = self.glance.images.create(
name=self.config["image"]["name"],
copy_from=self.config["image"]["url"],
disk_format=self.config["image"]["format"],
container_format="bare")
image.get()
if image.checksum != self.config["image"]["checksum"]:
raise exceptions.ChecksumMismatch(url=self.config["image"]["url"])
return image.id
def get_userdata(self):
userdata = self.config["image"].get("userdata", None)
if userdata is not None:
return userdata
userdata = self.config["image"].get("userdata_file", None)
if userdata is not None:
userdata = open(userdata, "r")
return userdata
def create_keypair(self):
public_key_path = self.config.get(
"ssh_public_key_file", os.path.expanduser("~/.ssh/id_rsa.pub"))
public_key = open(public_key_path, "r").read().strip()
key_name = self.config["deployment_name"] + "-key"
try:
key = self.nova.keypairs.find(name=key_name)
self.nova.keypairs.delete(key.id)
except novaclient.exceptions.NotFound:
pass
keypair = self.nova.keypairs.create(key_name, public_key)
self.resources.create({"id": keypair.id}, type=KEYPAIR_TYPE)
return keypair, public_key_path
def get_nics(self):
return self.config.get("nics", None)
def create_servers(self):
"""Create VMs with chosen image."""
image_uuid = self.get_image_uuid()
userdata = self.get_userdata()
flavor = self.config["flavor_id"]
nics = self.get_nics()
keypair, public_key_path = self.create_keypair()
os_servers = []
for i in range(self.config.get("amount", 1)):
name = "%s-%d" % (self.config["deployment_name"], i)
server = self.nova.servers.create(
name, image_uuid, flavor,
nics=nics,
key_name=keypair.name,
userdata=userdata,
config_drive=self.config.get("config_drive", False))
os_servers.append(server)
self.resources.create({"id": server.id}, type=SERVER_TYPE)
kwargs = {
"is_ready": utils.resource_is("ACTIVE"),
"update_resource": utils.get_from_manager(),
"timeout": 120,
"check_interval": 5
}
for os_server in os_servers:
utils.wait_for(os_server, **kwargs)
servers = [provider.Server(host=_get_address(s),
user="root",
key=public_key_path)
for s in os_servers]
for s in servers:
s.ssh.wait(timeout=120, interval=5)
# NOTE(eyerediskin): usually ssh is ready much earlier than cloud-init
time.sleep(8)
return servers
def destroy_servers(self):
for resource in self.resources.get_all(type=SERVER_TYPE):
try:
self.nova.servers.delete(resource["info"]["id"])
except novaclient.exceptions.NotFound:
LOG.warning("Nova instance %s not found, so not deleting." %
resource["info"]["id"])
try:
self.resources.delete(resource.id)
except exceptions.ResourceNotFound:
LOG.warning(
"Instance resource record not found in DB, not removing."
" Deployment: %(deployment)s Instance ID:%(id)s"
" Instance Nova UUID:%(uuid)s" %
dict(deployment=resource.deployment_uuid,
id=resource.id,
uuid=resource["info"]["id"]
)
)
for resource in self.resources.get_all(type=KEYPAIR_TYPE):
try:
self.nova.keypairs.delete(resource["info"]["id"])
except novaclient.exceptions.NotFound:
LOG.warning("Nova keypair %s not found, so not deleting." %
resource["info"]["id"])
try:
self.resources.delete(resource.id)
except exceptions.ResourceNotFound:
LOG.warning(
"Keypair resource record not found in DB, not removing."
" Deployment: %(deployment)s Keypair ID:%(id)s"
" Keypair Name:%(name)s" %
dict(deployment=resource.deployment_uuid,
id=resource.id,
name=resource["info"]["id"]
)
)
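# Hedged usage sketch (not part of the provider itself): the expected call
# order when driving this provider from outside. The provider instance is
# constructed elsewhere and only passed in here.
def _example_provision_cycle(provider):
    servers = provider.create_servers()  # boots VMs, waits for ACTIVE state and ssh
    try:
        pass  # interact with `servers` here; each exposes host, user, key and ssh
    finally:
        provider.destroy_servers()  # removes the booted servers and the keypair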
|
|
# Copyright 2020-2021 Efabless Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import sys
from typing import Iterable, Optional
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
from utils.utils import *
from .get_file_name import get_name
def debug(*args, **kwargs):
if os.getenv("REPORT_INFRASTRUCTURE_VERBOSE") == "1":
print(*args, **kwargs)
def parse_to_report(input_log: str, output_report: str, start: str, end: Optional[str] = None):
"""
Parses a log in the format
START_MARKER
data
END_MARKER
to a report file.
"""
if end is None:
end = f"{start}_end"
log_lines = open(input_log).read().split("\n")
with open(output_report, "w") as f:
started = False
for line in log_lines:
if line.strip() == end:
break
if started:
f.write(line)
if line.strip() == start:
started = True
if not started:
f.write("SKIPPED!")
class Artifact(object):
def __init__(self, run_path: str, kind: str, step: str, filename: str, find_by_partial_match: bool = False):
self.run_path = run_path
self.kind = kind
self.step = step
self.pathname = os.path.join(self.run_path, self.kind, self.step)
self.filename = filename
self.path = get_name(self.pathname, self.filename, find_by_partial_match)
if self.is_valid():
debug(f"Resolved {kind}, {step}, {filename} to {self.path}")
else:
debug(f"Failed to resolve {kind}, {step}, {filename}")
def is_valid(self) -> bool:
valid = os.path.exists(self.path) and os.path.isfile(self.path)
return valid
def get_content(self) -> Optional[str]:
if not self.is_valid():
return None
return open(self.path).read()
def is_logtoreport_valid(self) -> bool:
return self.is_valid() and os.path.getsize(self.path) > 10 # >10 bytes is a magic number, yes. It was this way in the script I rewrote and I'm not a fan of shaking beehives.
def log_to_report(self, report_name: str, start: str, end: Optional[str] = None):
report_path = os.path.join(self.run_path, "reports", self.step, report_name)
if not self.is_logtoreport_valid():
with open(report_path, "w") as f:
f.write(f"{self.step}:{self.filename} not found or empty.")
return
parse_to_report(self.path, report_path, start, end)
def generate_reports(self, *args: Iterable[Iterable[str]]):
for report in args:
filename = report[0]
start = report[1]
end = None
try:
end = report[2]
            except IndexError:  # the end marker is optional
pass
self.log_to_report(filename, start, end)
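# Hedged sketch of typical Artifact usage (the run directory layout is assumed
# to already exist): resolve one log file, then slice named sections of it
# into separate report files.
def _example_artifact_usage(run_path: str):
    cts_log = Artifact(run_path, "logs", "cts", "cts.log")
    if cts_log.is_valid():
        cts_log.generate_reports(
            ("cts.rpt", "check_report"),          # (report filename, start marker)
            ("cts.timing.rpt", "timing_report"),  # end marker defaults to "<start>_end"
        )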
class Report(object):
def __init__(self, design_path, tag, design_name, params, run_path=None):
self.design_path = design_path
self.design_name = design_name
self.tag = tag
self.current_directory = os.path.dirname(__file__)
if run_path is None:
run_path=get_run_path(design=design_path, tag=tag)
self.run_path = run_path
self.configuration = params
self.raw_report = None
self.formatted_report = None
values = [
'design',
'design_name',
'config',
'flow_status',
'total_runtime',
'routed_runtime',
'DIEAREA_mm^2',
'CellPer_mm^2' ,
'OpenDP_Util',
'Peak_Memory_Usage_MB',
'cell_count',
'tritonRoute_violations',
'Short_violations',
'MetSpc_violations',
'OffGrid_violations',
'MinHole_violations',
'Other_violations',
'Magic_violations',
'antenna_violations',
'lvs_total_errors',
'cvc_total_errors',
'klayout_violations',
'wire_length',
'vias',
'wns',
'pl_wns',
'optimized_wns',
'fastroute_wns',
'spef_wns',
'tns',
'pl_tns',
'optimized_tns',
'fastroute_tns' ,
'spef_tns',
'HPWL',
'routing_layer1_pct',
'routing_layer2_pct',
'routing_layer3_pct',
'routing_layer4_pct',
'routing_layer5_pct',
'routing_layer6_pct',
'wires_count',
'wire_bits',
'public_wires_count',
'public_wire_bits',
'memories_count',
'memory_bits',
'processes_count',
'cells_pre_abc',
'AND',
'DFF',
'NAND',
'NOR',
'OR',
'XOR',
'XNOR',
'MUX',
'inputs',
'outputs',
'level',
'EndCaps',
'TapCells',
'Diodes',
'Total_Physical_Cells'
]
@classmethod
def get_header(Self):
header = ','.join(Self.values)
return header
def reports_from_logs(self):
rp = self.run_path
cts_log = Artifact(rp, "logs", "cts", "cts.log")
cts_log.generate_reports(
("cts.rpt", "check_report"),
("cts.timing.rpt", "timing_report"),
("cts.min_max.rpt", "min_max_report"),
("cts_wns.rpt", "wns_report"),
("cts_tns.rpt", "tns_report"),
("cts_clock_skew.rpt", "cts_clock_skew_report"),
)
routing_log = Artifact(rp, "logs", "routing", "fastroute.log")
routing_log.generate_reports(
("fastroute.rpt", "check_report"),
("fastroute.timing.rpt", "timing_report"),
("fastroute.min_max.rpt", "min_max_report"),
("fastroute_wns.rpt", "wns_report"),
("fastroute_tns.rpt", "tns_report")
)
placement_log = Artifact(rp, "logs", "placement", "replace.log")
placement_log.generate_reports(
("replace.rpt", "check_report"),
("replace.timing.rpt", "timing_report"),
("replace.min_max.rpt", "min_max_report"),
("replace_wns.rpt", "wns_report"),
("replace_tns.rpt", "tns_report")
)
sta_log = Artifact(rp, "logs", "synthesis", "opensta")
sta_log.generate_reports(
("opensta.rpt", "check_report"),
("opensta.timing.rpt", "timing_report"),
("opensta.min_max.rpt", "min_max_report"),
("opensta_wns.rpt", "wns_report"),
("opensta_tns.rpt", "tns_report"),
("opensta.slew.rpt", "check_slew")
)
sta_post_resizer_log = Artifact(rp, "logs", "synthesis", "opensta_post_resizer")
sta_post_resizer_log.generate_reports(
("opensta_post_resizer.rpt", "check_report"),
("opensta_post_resizer.timing.rpt", "timing_report"),
("opensta_post_resizer.min_max.rpt", "min_max_report"),
("opensta_post_resizer_wns.rpt", "wns_report"),
("opensta_post_resizer_tns.rpt", "tns_report"),
("opensta_post_resizer.slew.rpt", "check_slew")
)
sta_post_resizer_timing_log = Artifact(rp, "logs", "synthesis", "opensta_post_resizer_timing")
sta_post_resizer_timing_log.generate_reports(
("opensta_post_resizer_timing.rpt", "check_report"),
("opensta_post_resizer_timing.timing.rpt", "timing_report"),
("opensta_post_resizer_timing.min_max.rpt", "min_max_report"),
("opensta_post_resizer_timing_wns.rpt", "wns_report"),
("opensta_post_resizer_timing_tns.rpt", "tns_report"),
("opensta_post_resizer_timing.slew.rpt", "check_slew")
)
sta_post_resizer_routing_timing_log = Artifact(rp, "logs", "synthesis", "opensta_post_resizer_routing_timing")
sta_post_resizer_routing_timing_log.generate_reports(
("opensta_post_resizer_routing_timing.rpt", "check_report"),
("opensta_post_resizer_routing_timing.timing.rpt", "timing_report"),
("opensta_post_resizer_routing_timing.min_max.rpt", "min_max_report"),
("opensta_post_resizer_routing_timing_wns.rpt", "wns_report"),
("opensta_post_resizer_routing_timing_tns.rpt", "tns_report"),
("opensta_post_resizer_routing_timing.slew.rpt", "check_slew")
)
sta_spef_log = Artifact(rp, "logs", "synthesis", "opensta_spef")
sta_spef_log.generate_reports(
("opensta_spef.rpt", "check_report"),
("opensta_spef.timing.rpt", "timing_report"),
("opensta_spef.min_max.rpt", "min_max_report"),
("opensta_spef_wns.rpt", "wns_report"),
("opensta_spef_tns.rpt", "tns_report"),
("opensta_spef.slew.rpt", "check_slew")
)
sta_spef_tt_log = Artifact(rp, "logs", "synthesis", "opensta_spef_tt")
sta_spef_tt_log.generate_reports(
("opensta_spef_tt.rpt", "check_report"),
("opensta_spef_tt.timing.rpt", "timing_report"),
("opensta_spef_tt.min_max.rpt", "min_max_report"),
("opensta_spef_wns_tt.rpt", "wns_report"),
("opensta_spef_tns_tt.rpt", "tns_report"),
("opensta_spef_tt.slew.rpt", "check_slew")
)
def extract_all_values(self):
rp = self.run_path
self.reports_from_logs()
def re_get_last_capture(rx, string):
matches = re.findall(rx, string)
if len(matches) == 0:
return None
return matches[-1]
# Runtime
flow_status = "flow_exceptional_failure"
total_runtime = -1
try:
total_runtime_content = open(os.path.join(rp, "reports", "total_runtime.txt")).read().strip()
match = re.search(r"([\w ]+?)\s+for\s+(\w+)\/([\w\-\.]+)\s+in\s+(\w+)", total_runtime_content)
if match is not None:
flow_status = re.sub(r" ", "_", match[1])
total_runtime = match[4]
except Exception as e:
print(f"Warning: failed to get extract runtime info for {self.design}/{self.tag}: {e}", file=sys.stderr)
routed_runtime = -1
try:
routed_runtime_content = open(os.path.join(rp, "reports", "routed_runtime.txt")).read().strip()
match = re.search(r"([\w ]+?)\s+for\s+(\w+)\/([\w\-]+)\s+in\s+(\w+)", routed_runtime_content)
if match is not None:
routed_runtime = match[4]
except Exception as e:
pass
# Cell Count
cell_count = -1
yosys_report = Artifact(rp, 'reports', "synthesis", ".stat.rpt", True)
yosys_report_content = yosys_report.get_content()
if yosys_report_content is not None:
match = re.search(r"Number of cells:\s*(\d+)", yosys_report_content)
if match is not None:
cell_count = int(match[1])
# Die Area
die_area = -1
placed_def = Artifact(rp, 'results', "placement", f"{self.design_name}.placement.def")
def_content = placed_def.get_content()
if def_content is not None:
match = re.search(r"DIEAREA\s*\(\s*(\d+)\s+(\d+)\s*\)\s*\(\s*(\d+)\s+(\d+)\s*\)", def_content)
if match is not None:
lx, ly, ux, uy = float(match[1]), float(match[2]), float(match[3]), float(match[4])
die_area = ((ux - lx) / 1000) * ((uy - ly) / 1000)
die_area /= 1000000 # To mm^2
        # Cells per square millimeter
        cells_per_mm = -1
if cell_count != -1 and die_area != -1:
cells_per_mm = cell_count / die_area
# OpenDP Utilization and HPWL
utilization = -1
        hpwl = -1  # Half-Perimeter Wire Length
replace_log = Artifact(rp, 'logs', "placement", "replace.log")
replace_log_content = replace_log.get_content()
if replace_log_content is not None:
match = re.search(r"Util\(%\):\s*([\d\.]+)", replace_log_content)
if match is not None:
utilization = float(match[1])
match = re_get_last_capture(r"HPWL:\s*([\d\.]+)", replace_log_content)
if match is not None:
hpwl = float(match)
# TritonRoute Logged Info Extraction
tr_log = Artifact(rp, 'logs', "routing", "tritonRoute.log")
tr_log_content = tr_log.get_content()
tr_memory_peak = -1
tr_violations = -1
wire_length = -1
vias = -1
if tr_log_content is not None:
match = re_get_last_capture(r"peak\s*=\s*([\d\.]+)", tr_log_content)
if match is not None:
tr_memory_peak = float(match)
match = re_get_last_capture(r"Number of violations\s*=\s*(\d+)", tr_log_content)
if match is not None:
tr_violations = int(match)
match = re_get_last_capture(r"Total wire length = ([\d\.]+)\s*\wm", tr_log_content)
if match is not None:
wire_length = int(match)
match = re_get_last_capture(r"Total number of vias = (\d+)", tr_log_content)
if match is not None:
vias = int(match)
# TritonRoute DRC Extraction
tr_drc = Artifact(rp, 'reports', "routing", "tritonRoute.drc")
tr_drc_content = tr_drc.get_content()
other_violations = tr_violations
short_violations = -1
metspc_violations = -1
offgrid_violations = -1
minhole_violations = -1
if tr_drc_content is not None:
short_violations = 0
metspc_violations = 0
offgrid_violations = 0
minhole_violations = 0
for line in tr_drc_content.split("\n"):
if "Short" in line:
short_violations += 1
other_violations -= 1
if "MetSpc" in line:
metspc_violations += 1
other_violations -= 1
if "OffGrid" in line:
offgrid_violations += 1
other_violations -= 1
if "MinHole" in line:
minhole_violations += 1
other_violations -= 1
# Magic Violations
magic_drc = Artifact(rp, 'reports', "magic", "magic.drc")
magic_drc_content = magic_drc.get_content()
magic_violations = -1
if magic_drc_content is not None:
# Magic DRC Content
match = re.search(r"COUNT:\s*(\d+)", magic_drc_content)
if match is not None:
magic_violations_raw = int(match[1])
# Not really sure why we do this
magic_violations = (magic_violations_raw + 3) // 4
# Klayout DRC Violations
klayout_drc = Artifact(rp, 'reports', 'klayout', 'magic.lydrc', True)
klayout_drc_content = klayout_drc.get_content()
klayout_violations = -1
if klayout_drc_content is not None:
klayout_violations = 0
            for line in klayout_drc_content.split("\n"):
if "<item>" in line:
klayout_violations += 1
# Antenna Violations
arc_antenna_report = Artifact(rp, 'reports', "routing", "antenna.rpt")
aar_content = arc_antenna_report.get_content()
magic_antenna_report = Artifact(rp, 'reports', "magic", "magic.antenna_violators.rpt")
mar_content = magic_antenna_report.get_content()
antenna_violations = -1
if aar_content is not None:
match = re.search(r"Number of pins violated:\s*(\d+)", aar_content)
if match is not None:
antenna_violations = int(match[1])
elif mar_content is not None:
# Old Magic-Based Check: Just Count The Lines
antenna_violations = len(mar_content.split("\n"))
# OpenSTA Extractions
def sta_report_extraction(sta_report_filename: str, filter: str, kind='reports', step="synthesis"):
value = -1
report = Artifact(rp, kind, step, sta_report_filename)
report_content = report.get_content()
if report_content is not None:
match = re.search(rf"{filter}\s+(-?[\d\.]+)", report_content)
if match is not None:
value = float(match[1])
else:
debug(f"Didn't find {filter} in {sta_report_filename}")
else:
debug(f"Can't find {sta_report_filename}")
return value
wns = sta_report_extraction("opensta_wns.rpt", 'wns')
spef_wns = sta_report_extraction("opensta_spef_wns.rpt", 'wns')
        opt_wns = sta_report_extraction("opensta_post_resizer_timing_wns.rpt", 'wns')
pl_wns = sta_report_extraction("replace.log", 'wns', kind='logs', step="placement")
fr_wns = sta_report_extraction("fastroute.log", 'wns', kind='logs', step="routing")
tns = sta_report_extraction("opensta_tns.rpt", 'tns')
spef_tns = sta_report_extraction("opensta_spef_tns.rpt", 'tns')
        opt_tns = sta_report_extraction("opensta_post_resizer_timing_tns.rpt", 'tns')
pl_tns = sta_report_extraction("replace.log", 'tns', kind='logs', step="placement")
fr_tns = sta_report_extraction("fastroute.log", 'tns', kind='logs', step="routing")
# Yosys Metrics
yosys_metrics = [
"Number of wires:",
"Number of wire bits:",
"Number of public wires:",
"Number of public wire bits:",
"Number of memories:",
"Number of memory bits:",
"Number of processes:",
"Number of cells:",
"\$_AND_",
"\$_DFF_",
"\$_NAND_",
"\$_NOR_",
"\$_OR",
"\$_XOR",
"\$_XNOR",
"\$_MUX"
]
yosys_log = Artifact(rp, 'logs', "synthesis", "yosys.log")
yosys_log_content = yosys_log.get_content()
yosys_metrics_values = []
for metric in yosys_metrics:
metric_value = -1
if yosys_log_content is not None:
metric_value = 0
metric_name_escaped = re.escape(metric)
match = re.search(rf"{metric_name_escaped}\s*(\d+)", yosys_log_content)
if match is not None:
metric_value = int(match[1])
yosys_metrics_values.append(metric_value)
# ABC Info
abc_i = -1
abc_o = -1
abc_level = -1
if yosys_log_content is not None:
match = re.search(r"ABC:\s*netlist\s*:\s*i\/o\s*=\s*(\d+)\/\s*(\d+)\s+lat\s*=\s*(\d+)\s+nd\s*=\s*(\d+)\s*edge\s*=\s*(\d+)\s*area\s*=\s*([\d\.]+)\s+delay\s*=\s*([\d\.]+)\s*lev\s*=\s*(\d+)", yosys_log_content)
if match is not None:
abc_i = match[1]
abc_o = match[2]
# We don't use most of the ones in the middle.
abc_level = match[8]
# Fastroute Layer Usage Percentages
routing_log = Artifact(rp, 'logs', 'routing', 'fastroute.log')
routing_log_content = routing_log.get_content()
layer_usage = [-1] * 6 # 6 layers magic number, much?
if routing_log_content is not None:
routing_log_lines = routing_log_content.split("\n")
final_congestion_report_start_line = None
for i, line in enumerate(routing_log_lines):
if "Final congestion report" in line:
final_congestion_report_start_line = i
break
if final_congestion_report_start_line is not None:
start = final_congestion_report_start_line
header = start + 1
separator = header + 1
layer_start = separator + 1
for i in range(6):
line = routing_log_lines[layer_start + i]
match = re.search(r"([\d\.]+)%", line)
if match is not None:
layer_usage[i] = float(match[1])
# Process Filler Cells
tapcell_log = Artifact(rp, 'logs', 'floorplan', "tapcell.log") # Also includes endcap info
tapcell_log_content = tapcell_log.get_content()
diode_log = Artifact(rp, 'logs', 'placement', 'diodes.log')
diode_log_content = diode_log.get_content()
tapcells, endcaps, diodes = 0, 0, 0
if tapcell_log_content is not None:
match = re.search(r"Inserted (\d+) end\s*caps\.", tapcell_log_content)
if match is not None:
endcaps = int(match[1])
match = re.search(r"Inserted (\d+) tap\s*cells\.", tapcell_log_content)
if match is not None:
tapcells = int(match[1])
if diode_log_content is not None:
match = None
if "inserted!" in diode_log_content:
match = re.search(r"(\d+)\s+of\s+.+?\s+inserted!", diode_log_content)
else:
match = re.search(r"(\d+)\s+diodes\s+inserted\.", diode_log_content)
if match is not None:
diodes = int(match[1])
filler_cells = tapcells + endcaps + diodes
# LVS Total Errors
lvs_report = Artifact(rp, 'results', "lvs", f"{self.design_name}.lvs_parsed.lef.log")
lvs_report_content = lvs_report.get_content()
lvs_total_errors = -1
if lvs_report_content is not None:
lvs_total_errors = 0
match = re.search(r"Total errors\s*=\s*(\d+)", lvs_report_content)
if match is not None:
lvs_total_errors = int(match[1])
# CVC Total Errors
cvc_log = Artifact(rp, 'logs', "cvc", "cvc_screen.log")
cvc_log_content = cvc_log.get_content()
cvc_total_errors = -1
if cvc_log_content is not None:
match = re.search(r"CVC:\s*Total:\s*(\d+)", cvc_log_content)
if match is not None:
cvc_total_errors = int(match[1])
return [
flow_status,
total_runtime,
routed_runtime,
die_area,
cells_per_mm,
utilization,
tr_memory_peak,
cell_count,
tr_violations,
short_violations,
metspc_violations,
offgrid_violations,
minhole_violations,
other_violations,
magic_violations,
antenna_violations,
lvs_total_errors,
cvc_total_errors,
klayout_violations,
wire_length,
vias,
wns,
pl_wns,
opt_wns,
fr_wns,
spef_wns,
tns,
pl_tns,
opt_tns,
fr_tns,
spef_tns,
hpwl,
*layer_usage,
*yosys_metrics_values,
abc_i,
abc_o,
abc_level,
endcaps,
tapcells,
diodes,
filler_cells
]
def get_report(self):
row = [
self.design_path,
self.design_name,
self.tag,
*self.extract_all_values(),
*self.configuration
]
return ",".join([f"{cell}" for cell in row])
|
|
"""Utilities for managing the fiscal calendar."""
import calendar
import contextlib
import datetime
from typing import Iterator, Optional, Union, cast
__author__ = "Adam J. Stewart"
__version__ = "0.4.0"
# Number of months in each quarter
MONTHS_PER_QUARTER = 12 // 4
MIN_QUARTER = 1
MAX_QUARTER = 4
# These global variables control the start of the fiscal year.
# The default is to use the U.S. federal government's fiscal year,
# but they can be changed to use any other fiscal year.
START_YEAR = "previous"
START_MONTH = 10
START_DAY = 1
def _validate_fiscal_calendar_params(
start_year: str, start_month: int, start_day: int
) -> None:
"""Raise an Exception if the calendar parameters are invalid.
:param start_year: Relationship between the start of the fiscal year and
the calendar year. Possible values: ``'previous'`` or ``'same'``.
:param start_month: The first month of the fiscal year
:param start_day: The first day of the first month of the fiscal year
:raises ValueError: If ``start_year`` is not ``'previous'`` or ``'same'``
:raises ValueError: If ``start_month`` or ``start_day`` is out of range
"""
if start_year not in ["previous", "same"]:
msg = f"'start_year' must be either 'previous' or 'same', not: '{start_year}'"
raise ValueError(msg)
_check_day(start_month, start_day)
def setup_fiscal_calendar(
start_year: Optional[str] = None,
start_month: Optional[int] = None,
start_day: Optional[int] = None,
) -> None:
"""Modify the start of the fiscal calendar.
:param start_year: Relationship between the start of the fiscal year and
the calendar year. Possible values: ``'previous'`` or ``'same'``.
:param start_month: The first month of the fiscal year
:param start_day: The first day of the first month of the fiscal year
:raises ValueError: If ``start_year`` is not ``'previous'`` or ``'same'``
:raises ValueError: If ``start_month`` or ``start_day`` is out of range
"""
global START_YEAR, START_MONTH, START_DAY
# If arguments are omitted, use the currently active values.
start_year = START_YEAR if start_year is None else start_year
start_month = START_MONTH if start_month is None else start_month
start_day = START_DAY if start_day is None else start_day
_validate_fiscal_calendar_params(start_year, start_month, start_day)
START_YEAR = start_year
START_MONTH = start_month
START_DAY = start_day
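# Sketch: permanently switch the module-level calendar, e.g. to a fiscal year
# that starts on 6 April (example values only; for a temporary change most
# callers will prefer the fiscal_calendar() context manager defined below).
def _example_setup_april_calendar():
    setup_fiscal_calendar(start_year="same", start_month=4, start_day=6)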
@contextlib.contextmanager
def fiscal_calendar(
start_year: Optional[str] = None,
start_month: Optional[int] = None,
start_day: Optional[int] = None,
) -> Iterator[None]:
"""A context manager that lets you modify the start of the fiscal calendar
inside the scope of a with-statement.
:param start_year: Relationship between the start of the fiscal year and
the calendar year. Possible values: ``'previous'`` or ``'same'``.
:param start_month: The first month of the fiscal year
:param start_day: The first day of the first month of the fiscal year
:raises ValueError: If ``start_year`` is not ``'previous'`` or ``'same'``
:raises ValueError: If ``start_month`` or ``start_day`` is out of range
"""
# If arguments are omitted, use the currently active values.
start_year = START_YEAR if start_year is None else start_year
start_month = START_MONTH if start_month is None else start_month
start_day = START_DAY if start_day is None else start_day
# Temporarily change global variables
previous_values = (START_YEAR, START_MONTH, START_DAY)
setup_fiscal_calendar(start_year, start_month, start_day)
yield
# Restore previous values
setup_fiscal_calendar(*previous_values)
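# A small sketch of the context manager in action: the quarter boundary is
# computed under a temporary calendar and the previous settings are restored
# on exit (start_month=4 is an arbitrary example).
def _example_fiscal_calendar_scope():
    with fiscal_calendar(start_year="same", start_month=4, start_day=1):
        q1_start = FiscalQuarter(2017, 1).start
    return q1_start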
def _check_year(year: int) -> int:
"""Check if ``year`` is a valid year.
:param year: The year to test
:return: The year
:raises ValueError: If ``year`` is out of range
"""
if datetime.MINYEAR <= year <= datetime.MAXYEAR:
return year
else:
raise ValueError(f"year {year} is out of range")
def _check_month(month: int) -> int:
"""Check if ``month`` is a valid month.
:param month: The month to test
:return: The month
:raises ValueError: If ``month`` is out of range
"""
if 1 <= month <= 12:
return month
else:
raise ValueError(f"month {month} is out of range")
def _check_day(month: int, day: int) -> int:
"""Check if ``day`` is a valid day of month.
:param month: The month to test
:param day: The day to test
:return: The day
:raises ValueError: If ``month`` or ``day`` is out of range
"""
month = _check_month(month)
# Find the last day of the month
# Use a non-leap year
max_day = calendar.monthrange(2001, month)[1]
if 1 <= day <= max_day:
return day
else:
raise ValueError(f"day {day} is out of range")
def _check_fiscal_day(fiscal_year: int, fiscal_day: int) -> int:
"""Check if ``day`` is a valid day of the fiscal year.
:param fiscal_year: The fiscal year to test
:param fiscal_day: The fiscal day to test
:return: The fiscal day
:raises ValueError: If ``year`` or ``day`` is out of range
"""
fiscal_year = _check_year(fiscal_year)
# Find the length of the year
max_day = 366 if FiscalYear(fiscal_year).isleap else 365
if 1 <= fiscal_day <= max_day:
return fiscal_day
else:
raise ValueError(f"fiscal_day {fiscal_day} is out of range")
def _check_quarter(quarter: int) -> int:
"""Check if ``quarter`` is a valid quarter.
:param quarter: The quarter to test
:return: The quarter
:raises ValueError: If ``quarter`` is out of range
"""
if MIN_QUARTER <= quarter <= MAX_QUARTER:
return quarter
else:
raise ValueError(f"quarter {quarter} is out of range")
class _Hashable:
"""A class to make Fiscal objects hashable"""
def __hash__(self) -> int:
"""Unique hash of an object instance based on __slots__
:returns: a unique hash
"""
return hash(tuple(map(lambda x: getattr(self, x), self.__slots__)))
class FiscalYear(_Hashable):
"""A class representing a single fiscal year."""
__slots__ = ["_fiscal_year"]
__hash__ = _Hashable.__hash__
_fiscal_year: int
def __new__(cls, fiscal_year: int) -> "FiscalYear":
"""Constructor.
:param fiscal_year: The fiscal year
:returns: A newly constructed FiscalYear object
:raises ValueError: If ``fiscal_year`` is out of range
"""
fiscal_year = _check_year(fiscal_year)
self = super(FiscalYear, cls).__new__(cls)
self._fiscal_year = fiscal_year
return self
@classmethod
def current(cls) -> "FiscalYear":
"""Alternative constructor. Returns the current FiscalYear.
:returns: A newly constructed FiscalYear object
"""
today = FiscalDate.today()
return cls(today.fiscal_year)
def __repr__(self) -> str:
"""Convert to formal string, for repr().
>>> fy = FiscalYear(2017)
>>> repr(fy)
'FiscalYear(2017)'
"""
return f"{self.__class__.__name__}({self._fiscal_year})"
def __str__(self) -> str:
"""Convert to informal string, for str().
>>> fy = FiscalYear(2017)
>>> str(fy)
'FY2017'
"""
return f"FY{self._fiscal_year}"
# TODO: Implement __format__ so that you can print
# fiscal year as 17 or 2017 (%y or %Y)
def __contains__(
self,
item: Union[
"FiscalYear",
"FiscalQuarter",
"FiscalMonth",
"FiscalDay",
datetime.datetime,
datetime.date,
],
) -> bool:
""":param item: The item to check
:returns: True if item in self, else False
"""
if isinstance(item, FiscalYear):
return self == item
elif isinstance(item, (FiscalQuarter, FiscalMonth, FiscalDay)):
return self._fiscal_year == item.fiscal_year
elif isinstance(item, datetime.datetime):
return self.start <= item <= self.end
else:
return self.start.date() <= item <= self.end.date()
# Read-only field accessors
@property
def fiscal_year(self) -> int:
""":returns: The fiscal year"""
return self._fiscal_year
@property
def prev_fiscal_year(self) -> "FiscalYear":
""":returns: The previous fiscal year"""
return FiscalYear(self._fiscal_year - 1)
@property
def next_fiscal_year(self) -> "FiscalYear":
""":returns: The next fiscal year"""
return FiscalYear(self._fiscal_year + 1)
@property
def start(self) -> "FiscalDateTime":
""":returns: Start of the fiscal year"""
return self.q1.start
@property
def end(self) -> "FiscalDateTime":
""":returns: End of the fiscal year"""
return self.q4.end
@property
def q1(self) -> "FiscalQuarter":
""":returns: The first quarter of the fiscal year"""
return FiscalQuarter(self._fiscal_year, 1)
@property
def q2(self) -> "FiscalQuarter":
""":returns: The second quarter of the fiscal year"""
return FiscalQuarter(self._fiscal_year, 2)
@property
def q3(self) -> "FiscalQuarter":
""":returns: The third quarter of the fiscal year"""
return FiscalQuarter(self._fiscal_year, 3)
@property
def q4(self) -> "FiscalQuarter":
""":returns: The fourth quarter of the fiscal year"""
return FiscalQuarter(self._fiscal_year, 4)
@property
def isleap(self) -> bool:
"""returns: True if the fiscal year contains a leap day, else False"""
fiscal_year = FiscalYear(self._fiscal_year)
starts_on_or_before_possible_leap_day = (
fiscal_year.start.month,
fiscal_year.start.day,
) < (3, 1)
if START_YEAR == "previous":
if starts_on_or_before_possible_leap_day:
calendar_year = self._fiscal_year - 1
else:
calendar_year = self._fiscal_year
elif START_YEAR == "same":
if starts_on_or_before_possible_leap_day:
calendar_year = self._fiscal_year
else:
calendar_year = self._fiscal_year + 1
return calendar.isleap(calendar_year)
# Comparisons of FiscalYear objects with other
def __lt__(self, other: "FiscalYear") -> bool:
return self._fiscal_year < other._fiscal_year
def __le__(self, other: "FiscalYear") -> bool:
return self._fiscal_year <= other._fiscal_year
def __eq__(self, other: object) -> bool:
if isinstance(other, FiscalYear):
return self._fiscal_year == other._fiscal_year
else:
raise TypeError(
f"can't compare '{type(self).__name__}' to '{type(other).__name__}'"
)
def __ne__(self, other: object) -> bool:
if isinstance(other, FiscalYear):
return self._fiscal_year != other._fiscal_year
else:
raise TypeError(
f"can't compare '{type(self).__name__}' to '{type(other).__name__}'"
)
def __gt__(self, other: "FiscalYear") -> bool:
return self._fiscal_year > other._fiscal_year
def __ge__(self, other: "FiscalYear") -> bool:
return self._fiscal_year >= other._fiscal_year
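# Hedged example of typical FiscalYear queries; the containment check assumes
# the default U.S. federal calendar, under which FY2017 spans
# 2016-10-01 through 2017-09-30.
def _example_fiscal_year_queries():
    fy = FiscalYear(2017)
    return fy.start, fy.end, datetime.date(2016, 10, 1) in fy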
class FiscalQuarter(_Hashable):
"""A class representing a single fiscal quarter."""
__slots__ = ["_fiscal_year", "_fiscal_quarter"]
__hash__ = _Hashable.__hash__
_fiscal_year: int
_fiscal_quarter: int
def __new__(cls, fiscal_year: int, fiscal_quarter: int) -> "FiscalQuarter":
"""Constructor.
:param fiscal_year: The fiscal year
:param fiscal_quarter: The fiscal quarter
:returns: A newly constructed FiscalQuarter object
:raises ValueError: If fiscal_year or fiscal_quarter is out of range
"""
fiscal_year = _check_year(fiscal_year)
fiscal_quarter = _check_quarter(fiscal_quarter)
self = super(FiscalQuarter, cls).__new__(cls)
self._fiscal_year = fiscal_year
self._fiscal_quarter = fiscal_quarter
return self
@classmethod
def current(cls) -> "FiscalQuarter":
"""Alternative constructor. Returns the current FiscalQuarter.
:returns: A newly constructed FiscalQuarter object
"""
today = FiscalDate.today()
return cls(today.fiscal_year, today.fiscal_quarter)
def __repr__(self) -> str:
"""Convert to formal string, for repr().
>>> q3 = FiscalQuarter(2017, 3)
>>> repr(q3)
'FiscalQuarter(2017, 3)'
"""
return f"{self.__class__.__name__}({self._fiscal_year}, {self._fiscal_quarter})"
def __str__(self) -> str:
"""Convert to informal string, for str().
>>> q3 = FiscalQuarter(2017, 3)
>>> str(q3)
'FY2017 Q3'
"""
return f"FY{self._fiscal_year} Q{self._fiscal_quarter}"
# TODO: Implement __format__ so that you can print
# fiscal year as 17 or 2017 (%y or %Y)
def __contains__(
self,
item: Union[
"FiscalQuarter",
"FiscalMonth",
"FiscalDay",
datetime.datetime,
datetime.date,
],
) -> bool:
"""Returns True if item in self, else False.
:param item: The item to check
"""
if isinstance(item, FiscalQuarter):
return self == item
elif isinstance(item, (FiscalMonth, FiscalDay)):
return self.start <= item.start and item.end <= self.end
elif isinstance(item, datetime.datetime):
return self.start <= item <= self.end
elif isinstance(item, datetime.date):
return self.start.date() <= item <= self.end.date()
# Read-only field accessors
@property
def fiscal_year(self) -> int:
""":returns: The fiscal year"""
return self._fiscal_year
@property
def fiscal_quarter(self) -> int:
""":returns: The fiscal quarter"""
return self._fiscal_quarter
@property
def prev_fiscal_quarter(self) -> "FiscalQuarter":
""":returns: The previous fiscal quarter"""
fiscal_year = self._fiscal_year
fiscal_quarter = self._fiscal_quarter - 1
if fiscal_quarter == 0:
fiscal_year -= 1
fiscal_quarter = 4
return FiscalQuarter(fiscal_year, fiscal_quarter)
@property
def next_fiscal_quarter(self) -> "FiscalQuarter":
""":returns: The next fiscal quarter"""
fiscal_year = self._fiscal_year
fiscal_quarter = self._fiscal_quarter + 1
if fiscal_quarter == 5:
fiscal_year += 1
fiscal_quarter = 1
return FiscalQuarter(fiscal_year, fiscal_quarter)
@property
def start(self) -> "FiscalDateTime":
""":returns: The start of the fiscal quarter"""
# Find the first month of the fiscal quarter
month = START_MONTH
month += (self._fiscal_quarter - 1) * MONTHS_PER_QUARTER
month %= 12
if month == 0:
month = 12
# Find the calendar year of the start of the fiscal quarter
if START_YEAR == "previous":
year = self._fiscal_year - 1
elif START_YEAR == "same":
year = self._fiscal_year
else:
raise ValueError(
"START_YEAR must be either 'previous' or 'same'", START_YEAR
)
if month < START_MONTH:
year += 1
# Find the last day of the month
# If START_DAY is later, choose last day of month instead
max_day = calendar.monthrange(year, month)[1]
day = min(START_DAY, max_day)
return FiscalDateTime(year, month, day, 0, 0, 0)
@property
def end(self) -> "FiscalDateTime":
""":returns: The end of the fiscal quarter"""
# Find the start of the next fiscal quarter
next_start = self.next_fiscal_quarter.start
        # Subtract 1 second
end = next_start - datetime.timedelta(seconds=1)
return FiscalDateTime(
end.year,
end.month,
end.day,
end.hour,
end.minute,
end.second,
end.microsecond,
end.tzinfo,
)
# Comparisons of FiscalQuarter objects with other
def __lt__(self, other: "FiscalQuarter") -> bool:
return (self._fiscal_year, self._fiscal_quarter) < (
other._fiscal_year,
other._fiscal_quarter,
)
def __le__(self, other: "FiscalQuarter") -> bool:
return (self._fiscal_year, self._fiscal_quarter) <= (
other._fiscal_year,
other._fiscal_quarter,
)
def __eq__(self, other: object) -> bool:
if isinstance(other, FiscalQuarter):
return (self._fiscal_year, self._fiscal_quarter) == (
other._fiscal_year,
other._fiscal_quarter,
)
else:
raise TypeError(
f"can't compare '{type(self).__name__}' to '{type(other).__name__}'"
)
def __ne__(self, other: object) -> bool:
if isinstance(other, FiscalQuarter):
return (self._fiscal_year, self._fiscal_quarter) != (
other._fiscal_year,
other._fiscal_quarter,
)
else:
raise TypeError(
f"can't compare '{type(self).__name__}' to '{type(other).__name__}'"
)
def __gt__(self, other: "FiscalQuarter") -> bool:
return (self._fiscal_year, self._fiscal_quarter) > (
other._fiscal_year,
other._fiscal_quarter,
)
def __ge__(self, other: "FiscalQuarter") -> bool:
return (self._fiscal_year, self._fiscal_quarter) >= (
other._fiscal_year,
other._fiscal_quarter,
)
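# Sketch of quarter arithmetic using only classes from this module: the bounds
# of the current quarter plus a peek at the quarter that follows it.
def _example_fiscal_quarter_bounds():
    q = FiscalQuarter.current()
    return q.start, q.end, q.next_fiscal_quarter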
class FiscalMonth(_Hashable):
"""A class representing a single fiscal month."""
__slots__ = ["_fiscal_year", "_fiscal_month"]
__hash__ = _Hashable.__hash__
_fiscal_year: int
_fiscal_month: int
def __new__(cls, fiscal_year: int, fiscal_month: int) -> "FiscalMonth":
"""Constructor.
:param fiscal_year: The fiscal year
:param fiscal_month: The fiscal month
:returns: A newly constructed FiscalMonth object
:raises ValueError: If fiscal_year or fiscal_month is out of range
"""
fiscal_year = _check_year(fiscal_year)
fiscal_month = _check_month(fiscal_month)
self = super(FiscalMonth, cls).__new__(cls)
self._fiscal_year = fiscal_year
self._fiscal_month = fiscal_month
return self
@classmethod
def current(cls) -> "FiscalMonth":
"""Alternative constructor. Returns the current FiscalMonth.
:returns: A newly constructed FiscalMonth object
"""
today = FiscalDate.today()
return cls(today.fiscal_year, today.fiscal_month)
def __repr__(self) -> str:
"""Convert to formal string, for repr().
>>> fm = FiscalMonth(2017, 1)
>>> repr(fm)
'FiscalMonth(2017, 1)'
"""
return f"{self.__class__.__name__}({self._fiscal_year}, {self._fiscal_month})"
def __str__(self) -> str:
"""Convert to informal string, for str().
>>> fm = FiscalMonth(2017, 1)
>>> str(fm)
'FY2017 FM1'
"""
return f"FY{self._fiscal_year} FM{self._fiscal_month}"
# TODO: Implement __format__ so that you can print
# fiscal year as 17 or 2017 (%y or %Y)
def __contains__(
self, item: Union["FiscalMonth", "FiscalDay", datetime.datetime, datetime.date]
) -> bool:
"""Returns True if item in self, else False.
:param item: The item to check
"""
if isinstance(item, FiscalMonth):
return self == item
elif isinstance(item, FiscalDay):
return self.start <= item.start <= item.end <= self.end
elif isinstance(item, datetime.datetime):
return self.start <= item <= self.end
elif isinstance(item, datetime.date):
return self.start.date() <= item <= self.end.date()
# Read-only field accessors
@property
def fiscal_year(self) -> int:
""":returns: The fiscal year"""
return self._fiscal_year
@property
def fiscal_month(self) -> int:
""":returns: The fiscal month"""
return self._fiscal_month
@property
def start(self) -> "FiscalDateTime":
""":returns: Start of the fiscal month"""
calendar_month = (START_MONTH + self._fiscal_month - 1) % 12
if calendar_month == 0:
calendar_month = 12
month_is_on_or_after_start_month = calendar_month >= START_MONTH
if START_YEAR == "previous":
if month_is_on_or_after_start_month:
calendar_year = self._fiscal_year - 1
else:
calendar_year = self._fiscal_year
elif START_YEAR == "same":
if month_is_on_or_after_start_month:
calendar_year = self._fiscal_year
else:
calendar_year = self._fiscal_year + 1
return FiscalDateTime(calendar_year, calendar_month, START_DAY)
@property
def end(self) -> "FiscalDateTime":
""":returns: End of the fiscal month"""
        # Find the start of the next fiscal month
        next_start = self.next_fiscal_month.start
        # Subtract 1 second
end = next_start - datetime.timedelta(seconds=1)
return FiscalDateTime(
end.year,
end.month,
end.day,
end.hour,
end.minute,
end.second,
end.microsecond,
end.tzinfo,
)
@property
def prev_fiscal_month(self) -> "FiscalMonth":
""":returns: The previous fiscal month"""
fiscal_year = self._fiscal_year
fiscal_month = self._fiscal_month - 1
if fiscal_month == 0:
fiscal_year -= 1
fiscal_month = 12
return FiscalMonth(fiscal_year, fiscal_month)
@property
def next_fiscal_month(self) -> "FiscalMonth":
""":returns: The next fiscal month"""
fiscal_year = self._fiscal_year
fiscal_month = self._fiscal_month + 1
if fiscal_month == 13:
fiscal_year += 1
fiscal_month = 1
return FiscalMonth(fiscal_year, fiscal_month)
# Comparisons of FiscalMonth objects with other
def __lt__(self, other: "FiscalMonth") -> bool:
return (self._fiscal_year, self._fiscal_month) < (
other._fiscal_year,
other._fiscal_month,
)
def __le__(self, other: "FiscalMonth") -> bool:
return (self._fiscal_year, self._fiscal_month) <= (
other._fiscal_year,
other._fiscal_month,
)
def __eq__(self, other: object) -> bool:
if isinstance(other, FiscalMonth):
return (self._fiscal_year, self._fiscal_month) == (
other._fiscal_year,
other._fiscal_month,
)
else:
raise TypeError(
f"can't compare '{type(self).__name__}' to '{type(other).__name__}'"
)
def __ne__(self, other: object) -> bool:
if isinstance(other, FiscalMonth):
return (self._fiscal_year, self._fiscal_month) != (
other._fiscal_year,
other._fiscal_month,
)
else:
raise TypeError(
f"can't compare '{type(self).__name__}' to '{type(other).__name__}'"
)
def __gt__(self, other: "FiscalMonth") -> bool:
return (self._fiscal_year, self._fiscal_month) > (
other._fiscal_year,
other._fiscal_month,
)
def __ge__(self, other: "FiscalMonth") -> bool:
return (self._fiscal_year, self._fiscal_month) >= (
other._fiscal_year,
other._fiscal_month,
)
class FiscalDay(_Hashable):
"""A class representing a single fiscal day."""
__slots__ = ["_fiscal_year", "_fiscal_day"]
__hash__ = _Hashable.__hash__
_fiscal_year: int
_fiscal_day: int
def __new__(cls, fiscal_year: int, fiscal_day: int) -> "FiscalDay":
"""Constructor.
:param fiscal_year: The fiscal year
:param fiscal_day: The fiscal day
:returns: A newly constructed FiscalDay object
:raises ValueError: If fiscal_year or fiscal_day is out of range
"""
fiscal_year = _check_year(fiscal_year)
fiscal_day = _check_fiscal_day(fiscal_year, fiscal_day)
self = super(FiscalDay, cls).__new__(cls)
self._fiscal_year = fiscal_year
self._fiscal_day = fiscal_day
return self
@classmethod
def current(cls) -> "FiscalDay":
"""Alternative constructor. Returns the current FiscalDay.
:returns: A newly constructed FiscalDay object
"""
today = FiscalDate.today()
return cls(today.fiscal_year, today.fiscal_day)
def __repr__(self) -> str:
"""Convert to formal string, for repr().
>>> fd = FiscalDay(2017, 1)
>>> repr(fd)
'FiscalDay(2017, 1)'
"""
return f"{self.__class__.__name__}({self._fiscal_year}, {self._fiscal_day})"
def __str__(self) -> str:
"""Convert to informal string, for str().
>>> fd = FiscalDay(2017, 1)
>>> str(fd)
'FY2017 FD1'
"""
return f"FY{self._fiscal_year} FD{self._fiscal_day}"
# TODO: Implement __format__ so that you can print
# fiscal year as 17 or 2017 (%y or %Y)
def __contains__(
self, item: Union["FiscalDay", datetime.datetime, datetime.date]
) -> bool:
"""Returns True if item in self, else False.
:param item: The item to check
"""
if isinstance(item, FiscalDay):
return self == item
elif isinstance(item, datetime.datetime):
return self.start <= item <= self.end
elif isinstance(item, datetime.date):
return self.start.date() <= item <= self.end.date()
# Read-only field accessors
@property
def fiscal_year(self) -> int:
""":returns: The fiscal year"""
return self._fiscal_year
@property
def fiscal_quarter(self) -> int:
""":returns: The fiscal quarter"""
return self.start.fiscal_quarter
@property
def fiscal_month(self) -> int:
""":returns: The fiscal month"""
return self.start.fiscal_month
@property
def fiscal_day(self) -> int:
""":returns: The fiscal day"""
return self._fiscal_day
@property
def start(self) -> "FiscalDateTime":
""":returns: Start of the fiscal day"""
fiscal_year = FiscalYear(self._fiscal_year)
days_elapsed = datetime.timedelta(days=self._fiscal_day - 1)
start = fiscal_year.start + days_elapsed
return FiscalDateTime(start.year, start.month, start.day, 0, 0, 0)
@property
def end(self) -> "FiscalDateTime":
""":returns: End of the fiscal day"""
        # Find the start of the next fiscal day
        next_start = self.next_fiscal_day.start
        # Subtract 1 second
end = next_start - datetime.timedelta(seconds=1)
return FiscalDateTime(
end.year,
end.month,
end.day,
end.hour,
end.minute,
end.second,
end.microsecond,
end.tzinfo,
)
@property
def prev_fiscal_day(self) -> "FiscalDay":
""":returns: The previous fiscal day"""
fiscal_year = self._fiscal_year
fiscal_day = self._fiscal_day - 1
if fiscal_day == 0:
fiscal_year -= 1
try:
fiscal_day = _check_fiscal_day(fiscal_year, 366)
except ValueError:
fiscal_day = _check_fiscal_day(fiscal_year, 365)
return FiscalDay(fiscal_year, fiscal_day)
@property
def next_fiscal_day(self) -> "FiscalDay":
""":returns: The next fiscal day"""
fiscal_year = self._fiscal_year
try:
fiscal_day = _check_fiscal_day(fiscal_year, self._fiscal_day + 1)
except ValueError:
fiscal_year += 1
fiscal_day = 1
return FiscalDay(fiscal_year, fiscal_day)
# Comparisons of FiscalDay objects with other
def __lt__(self, other: "FiscalDay") -> bool:
return (self._fiscal_year, self._fiscal_day) < (
other._fiscal_year,
other._fiscal_day,
)
def __le__(self, other: "FiscalDay") -> bool:
return (self._fiscal_year, self._fiscal_day) <= (
other._fiscal_year,
other._fiscal_day,
)
def __eq__(self, other: object) -> bool:
if isinstance(other, FiscalDay):
return (self._fiscal_year, self._fiscal_day) == (
other._fiscal_year,
other._fiscal_day,
)
else:
raise TypeError(
f"can't compare '{type(self).__name__}' to '{type(other).__name__}'"
)
def __ne__(self, other: object) -> bool:
if isinstance(other, FiscalDay):
return (self._fiscal_year, self._fiscal_day) != (
other._fiscal_year,
other._fiscal_day,
)
else:
raise TypeError(
f"can't compare '{type(self).__name__}' to '{type(other).__name__}'"
)
def __gt__(self, other: "FiscalDay") -> bool:
return (self._fiscal_year, self._fiscal_day) > (
other._fiscal_year,
other._fiscal_day,
)
def __ge__(self, other: "FiscalDay") -> bool:
return (self._fiscal_year, self._fiscal_day) >= (
other._fiscal_year,
other._fiscal_day,
)
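# Sketch of day-of-fiscal-year rollover; the comment assumes the default U.S.
# federal calendar, under which FY2017 has 365 days.
def _example_fiscal_day_rollover():
    fd = FiscalDay(2017, 365)
    return fd.next_fiscal_day  # rolls over into FY2018 FD1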
class _FiscalMixin:
"""Mixin for FiscalDate and FiscalDateTime that
provides the following common attributes in addition to
those provided by datetime.date and datetime.datetime:
"""
@property
def fiscal_year(self) -> int:
""":returns: The fiscal year"""
fiscal_self = cast(Union["FiscalDate", "FiscalDateTime"], self)
# The fiscal year can be at most 1 year away from the calendar year
if fiscal_self in FiscalYear(fiscal_self.year):
return fiscal_self.year
elif fiscal_self in FiscalYear(fiscal_self.year + 1):
return fiscal_self.year + 1
else:
return fiscal_self.year - 1
@property
def fiscal_quarter(self) -> int:
""":returns: The fiscal quarter"""
fiscal_self = cast(Union["FiscalDate", "FiscalDateTime"], self)
for quarter in range(1, 5):
q = FiscalQuarter(fiscal_self.fiscal_year, quarter)
if fiscal_self in q:
break
return quarter
@property
def fiscal_month(self) -> int:
""":returns: The fiscal month"""
fiscal_self = cast(Union["FiscalDate", "FiscalDateTime"], self)
for month in range(1, 13):
m = FiscalMonth(fiscal_self.fiscal_year, month)
if fiscal_self in m:
break
return month
@property
def fiscal_day(self) -> int:
""":returns: The fiscal day"""
fiscal_self = cast(Union["FiscalDate", "FiscalDateTime"], self)
fiscal_year = FiscalYear(fiscal_self.fiscal_year)
year_start = fiscal_year.start
if isinstance(fiscal_self, FiscalDate):
delta = cast(datetime.date, fiscal_self) - year_start.date()
else:
delta = fiscal_self - year_start
return delta.days + 1
@property
def prev_fiscal_year(self) -> FiscalYear:
""":returns: The previous fiscal year"""
return FiscalYear(self.fiscal_year - 1)
@property
def next_fiscal_year(self) -> FiscalYear:
""":returns: The next fiscal year"""
return FiscalYear(self.fiscal_year + 1)
@property
def prev_fiscal_quarter(self) -> FiscalQuarter:
""":returns: The previous fiscal quarter"""
fiscal_quarter = FiscalQuarter(self.fiscal_year, self.fiscal_quarter)
return fiscal_quarter.prev_fiscal_quarter
@property
def next_fiscal_quarter(self) -> FiscalQuarter:
""":returns: The next fiscal quarter"""
fiscal_quarter = FiscalQuarter(self.fiscal_year, self.fiscal_quarter)
return fiscal_quarter.next_fiscal_quarter
@property
def prev_fiscal_month(self) -> FiscalMonth:
""":returns: The previous fiscal month"""
fiscal_month = FiscalMonth(self.fiscal_year, self.fiscal_month)
return fiscal_month.prev_fiscal_month
@property
def next_fiscal_month(self) -> FiscalMonth:
""":returns: The next fiscal month"""
fiscal_month = FiscalMonth(self.fiscal_year, self.fiscal_month)
return fiscal_month.next_fiscal_month
@property
def prev_fiscal_day(self) -> FiscalDay:
""":returns: The previous fiscal day"""
fiscal_day = FiscalDay(self.fiscal_year, self.fiscal_day)
return fiscal_day.prev_fiscal_day
@property
def next_fiscal_day(self) -> FiscalDay:
""":returns: The next fiscal day"""
fiscal_day = FiscalDay(self.fiscal_year, self.fiscal_day)
return fiscal_day.next_fiscal_day
class FiscalDate(datetime.date, _FiscalMixin):
"""A wrapper around the builtin datetime.date class
that provides the following attributes."""
pass
class FiscalDateTime(datetime.datetime, _FiscalMixin):
"""A wrapper around the builtin datetime.datetime class
that provides the following attributes."""
pass
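# Hedged convenience sketch: express today's date in fiscal terms using the
# FiscalDate wrapper defined above.
def _example_today_in_fiscal_terms():
    today = FiscalDate.today()
    return today.fiscal_year, today.fiscal_quarter, today.fiscal_month, today.fiscal_day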
|
|
"""
An implementation of the Open Spherical Camera API proposed here:
https://developers.google.com/streetview/open-spherical-camera/reference
There is minimal error checking, and likely a few places where the expected
workflow isn't adhered to, but this should get you started if you're using
Python and a camera that supports the Open Spherical Camera API.
Usage:
At the top of your Python script, use
from osc.osc import *
After you import the library, you can use the commands like this:
# Initializing the class starts a session
camera = OpenSphericalCamera()
camera.state()
camera.info()
# Only need to call this if there was a problem
# when 'camera' was created
camera.startSession()
# Capture image
response = camera.takePicture()
# Wait for the stitching to finish
camera.waitForProcessing(response['id'])
# Copy image to computer
camera.getLatestImage()
camera.closeSession()
"""
import json
import requests
import time
__author__ = 'Haarm-Pieter Duiker'
__copyright__ = 'Copyright (C) 2016 - Duiker Research Corp'
__license__ = ''
__maintainer__ = 'Haarm-Pieter Duiker'
__email__ = '[email protected]'
__status__ = 'Production'
__major_version__ = '1'
__minor_version__ = '0'
__change_version__ = '0'
__version__ = '.'.join((__major_version__,
__minor_version__,
__change_version__))
__all__ = ['g_oscOptions',
'shutterSpeedNames',
'shutterSpeeds',
'exposurePrograms',
'whiteBalance',
'OpenSphericalCamera']
#
# Options
#
'''
Reference:
https://developers.google.com/streetview/open-spherical-camera/reference/options
'''
g_oscOptions = [
# Read-only values
"remainingPictures",
"remainingSpace",
"totalSpace",
# Reference options
"gpsInfo",
"dateTimeZone",
"aperture",
"apertureSupport",
"captureMode",
"captureModeSupport",
"exposureCompensation",
"exposureCompensationSupport",
"exposureProgram",
"exposureProgramSupport",
"fileFormat",
"fileFormatSupport",
"iso",
"isoSupport",
"offDelay",
"offDelaySupport",
"shutterSpeed",
"shutterSpeedSupport",
"sleepDelay",
"sleepDelaySupport",
"whiteBalance",
"whiteBalanceSupport",
"exposureDelay",
"exposureDelaySupport",
"hdr",
"hdrSupport",
"exposureBracket",
"exposureBracketSupport",
"gyro",
"gyroSupport",
"imageStabilization",
"imageStabilizationSupport",
"wifiPassword"
]
#
# Known options values
#
shutterSpeedNames = {
0.00015625 : "1/6400",
0.0002 : "1/5000",
0.00025 : "1/4000",
0.0003125 : "1/3200",
0.0004 : "1/2500",
0.0005 : "1/2000",
0.000625 : "1/1600",
0.0008 : "1/1250",
0.001 : "1/1000",
0.00125 : "1/800",
0.0015625 : "1/640",
0.002 : "1/500",
0.0025 : "1/400",
0.003125 : "1/320",
0.004 : "1/250",
0.005 : "1/200",
0.00625 : "1/160",
0.008 : "1/125",
0.01 : "1/100",
0.0125 : "1/80",
0.01666666 : "1/60",
0.02 : "1/50",
0.025 : "1/40",
0.03333333 : "1/30",
0.04 : "1/25",
0.05 : "1/20",
0.06666666 : "1/15",
0.07692307 : "1/13",
0.1 : "1/10",
0.125 : "1/8",
0.16666666 : "1/6",
0.2 : "1/5",
0.25 : "1/4",
0.33333333 : "1/3",
0.4 : "1/2.5",
0.5 : "1/2",
0.625 : "1/1.6",
0.76923076 : "1/1.3",
1 : "1",
1.3 : "1.3",
1.6 : "1.6",
2 : "2",
2.5 : "2.5",
3.2 : "3.2",
4 : "4",
5 : "5",
6 : "6",
8 : "8",
10 : "10",
13 : "13",
15 : "15",
20 : "20",
25 : "25",
30 : "30",
60 : "60"
}
shutterSpeeds = [
0.00015625,
0.0002,
0.00025,
0.0003125,
0.0004,
0.0005,
0.000625,
0.0008,
0.001,
0.00125,
0.0015625,
0.002,
0.0025,
0.003125,
0.004,
0.005,
0.00625,
0.008,
0.01,
0.0125,
0.01666666,
0.02,
0.025,
0.03333333,
0.04,
0.05,
0.06666666,
0.07692307,
0.1,
0.125,
0.16666666,
0.2,
0.25,
0.33333333,
0.4,
0.5,
0.625,
0.76923076,
1,
1.3,
1.6,
2,
2.5,
3.2,
4,
5,
6,
8,
10,
13,
15,
20,
25,
30,
60
]
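# Hedged helper sketch (not part of the OSC specification): snap an arbitrary
# exposure time in seconds to the nearest value the camera advertises, so it
# can be labelled via shutterSpeedNames or written back as an option value.
def _closestShutterSpeed(seconds):
    return min(shutterSpeeds, key=lambda s: abs(s - seconds))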
exposurePrograms = {
"manual" : 1,
"automatic" : 2,
"shutter priority" : 4,
"iso priority" : 9
}
whiteBalance = {
"Auto" : "auto",
"Outdoor" : "daylight",
"Shade" : "shade",
"Cloudy" : "cloudy-daylight",
"Incandescent light 1" : "incandescent",
"Incandescent light 2" : "_warmWhiteFluorescent",
"Fluorescent light 1 (daylight)" : "_dayLightFluorescent",
"Fluorescent light 2 (natural white)" : "_dayWhiteFluorescent",
"Fluorescent light 3 (white)" : "fluorescent",
"Fluorescent light 4 (light bulb color)" : "_bulbFluorescent"
}
#
# Error codes
#
'''
Reference:
https://developers.google.com/streetview/open-spherical-camera/guides/osc/error-handling
https://developers.theta360.com/en/docs/v2/api_reference/protocols/errors.html
Error code - HTTP Status code - Description
unknownCommand - 400 - Invalid command is issued
missingParameter - 400 - Insufficient required parameters to issue the command
invalidParameterName - 400 - Parameter name or option name is invalid
invalidParameterValue - 400 - Parameter value when command was issued is invalid
cameraInExclusiveUse - 400 - Session start not possible when camera is in exclusive use
disabledCommand - 403 - Command cannot be executed due to the camera status
invalidSessionId - 403 - sessionID when command was issued is invalid
corruptedFile - 403 - Process request for corrupted file
powerOffSequenceRunning - 403 - Process request when power supply is off
invalidFileFormat - 403 - Invalid file format specified
serviceUnavailable - 503 - Processing requests cannot be received temporarily
unexpected - 503 - Other errors
'''
#
# Generic OpenSphericalCamera
#
class OpenSphericalCamera:
# Class variables / methods
oscOptions = g_oscOptions
# Instance variables / methods
def __init__(self, ip_base="192.168.1.1", httpPort=80):
self.sid = None
self.fingerprint = None
self._api = None
self._ip = ip_base
self._httpPort = httpPort
self._httpUpdatesPort = httpPort
# Try to start a session
self.startSession()
# Use 'info' command to retrieve more information
self._info = self.info()
if self._info:
self._api = self._info['api']
self._httpPort = self._info['endpoints']['httpPort']
self._httpUpdatesPort = self._info['endpoints']['httpUpdatesPort']
def __del__(self):
if self.sid:
self.closeSession()
def _request(self, url_request, update=False):
"""
Generate the URI to send to the Open Spherical Camera.
All calls start with /osc/
"""
osc_request = unicode("/osc/" + url_request)
url_base = "http://%s:%s" % (self._ip, self._httpPort if not update else self._httpUpdatesPort)
if self._api:
if osc_request in self._api:
url = url_base + osc_request
else:
print( "OSC Error - Unsupported API : %s" % osc_request )
print( "OSC Error - Supported API is : %s" % self._api )
url = None
else:
url = url_base + osc_request
return url
def _httpError(self, exception):
print( "HTTP Error - begin" )
print( repr(exception) )
print( "HTTP Error - end" )
def _oscError(self, request):
status = request.status_code
try:
error = request.json()
print( "OSC Error - HTTP Status : %s" % status)
if 'error' in error:
print( "OSC Error - Code : %s" % error['error']['code'])
print( "OSC Error - Message : %s" % error['error']['message'])
print( "OSC Error - Name : %s" % error['name'])
print( "OSC Error - State : %s" % error['state'])
except:
print( "OSC Error - HTTP Status : %s" % status)
return status
def getOptionNames(self):
return self.oscOptions
def info(self):
"""
Get basic information on the camera. Note that this is a GET call
and not a POST. Most of the calls are POST.
Reference:
https://developers.google.com/streetview/open-spherical-camera/guides/osc/info
"""
url = self._request("info")
try:
req = requests.get(url)
except Exception, e:
self._httpError(e)
return None
if req.status_code == 200:
response = req.json()
else:
self._oscError(req)
response = None
return response
def state(self):
"""
        Get the state of the camera, which will include the sessionId and the
        latestFileUri if you've just taken a picture.
Reference:
https://developers.google.com/streetview/open-spherical-camera/guides/osc/state
"""
url = self._request("state")
try:
req = requests.post(url)
except Exception, e:
self._httpError(e)
return None
if req.status_code == 200:
response = req.json()
self.fingerprint = response['fingerprint']
state = response['state']
else:
self._oscError(req)
state = None
return state
def status(self, command_id):
"""
Returns the status for previous inProgress commands.
Reference:
https://developers.google.com/streetview/open-spherical-camera/guides/osc/commands/status
"""
url = self._request("commands/status")
body = json.dumps({"id": command_id})
try:
req = requests.post(url, data=body)
except Exception, e:
self._httpError(e)
return None
if req.status_code == 200:
response = req.json()
state = response['state']
else:
self._oscError(req)
state = None
return state
def checkForUpdates(self):
"""
Check for updates on the camera, using the current state fingerprint.
Reference:
https://developers.google.com/streetview/open-spherical-camera/guides/osc/checkforupdates
"""
if self.fingerprint is None:
self.state()
url = self._request("checkForUpdates")
body = json.dumps({"stateFingerprint": self.fingerprint})
try:
req = requests.post(url, data=body)
except Exception, e:
self._httpError(e)
return False
if req.status_code == 200:
response = req.json()
newFingerprint = response['stateFingerprint']
if newFingerprint != self.fingerprint:
print( "Update - new, old fingerprint : %s, %s" % (newFingerprint, self.fingerprint) )
self.fingerprint = newFingerprint
response = True
else:
print( "No update - fingerprint : %s" % self.fingerprint )
response = False
else:
self._oscError(req)
response = False
return response
def waitForProcessing(self, command_id, maxWait=20):
"""
        Helper function that will poll the camera until the status changes
        to 'done' or the timeout is hit.
Reference:
https://developers.google.com/streetview/open-spherical-camera/guides/osc/commands/status
"""
print( "Waiting for processing")
for i in range(maxWait):
status = self.status(command_id)
if status == "done":
print( "Image processing finished" )
break
elif not status or "error" in status:
print( "Status failed. Stopping wait." )
break
print( "%d - %s" % (i, status) )
time.sleep( 1 )
return
def startSession(self):
"""
Start a new session. Grab the sessionId number and return it.
You'll need the sessionId to take a video or image.
Reference:
https://developers.google.com/streetview/open-spherical-camera/reference/camera/startsession
"""
url = self._request("commands/execute")
body = json.dumps({"name": "camera.startSession",
"parameters": {}
})
try:
req = requests.post(url, data=body)
except Exception, e:
self._httpError(e)
self.sid = None
return self.sid
if req.status_code == 200:
response = req.json()
self.sid = (response["results"]["sessionId"])
else:
self._oscError(req)
self.sid = None
return self.sid
def updateSession(self):
"""
Update a session, using the sessionId.
Reference:
https://developers.google.com/streetview/open-spherical-camera/reference/camera/updatesession
"""
url = self._request("commands/execute")
body = json.dumps({"name": "camera.updateSession",
"parameters": { "sessionId":self.sid }
})
try:
req = requests.post(url, data=body)
except Exception, e:
self._httpError(e)
return None
response = None
if req.status_code == 200:
response = req.json()
else:
self._oscError(req)
response = None
return response
def closeSession(self):
"""
Close a session.
Reference:
https://developers.google.com/streetview/open-spherical-camera/reference/camera/closesession
"""
url = self._request("commands/execute")
body = json.dumps({"name": "camera.closeSession",
"parameters": { "sessionId":self.sid }
})
try:
req = requests.post(url, data=body)
        except Exception as e:
self._httpError(e)
return None
if req.status_code == 200:
response = req.json()
self.sid = None
else:
self._oscError(req)
response = None
return response
def takePicture(self):
"""
Take a still image. The sessionId is either taken from
startSession or from state. You can change the mode
from video to image with captureMode in the options.
Reference:
https://developers.google.com/streetview/open-spherical-camera/reference/camera/takepicture
"""
        if self.sid is None:
response = None
return response
url = self._request("commands/execute")
body = json.dumps({"name": "camera.takePicture",
"parameters": {
"sessionId": self.sid
}
})
try:
req = requests.post(url, data=body)
        except Exception as e:
self._httpError(e)
return None
if req.status_code == 200:
response = req.json()
else:
self._oscError(req)
response = None
return response
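    # Hedged usage sketch (not from the original module): per the OSC spec
    # referenced above, a long-running capture may come back "inProgress" with
    # a command id that can be handed to waitForProcessing() defined earlier.
    # The exact response keys are an assumption about this particular camera.
    #
    #     camera.startSession()
    #     resp = camera.takePicture()
    #     if resp and resp.get("state") == "inProgress":
    #         camera.waitForProcessing(resp["id"])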
def listImages(self, entryCount = 3, maxSize = None,
continuationToken = None, includeThumb = True ):
"""
entryCount:
Integer No. of still images and video files to be acquired
maxSize:
Integer (Optional) Maximum size of thumbnail images;
max(thumbnail_width, thumbnail_height).
continuationToken
String (Optional) An opaque continuation token of type string,
returned by previous listImages call, used to retrieve next
images. Omit this parameter for the first listImages
includeThumb:
Boolean (Optional) Defaults to true. Use false to omit
thumbnail images from the result.
Reference:
https://developers.google.com/streetview/open-spherical-camera/reference/camera/listimages
"""
parameters = {
"entryCount": entryCount,
"includeThumb": includeThumb,
}
if maxSize is not None:
parameters['maxSize'] = maxSize
if continuationToken is not None:
parameters['continuationToken'] = continuationToken
url = self._request("commands/execute")
body = json.dumps({"name": "camera.listImages",
"parameters": parameters
})
try:
req = requests.post(url, data=body)
        except Exception as e:
self._httpError(e)
return None
if req.status_code == 200:
response = req.json()
else:
self._oscError(req)
response = None
return response
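    # Hedged pagination sketch (assumption: the camera keeps returning a
    # "continuationToken" inside results until the listing is exhausted, as
    # described in the OSC reference linked above):
    #
    #     token = None
    #     while True:
    #         resp = camera.listImages(entryCount=10, includeThumb=False,
    #                                  continuationToken=token)
    #         if not resp:
    #             break
    #         results = resp.get("results", {})
    #         for entry in results.get("entries", []):
    #             print(entry.get("uri"))
    #         token = results.get("continuationToken")
    #         if not token:
    #             break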
def delete(self, fileUri):
"""
Delete the image with the named fileUri
Reference:
https://developers.google.com/streetview/open-spherical-camera/reference/camera/delete
"""
url = self._request("commands/execute")
body = json.dumps({"name": "camera.delete",
"parameters": {
"fileUri": fileUri
}
})
try:
req = requests.post(url, data=body)
        except Exception as e:
self._httpError(e)
return None
if req.status_code == 200:
response = req.json()
else:
self._oscError(req)
response = None
return response
def getImage(self, fileUri, imageType="image"):
"""
        Transfer the file from the camera to the computer and save the
        binary data to local storage. This works, but is clunky.
        There are easier ways to do this. The imageType parameter
        can be set to "thumb" for a thumbnail or "image" for the
        full-size image. The default is "image".
Reference:
https://developers.google.com/streetview/open-spherical-camera/reference/camera/getimage
"""
url = self._request("commands/execute")
body = json.dumps({"name": "camera.getImage",
"parameters": {
"fileUri": fileUri,
"_type": imageType
}
})
fileName = fileUri.split("/")[1]
print( "Writing image : %s" % fileName )
acquired = False
try:
response = requests.post(url, data=body, stream=True)
        except Exception as e:
self._httpError(e)
return acquired
if response.status_code == 200:
with open(fileName, 'wb') as handle:
for block in response.iter_content(1024):
handle.write(block)
acquired = True
else:
self._oscError(response)
return acquired
def getMetadata(self, fileUri):
"""
Get the exif and xmp metadata associated with the named fileUri
Reference:
https://developers.google.com/streetview/open-spherical-camera/reference/camera/getmetadata
"""
url = self._request("commands/execute")
body = json.dumps({"name": "camera.getMetadata",
"parameters": {
"fileUri": fileUri
}
})
try:
req = requests.post(url, data=body)
        except Exception as e:
self._httpError(e)
return None
if req.status_code == 200:
response = req.json()
else:
self._oscError(req)
response = None
return response
def setOption(self, option, value):
"""
Set an option to a value. The validity of the option is checked. The
validity of the value is not.
Reference:
https://developers.google.com/streetview/open-spherical-camera/reference/camera/setoptions
https://developers.theta360.com/en/docs/v2/api_reference/commands/camera.set_options.html
"""
        if self.sid is None or option not in self.getOptionNames():
response = None
return response
print( "setOption - %s : %s" % (option, value) )
url = self._request("commands/execute")
body = json.dumps({"name": "camera.setOptions",
"parameters": {
"sessionId": self.sid,
"options": {
option: value,
}
}
})
try:
req = requests.post(url, data=body)
        except Exception as e:
self._httpError(e)
return None
if req.status_code == 200:
response = req.json()
#print( "setOption suceeeded - %s " % response )
else:
self._oscError(req)
response = None
return response
def getOption(self, option):
"""
Get an option value. The validity of the option is not checked.
Reference:
https://developers.google.com/streetview/open-spherical-camera/reference/camera/getoptions
https://developers.theta360.com/en/docs/v2/api_reference/commands/camera.get_options.html
"""
url = self._request("commands/execute")
body = json.dumps({"name": "camera.getOptions",
"parameters": {
"sessionId": self.sid,
"optionNames": [
option]
}
})
try:
req = requests.post(url, data=body)
        except Exception as e:
self._httpError(e)
return None
if req.status_code == 200:
response = req.json()
value = response["results"]["options"][option]
else:
self._oscError(req)
value = None
return value
def getSid(self):
"""
        Helper function that refreshes the cached sessionId and
        returns its value.
"""
url = self._request("state")
try:
req = requests.post(url)
        except Exception as e:
self._httpError(e)
self.sid = None
return None
if req.status_code == 200:
response = req.json()
self.sid = response["state"]["sessionId"]
else:
self._oscError(req)
self.sid = None
return self.sid
# Extensions
def getAllOptions(self):
"""
Helper function that will get the value for all options.
"""
url = self._request("commands/execute")
body = json.dumps({"name": "camera.getOptions",
"parameters": {
"sessionId": self.sid,
"optionNames": self.getOptionNames()
}
})
try:
req = requests.post(url, data=body)
        except Exception as e:
self._httpError(e)
return None
if req.status_code == 200:
response = req.json()
returnOptions = response["results"]["options"]
else:
self._oscError(req)
returnOptions = None
return returnOptions
def latestFileUri(self):
"""
Get the name of the last captured image or video from the state
"""
try:
state_data = self.state()
except:
return None
if state_data:
latestFileUri = state_data["_latestFileUri"]
else:
latestFileUri = None
return latestFileUri
def getLatestImage(self, imageType="image"):
"""
        Transfer the latest file from the camera to the computer and save the
        binary data to local storage. The imageType parameter
        can be set to "thumb" for a thumbnail or "image" for the
        full-size image. The default is "image".
"""
fileUri = self.latestFileUri()
if fileUri:
self.getImage(fileUri, imageType)
def getLatestImageMetadata(self):
"""
Get the metadata for the last image
"""
fileUri = self.latestFileUri()
if fileUri:
            metadata = self.getMetadata(fileUri)
else:
metadata = None
return metadata
# OpenSphericalCamera
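# Hedged end-to-end sketch (not part of the original module): ties the session,
# capture, polling and download helpers above together. It assumes the caller
# has already constructed a camera object (the class is defined earlier in this
# file) and that the camera is reachable over HTTP.
def example_capture_workflow(camera):
    """Start a session, capture a still image, wait for it, download it."""
    if camera.startSession() is None:
        return None
    response = camera.takePicture()
    # Long exposures may report "inProgress" with a command id (OSC spec assumption).
    if response and response.get("state") == "inProgress":
        camera.waitForProcessing(response["id"])
    camera.getLatestImage("image")
    camera.closeSession()
    return camera.latestFileUri()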
|
|
from __future__ import print_function, division
from functools import wraps
from sympy.core import S, Symbol, sympify, Tuple, Integer, Basic, Expr
from sympy.core.decorators import call_highest_priority
from sympy.core.sympify import SympifyError, sympify
from sympy.functions import conjugate, adjoint
from sympy.matrices import ShapeError
from sympy.simplify import simplify
def _sympifyit(arg, retval=None):
# This version of _sympifyit sympifies MutableMatrix objects
def deco(func):
@wraps(func)
def __sympifyit_wrapper(a, b):
try:
b = sympify(b, strict=True)
return func(a, b)
except SympifyError:
return retval
return __sympifyit_wrapper
return deco
class MatrixExpr(Basic):
""" Superclass for Matrix Expressions
MatrixExprs represent abstract matrices, linear transformations represented
within a particular basis.
Examples
========
>>> from sympy import MatrixSymbol
>>> A = MatrixSymbol('A', 3, 3)
>>> y = MatrixSymbol('y', 3, 1)
>>> x = (A.T*A).I * A * y
See Also
========
MatrixSymbol
MatAdd
MatMul
Transpose
Inverse
"""
_op_priority = 11.0
is_Matrix = True
is_MatrixExpr = True
is_Identity = None
is_Inverse = False
is_Transpose = False
is_ZeroMatrix = False
is_MatAdd = False
is_MatMul = False
is_commutative = False
def __new__(cls, *args, **kwargs):
args = map(sympify, args)
return Basic.__new__(cls, *args, **kwargs)
# The following is adapted from the core Expr object
def __neg__(self):
return MatMul(S.NegativeOne, self).doit()
def __abs__(self):
raise NotImplementedError
@_sympifyit('other', NotImplemented)
@call_highest_priority('__radd__')
def __add__(self, other):
return MatAdd(self, other).doit()
@_sympifyit('other', NotImplemented)
@call_highest_priority('__add__')
def __radd__(self, other):
return MatAdd(other, self).doit()
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rsub__')
def __sub__(self, other):
return MatAdd(self, -other).doit()
@_sympifyit('other', NotImplemented)
@call_highest_priority('__sub__')
def __rsub__(self, other):
return MatAdd(other, -self).doit()
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rmul__')
def __mul__(self, other):
return MatMul(self, other).doit()
@_sympifyit('other', NotImplemented)
@call_highest_priority('__mul__')
def __rmul__(self, other):
return MatMul(other, self).doit()
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rpow__')
def __pow__(self, other):
if not self.is_square:
raise ShapeError("Power of non-square matrix %s" % self)
if other is S.NegativeOne:
return Inverse(self)
elif other is S.Zero:
return Identity(self.rows)
elif other is S.One:
return self
return MatPow(self, other)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__pow__')
def __rpow__(self, other):
raise NotImplementedError("Matrix Power not defined")
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rdiv__')
def __div__(self, other):
return self * other**S.NegativeOne
@_sympifyit('other', NotImplemented)
@call_highest_priority('__div__')
def __rdiv__(self, other):
raise NotImplementedError()
#return MatMul(other, Pow(self, S.NegativeOne))
__truediv__ = __div__
__rtruediv__ = __rdiv__
@property
def rows(self):
return self.shape[0]
@property
def cols(self):
return self.shape[1]
@property
def is_square(self):
return self.rows == self.cols
def _eval_conjugate(self):
from sympy.matrices.expressions.adjoint import Adjoint
from sympy.matrices.expressions.transpose import Transpose
return Adjoint(Transpose(self))
def _eval_inverse(self):
from sympy.matrices.expressions.inverse import Inverse
return Inverse(self)
def _eval_transpose(self):
return Transpose(self)
def _eval_power(self, exp):
return MatPow(self, exp)
def _eval_simplify(self, **kwargs):
if self.is_Atom:
return self
else:
return self.__class__(*[simplify(x, **kwargs) for x in self.args])
def _eval_adjoint(self):
from sympy.matrices.expressions.adjoint import Adjoint
return Adjoint(self)
def _entry(self, i, j):
raise NotImplementedError(
"Indexing not implemented for %s" % self.__class__.__name__)
def adjoint(self):
return adjoint(self)
def conjugate(self):
return conjugate(self)
def transpose(self):
from sympy.matrices.expressions.transpose import transpose
return transpose(self)
T = property(transpose, None, None, 'Matrix transposition.')
def inverse(self):
return self._eval_inverse()
@property
def I(self):
return self.inverse()
def valid_index(self, i, j):
def is_valid(idx):
return isinstance(idx, (int, Integer, Symbol, Expr))
return (is_valid(i) and is_valid(j) and
(0 <= i) != False and (i < self.rows) != False and
(0 <= j) != False and (j < self.cols) != False)
def __getitem__(self, key):
if not isinstance(key, tuple) and isinstance(key, slice):
from sympy.matrices.expressions.slice import MatrixSlice
return MatrixSlice(self, key, (0, None, 1))
if isinstance(key, tuple) and len(key) == 2:
i, j = key
if isinstance(i, slice) or isinstance(j, slice):
from sympy.matrices.expressions.slice import MatrixSlice
return MatrixSlice(self, i, j)
i, j = sympify(i), sympify(j)
if self.valid_index(i, j) != False:
return self._entry(i, j)
else:
raise IndexError("Invalid indices (%s, %s)" % (i, j))
raise IndexError("Invalid index, wanted %s[i,j]" % self)
def as_explicit(self):
"""
Returns a dense Matrix with elements represented explicitly
Returns an object of type ImmutableMatrix.
Examples
========
>>> from sympy import Identity
>>> I = Identity(3)
>>> I
I
>>> I.as_explicit()
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
See Also
========
as_mutable: returns mutable Matrix type
"""
from sympy.matrices.immutable import ImmutableMatrix
return ImmutableMatrix([[ self[i, j]
for j in range(self.cols)]
for i in range(self.rows)])
def as_mutable(self):
"""
Returns a dense, mutable matrix with elements represented explicitly
Examples
========
>>> from sympy import Identity
>>> I = Identity(3)
>>> I
I
>>> I.shape
(3, 3)
>>> I.as_mutable()
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
See Also
========
as_explicit: returns ImmutableMatrix
"""
return self.as_explicit().as_mutable()
def __array__(self):
from numpy import empty
a = empty(self.shape, dtype=object)
for i in range(self.rows):
for j in range(self.cols):
a[i, j] = self[i, j]
return a
def equals(self, other):
"""
Test elementwise equality between matrices, potentially of different
types
>>> from sympy import Identity, eye
>>> Identity(3).equals(eye(3))
True
"""
return self.as_explicit().equals(other)
def canonicalize(self):
return self
def as_coeff_mmul(self):
return 1, MatMul(self)
class MatrixElement(Expr):
parent = property(lambda self: self.args[0])
i = property(lambda self: self.args[1])
j = property(lambda self: self.args[2])
_diff_wrt = True
class MatrixSymbol(MatrixExpr):
"""Symbolic representation of a Matrix object
Creates a SymPy Symbol to represent a Matrix. This matrix has a shape and
can be included in Matrix Expressions
>>> from sympy import MatrixSymbol, Identity
>>> A = MatrixSymbol('A', 3, 4) # A 3 by 4 Matrix
>>> B = MatrixSymbol('B', 4, 3) # A 4 by 3 Matrix
>>> A.shape
(3, 4)
>>> 2*A*B + Identity(3)
I + 2*A*B
"""
is_commutative = False
def __new__(cls, name, n, m):
n, m = sympify(n), sympify(m)
obj = Basic.__new__(cls, name, n, m)
return obj
def _hashable_content(self):
        return (self.name, self.shape)
@property
def shape(self):
return self.args[1:3]
@property
def name(self):
return self.args[0]
def _eval_subs(self, old, new):
# only do substitutions in shape
shape = Tuple(*self.shape)._subs(old, new)
return MatrixSymbol(self.name, *shape)
def __call__(self, *args):
raise TypeError( "%s object is not callable" % self.__class__ )
def _entry(self, i, j):
return MatrixElement(self, i, j)
@property
def free_symbols(self):
return set((self,))
def doit(self, **hints):
if hints.get('deep', True):
return type(self)(self.name, self.args[1].doit(**hints),
self.args[2].doit(**hints))
else:
return self
def _eval_simplify(self, **kwargs):
return self
class Identity(MatrixExpr):
"""The Matrix Identity I - multiplicative identity
>>> from sympy.matrices import Identity, MatrixSymbol
>>> A = MatrixSymbol('A', 3, 5)
>>> I = Identity(3)
>>> I*A
A
"""
is_Identity = True
def __new__(cls, n):
return super(Identity, cls).__new__(cls, sympify(n))
@property
def rows(self):
return self.args[0]
@property
def cols(self):
return self.args[0]
@property
def shape(self):
return (self.args[0], self.args[0])
def _eval_transpose(self):
return self
def _eval_trace(self):
return self.rows
def _eval_inverse(self):
return self
def conjugate(self):
return self
def _entry(self, i, j):
if i == j:
return S.One
else:
return S.Zero
def _eval_determinant(self):
return S.One
class ZeroMatrix(MatrixExpr):
"""The Matrix Zero 0 - additive identity
>>> from sympy import MatrixSymbol, ZeroMatrix
>>> A = MatrixSymbol('A', 3, 5)
>>> Z = ZeroMatrix(3, 5)
>>> A+Z
A
>>> Z*A.T
0
"""
is_ZeroMatrix = True
def __new__(cls, m, n):
return super(ZeroMatrix, cls).__new__(cls, m, n)
@property
def shape(self):
return (self.args[0], self.args[1])
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rpow__')
def __pow__(self, other):
if other != 1 and not self.is_square:
raise ShapeError("Power of non-square matrix %s" % self)
if other == 0:
return Identity(self.rows)
return self
def _eval_transpose(self):
return ZeroMatrix(self.cols, self.rows)
def _eval_trace(self):
return S.Zero
def _eval_determinant(self):
return S.Zero
def conjugate(self):
return self
def _entry(self, i, j):
return S.Zero
def __nonzero__(self):
return False
__bool__ = __nonzero__
def matrix_symbols(expr):
return [sym for sym in expr.free_symbols if sym.is_Matrix]
from .matmul import MatMul
from .matadd import MatAdd
from .matpow import MatPow
from .transpose import Transpose
from .inverse import Inverse
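# Hedged usage sketch (not part of the original module): a short walk through
# the MatrixExpr API defined above, wrapped in a function so that importing
# this module never executes it.
def _example_matrix_expr_usage():
    # MatrixSymbol and Identity are defined above; MatMul and Inverse come
    # from the imports at the bottom of this module.
    A = MatrixSymbol('A', 3, 3)
    y = MatrixSymbol('y', 3, 1)
    expr = (A.T * A).I * A * y      # stays symbolic: Inverse and MatMul nodes
    assert expr.shape == (3, 1)
    assert Identity(3)[0, 0] == S.One and Identity(3)[0, 1] == S.Zero
    return expr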
|
|
"""Test queues inspection SB APIs."""
from __future__ import print_function
import unittest2
import os
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestQueues(TestBase):
mydir = TestBase.compute_mydir(__file__)
@skipUnlessDarwin
@add_test_categories(['pyapi'])
def test_with_python_api_queues(self):
"""Test queues inspection SB APIs."""
self.build()
self.queues()
@skipUnlessDarwin
@add_test_categories(['pyapi'])
def test_with_python_api_queues_with_backtrace(self):
"""Test queues inspection SB APIs."""
self.build()
self.queues_with_libBacktraceRecording()
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line numbers that we will step to in main:
self.main_source = "main.c"
def check_queue_for_valid_queue_id(self, queue):
self.assertTrue(
queue.GetQueueID() != 0, "Check queue %s for valid QueueID (got 0x%x)" %
(queue.GetName(), queue.GetQueueID()))
def check_running_and_pending_items_on_queue(
self, queue, expected_running, expected_pending):
self.assertTrue(
queue.GetNumPendingItems() == expected_pending,
"queue %s should have %d pending items, instead has %d pending items" %
(queue.GetName(),
expected_pending,
(queue.GetNumPendingItems())))
self.assertTrue(
queue.GetNumRunningItems() == expected_running,
"queue %s should have %d running items, instead has %d running items" %
(queue.GetName(),
expected_running,
(queue.GetNumRunningItems())))
def describe_threads(self):
desc = []
for x in self.inferior_process:
id = x.GetIndexID()
reason_str = lldbutil.stop_reason_to_str(x.GetStopReason())
location = "\t".join([lldbutil.get_description(
x.GetFrameAtIndex(i)) for i in range(x.GetNumFrames())])
desc.append(
"thread %d: %s (queue id: %s) at\n\t%s" %
(id, reason_str, x.GetQueueID(), location))
print('\n'.join(desc))
def check_number_of_threads_owned_by_queue(self, queue, number_threads):
if (queue.GetNumThreads() != number_threads):
self.describe_threads()
self.assertTrue(
queue.GetNumThreads() == number_threads,
"queue %s should have %d thread executing, but has %d" %
(queue.GetName(),
number_threads,
queue.GetNumThreads()))
def check_queue_kind(self, queue, kind):
expected_kind_string = "Unknown"
if kind == lldb.eQueueKindSerial:
expected_kind_string = "Serial queue"
if kind == lldb.eQueueKindConcurrent:
expected_kind_string = "Concurrent queue"
actual_kind_string = "Unknown"
if queue.GetKind() == lldb.eQueueKindSerial:
actual_kind_string = "Serial queue"
if queue.GetKind() == lldb.eQueueKindConcurrent:
actual_kind_string = "Concurrent queue"
self.assertTrue(
queue.GetKind() == kind,
"queue %s is expected to be a %s but it is actually a %s" %
(queue.GetName(),
expected_kind_string,
actual_kind_string))
def check_queues_threads_match_queue(self, queue):
for idx in range(0, queue.GetNumThreads()):
t = queue.GetThreadAtIndex(idx)
self.assertTrue(
t.IsValid(), "Queue %s's thread #%d must be valid" %
(queue.GetName(), idx))
self.assertTrue(
t.GetQueueID() == queue.GetQueueID(),
"Queue %s has a QueueID of %d but its thread #%d has a QueueID of %d" %
(queue.GetName(),
queue.GetQueueID(),
idx,
t.GetQueueID()))
self.assertTrue(
t.GetQueueName() == queue.GetName(),
"Queue %s has a QueueName of %s but its thread #%d has a QueueName of %s" %
(queue.GetName(),
queue.GetName(),
idx,
t.GetQueueName()))
self.assertTrue(
t.GetQueue().GetQueueID() == queue.GetQueueID(),
"Thread #%d's Queue's QueueID of %d is not the same as the QueueID of its owning queue %d" %
(idx,
t.GetQueue().GetQueueID(),
queue.GetQueueID()))
def queues(self):
"""Test queues inspection SB APIs without libBacktraceRecording."""
exe = self.getBuildArtifact("a.out")
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
self.main_source_spec = lldb.SBFileSpec(self.main_source)
break1 = target.BreakpointCreateByName("stopper", 'a.out')
self.assertTrue(break1, VALID_BREAKPOINT)
process = target.LaunchSimple(
[], None, self.get_process_working_directory())
self.assertTrue(process, PROCESS_IS_VALID)
threads = lldbutil.get_threads_stopped_at_breakpoint(process, break1)
if len(threads) != 1:
self.fail("Failed to stop at breakpoint 1.")
self.inferior_process = process
queue_submittor_1 = lldb.SBQueue()
queue_performer_1 = lldb.SBQueue()
queue_performer_2 = lldb.SBQueue()
queue_performer_3 = lldb.SBQueue()
for idx in range(0, process.GetNumQueues()):
q = process.GetQueueAtIndex(idx)
if q.GetName() == "com.apple.work_submittor_1":
queue_submittor_1 = q
if q.GetName() == "com.apple.work_performer_1":
queue_performer_1 = q
if q.GetName() == "com.apple.work_performer_2":
queue_performer_2 = q
if q.GetName() == "com.apple.work_performer_3":
queue_performer_3 = q
self.assertTrue(
queue_submittor_1.IsValid() and queue_performer_1.IsValid() and queue_performer_2.IsValid() and queue_performer_3.IsValid(),
"Got all four expected queues: %s %s %s %s" %
(queue_submittor_1.IsValid(),
queue_performer_1.IsValid(),
queue_performer_2.IsValid(),
queue_performer_3.IsValid()))
self.check_queue_for_valid_queue_id(queue_submittor_1)
self.check_queue_for_valid_queue_id(queue_performer_1)
self.check_queue_for_valid_queue_id(queue_performer_2)
self.check_queue_for_valid_queue_id(queue_performer_3)
self.check_number_of_threads_owned_by_queue(queue_submittor_1, 1)
self.check_number_of_threads_owned_by_queue(queue_performer_1, 1)
self.check_number_of_threads_owned_by_queue(queue_performer_2, 1)
self.check_number_of_threads_owned_by_queue(queue_performer_3, 4)
self.check_queue_kind(queue_submittor_1, lldb.eQueueKindSerial)
self.check_queue_kind(queue_performer_1, lldb.eQueueKindSerial)
self.check_queue_kind(queue_performer_2, lldb.eQueueKindSerial)
self.check_queue_kind(queue_performer_3, lldb.eQueueKindConcurrent)
self.check_queues_threads_match_queue(queue_submittor_1)
self.check_queues_threads_match_queue(queue_performer_1)
self.check_queues_threads_match_queue(queue_performer_2)
self.check_queues_threads_match_queue(queue_performer_3)
# We have threads running with all the different dispatch QoS service
# levels - find those threads and check that we can get the correct
# QoS name for each of them.
user_initiated_thread = lldb.SBThread()
user_interactive_thread = lldb.SBThread()
utility_thread = lldb.SBThread()
unspecified_thread = lldb.SBThread()
background_thread = lldb.SBThread()
for th in process.threads:
if th.GetName() == "user initiated QoS":
user_initiated_thread = th
if th.GetName() == "user interactive QoS":
user_interactive_thread = th
if th.GetName() == "utility QoS":
utility_thread = th
if th.GetName() == "unspecified QoS":
unspecified_thread = th
if th.GetName() == "background QoS":
background_thread = th
self.assertTrue(
user_initiated_thread.IsValid(),
"Found user initiated QoS thread")
self.assertTrue(
user_interactive_thread.IsValid(),
"Found user interactive QoS thread")
self.assertTrue(utility_thread.IsValid(), "Found utility QoS thread")
self.assertTrue(
unspecified_thread.IsValid(),
"Found unspecified QoS thread")
self.assertTrue(
background_thread.IsValid(),
"Found background QoS thread")
stream = lldb.SBStream()
self.assertTrue(
user_initiated_thread.GetInfoItemByPathAsString(
"requested_qos.printable_name",
stream),
"Get QoS printable string for user initiated QoS thread")
self.assertTrue(
stream.GetData() == "User Initiated",
"user initiated QoS thread name is valid")
stream.Clear()
self.assertTrue(
user_interactive_thread.GetInfoItemByPathAsString(
"requested_qos.printable_name",
stream),
"Get QoS printable string for user interactive QoS thread")
self.assertTrue(
stream.GetData() == "User Interactive",
"user interactive QoS thread name is valid")
stream.Clear()
self.assertTrue(
utility_thread.GetInfoItemByPathAsString(
"requested_qos.printable_name",
stream),
"Get QoS printable string for utility QoS thread")
self.assertTrue(
stream.GetData() == "Utility",
"utility QoS thread name is valid")
stream.Clear()
self.assertTrue(
unspecified_thread.GetInfoItemByPathAsString(
"requested_qos.printable_name",
stream),
"Get QoS printable string for unspecified QoS thread")
qosName = stream.GetData()
self.assertTrue(
qosName == "User Initiated" or qosName == "Default",
"unspecified QoS thread name is valid")
stream.Clear()
self.assertTrue(
background_thread.GetInfoItemByPathAsString(
"requested_qos.printable_name",
stream),
"Get QoS printable string for background QoS thread")
self.assertTrue(
stream.GetData() == "Background",
"background QoS thread name is valid")
@skipIfDarwin # rdar://50379398
def queues_with_libBacktraceRecording(self):
"""Test queues inspection SB APIs with libBacktraceRecording present."""
exe = self.getBuildArtifact("a.out")
if not os.path.isfile(
'/Applications/Xcode.app/Contents/Developer/usr/lib/libBacktraceRecording.dylib'):
self.skipTest(
"Skipped because libBacktraceRecording.dylib was present on the system.")
if not os.path.isfile(
'/usr/lib/system/introspection/libdispatch.dylib'):
self.skipTest(
"Skipped because introspection libdispatch dylib is not present.")
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
self.main_source_spec = lldb.SBFileSpec(self.main_source)
break1 = target.BreakpointCreateByName("stopper", 'a.out')
self.assertTrue(break1, VALID_BREAKPOINT)
# Now launch the process, and do not stop at entry point.
libbtr_path = "/Applications/Xcode.app/Contents/Developer/usr/lib/libBacktraceRecording.dylib"
if self.getArchitecture() in ['arm', 'arm64', 'arm64e', 'arm64_32', 'armv7', 'armv7k']:
libbtr_path = "/Developer/usr/lib/libBacktraceRecording.dylib"
process = target.LaunchSimple(
[],
[
'DYLD_INSERT_LIBRARIES=%s' % (libbtr_path),
'DYLD_LIBRARY_PATH=/usr/lib/system/introspection'],
self.get_process_working_directory())
self.assertTrue(process, PROCESS_IS_VALID)
# The stop reason of the thread should be breakpoint.
threads = lldbutil.get_threads_stopped_at_breakpoint(process, break1)
if len(threads) != 1:
self.fail("Failed to stop at breakpoint 1.")
self.inferior_process = process
libbtr_module_filespec = lldb.SBFileSpec("libBacktraceRecording.dylib")
libbtr_module = target.FindModule(libbtr_module_filespec)
if not libbtr_module.IsValid():
self.skipTest(
"Skipped because libBacktraceRecording.dylib was not loaded into the process.")
self.assertTrue(
process.GetNumQueues() >= 4,
"Found the correct number of queues.")
queue_submittor_1 = lldb.SBQueue()
queue_performer_1 = lldb.SBQueue()
queue_performer_2 = lldb.SBQueue()
queue_performer_3 = lldb.SBQueue()
for idx in range(0, process.GetNumQueues()):
q = process.GetQueueAtIndex(idx)
if "LLDB_COMMAND_TRACE" in os.environ:
print("Queue with id %s has name %s" % (q.GetQueueID(), q.GetName()))
if q.GetName() == "com.apple.work_submittor_1":
queue_submittor_1 = q
if q.GetName() == "com.apple.work_performer_1":
queue_performer_1 = q
if q.GetName() == "com.apple.work_performer_2":
queue_performer_2 = q
if q.GetName() == "com.apple.work_performer_3":
queue_performer_3 = q
if q.GetName() == "com.apple.main-thread":
if q.GetNumThreads() == 0:
print("Cannot get thread <=> queue associations")
return
self.assertTrue(
queue_submittor_1.IsValid() and queue_performer_1.IsValid() and queue_performer_2.IsValid() and queue_performer_3.IsValid(),
"Got all four expected queues: %s %s %s %s" %
(queue_submittor_1.IsValid(),
queue_performer_1.IsValid(),
queue_performer_2.IsValid(),
queue_performer_3.IsValid()))
self.check_queue_for_valid_queue_id(queue_submittor_1)
self.check_queue_for_valid_queue_id(queue_performer_1)
self.check_queue_for_valid_queue_id(queue_performer_2)
self.check_queue_for_valid_queue_id(queue_performer_3)
self.check_running_and_pending_items_on_queue(queue_submittor_1, 1, 0)
self.check_running_and_pending_items_on_queue(queue_performer_1, 1, 3)
self.check_running_and_pending_items_on_queue(
queue_performer_2, 1, 9999)
self.check_running_and_pending_items_on_queue(queue_performer_3, 4, 0)
self.check_number_of_threads_owned_by_queue(queue_submittor_1, 1)
self.check_number_of_threads_owned_by_queue(queue_performer_1, 1)
self.check_number_of_threads_owned_by_queue(queue_performer_2, 1)
self.check_number_of_threads_owned_by_queue(queue_performer_3, 4)
self.check_queue_kind(queue_submittor_1, lldb.eQueueKindSerial)
self.check_queue_kind(queue_performer_1, lldb.eQueueKindSerial)
self.check_queue_kind(queue_performer_2, lldb.eQueueKindSerial)
self.check_queue_kind(queue_performer_3, lldb.eQueueKindConcurrent)
self.check_queues_threads_match_queue(queue_submittor_1)
self.check_queues_threads_match_queue(queue_performer_1)
self.check_queues_threads_match_queue(queue_performer_2)
self.check_queues_threads_match_queue(queue_performer_3)
self.assertTrue(queue_performer_2.GetPendingItemAtIndex(
0).IsValid(), "queue 2's pending item #0 is valid")
self.assertTrue(queue_performer_2.GetPendingItemAtIndex(0).GetAddress().GetSymbol(
).GetName() == "doing_the_work_2", "queue 2's pending item #0 should be doing_the_work_2")
self.assertTrue(
queue_performer_2.GetNumPendingItems() == 9999,
"verify that queue 2 still has 9999 pending items")
self.assertTrue(queue_performer_2.GetPendingItemAtIndex(
9998).IsValid(), "queue 2's pending item #9998 is valid")
self.assertTrue(queue_performer_2.GetPendingItemAtIndex(9998).GetAddress().GetSymbol(
).GetName() == "doing_the_work_2", "queue 2's pending item #0 should be doing_the_work_2")
self.assertTrue(queue_performer_2.GetPendingItemAtIndex(
9999).IsValid() == False, "queue 2's pending item #9999 is invalid")
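# Hedged helper sketch (not part of the original test): the SB API pattern the
# checks above exercise, condensed into one loop. Assumes `process` is a valid
# lldb.SBProcess that is stopped with queue information available.
def summarize_queues(process):
    for idx in range(process.GetNumQueues()):
        q = process.GetQueueAtIndex(idx)
        print("queue %s id=%s kind=%s threads=%d running=%d pending=%d" % (
            q.GetName(), q.GetQueueID(), q.GetKind(), q.GetNumThreads(),
            q.GetNumRunningItems(), q.GetNumPendingItems()))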
|
|
import os
import time
import random
import apibinding.inventory as inventory
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.operations.volume_operations as vol_ops
import zstackwoodpecker.operations.vm_operations as vm_ops
import zstackwoodpecker.operations.image_operations as img_ops
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.account_operations as acc_ops
import zstackwoodpecker.zstack_test.zstack_test_vm as test_vm_header
import zstackwoodpecker.zstack_test.zstack_test_image as zstack_image_header
import zstackwoodpecker.zstack_test.zstack_test_snapshot as zstack_sp_header
import zstackwoodpecker.zstack_test.zstack_test_volume as zstack_volume_header
import zstackwoodpecker.operations.scenario_operations as sce_ops
import zstackwoodpecker.header.host as host_header
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
Path = [[]]
index = 0
tag = "VM_TEST_REBOOT"
backup = None
backup_list = []
case_flavor = dict(snapshot_running= dict(vm_op=['VM_TEST_SNAPSHOT'], state_op=['VM_TEST_NONE']),
create_img_running= dict(vm_op=['VM_TEST_CREATE_IMG'], state_op=['VM_TEST_NONE']),
resize_running= dict(vm_op=['VM_TEST_RESIZE_RVOL'], state_op=['VM_TEST_NONE']),
del_snapshot_running= dict(vm_op=['RVOL_DEL_SNAPSHOT'], state_op=['VM_TEST_NONE']),
create_img_from_backup_running= dict(vm_op=['VM_TEST_BACKUP_IMAGE'], state_op=['VM_TEST_NONE']),
migrate_running= dict(vm_op=['VM_TEST_MIGRATE'], state_op=['VM_TEST_NONE']),
snapshot_stopped= dict(vm_op=['VM_TEST_SNAPSHOT'], state_op=['VM_TEST_STOP']),
create_img_stopped= dict(vm_op=['VM_TEST_CREATE_IMG'], state_op=['VM_TEST_STOP']),
resize_stopped= dict(vm_op=['VM_TEST_RESIZE_RVOL'], state_op=['VM_TEST_STOP']),
del_snapshot_stopped= dict(vm_op=['RVOL_DEL_SNAPSHOT'], state_op=['VM_TEST_STOP']),
change_os_stopped= dict(vm_op=['VM_TEST_CHANGE_OS'], state_op=['VM_TEST_STOP']),
reset_stopped= dict(vm_op=['VM_TEST_RESET'], state_op=['VM_TEST_STOP']),
revert_backup_stopped= dict(vm_op=['VM_TEST_REVERT_BACKUP'], state_op=['VM_TEST_STOP']),
create_img_from_backup_stopped= dict(vm_op=['VM_TEST_BACKUP_IMAGE'], state_op=['VM_TEST_STOP']),
migrate_stopped= dict(vm_op=['VM_TEST_MIGRATE'], state_op=['VM_TEST_STOP']),
change_os_snapshot_stopped= dict(vm_op=['VM_TEST_CHANGE_OS', 'VM_TEST_SNAPSHOT'], state_op=['VM_TEST_STOP']),
backup_reboot= dict(vm_op=['VM_TEST_NONE', 'VM_TEST_SNAPSHOT'], state_op=['VM_TEST_NONE', 'VM_TEST_REBOOT']),
)
def record(fun):
def recorder(vm, op):
global index
if op != tag:
Path[index].append(op)
elif op == tag:
Path.append([op])
Path[index].append(op)
index += 1
return fun(vm, op)
return recorder
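# Hedged trace of the decorator above (illustration only): with
# tag == "VM_TEST_REBOOT" and the op sequence
#     "VM_TEST_SNAPSHOT", "VM_TEST_REBOOT", "VM_TEST_STOP"
# Path ends up as
#     [["VM_TEST_SNAPSHOT", "VM_TEST_REBOOT"], ["VM_TEST_REBOOT", "VM_TEST_STOP"]]
# i.e. the reboot op closes the current path segment and also opens a new one.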
VM_RUNNING_OPS = [
"VM_TEST_SNAPSHOT",
"VM_TEST_CREATE_IMG",
"VM_TEST_RESIZE_RVOL",
"RVOL_DEL_SNAPSHOT",
"VM_TEST_NONE",
"VM_TEST_BACKUP_IMAGE"
]
VM_STOPPED_OPS = [
"VM_TEST_SNAPSHOT",
"VM_TEST_CREATE_IMG",
"VM_TEST_RESIZE_RVOL",
"RVOL_DEL_SNAPSHOT",
"VM_TEST_CHANGE_OS",
"VM_TEST_RESET",
"VM_TEST_NONE",
"VM_TEST_REVERT_BACKUP",
"VM_TEST_BACKUP_IMAGE"
]
VM_STATE_OPS = [
"VM_TEST_STOP",
"VM_TEST_REBOOT",
"VM_TEST_NONE"
]
@record
def vm_op_test(vm, op):
test_util.test_logger(vm.vm.name + "-------" + op)
ops = {
"VM_TEST_STOP": stop,
"VM_TEST_REBOOT": reboot,
"VM_TEST_NONE": do_nothing,
"VM_TEST_MIGRATE": migrate,
"VM_TEST_SNAPSHOT": create_snapshot,
"VM_TEST_CREATE_IMG": create_image,
"VM_TEST_RESIZE_RVOL": resize_rvol,
"RVOL_DEL_SNAPSHOT": delete_snapshot,
"VM_TEST_CHANGE_OS": change_os,
"VM_TEST_RESET": reset,
"VM_TEST_BACKUP": back_up,
"VM_TEST_REVERT_BACKUP": revert_backup,
"VM_TEST_BACKUP_IMAGE": backup_image
}
ops[op](vm)
def stop(vm):
vm.stop()
def reboot(vm):
vm.reboot()
def do_nothing(vm):
pass
def reset(vm):
vm.reinit()
def migrate(vm_obj):
ps = test_lib.lib_get_primary_storage_by_vm(vm_obj.get_vm())
if vm_obj.vm.state == "Running" and ps.type in [inventory.CEPH_PRIMARY_STORAGE_TYPE, 'SharedMountPoint', inventory.NFS_PRIMARY_STORAGE_TYPE,
'SharedBlock', inventory.LOCAL_STORAGE_TYPE]:
target_host = test_lib.lib_find_random_host(vm_obj.vm)
vm_obj.migrate(target_host.uuid)
elif ps.type in [inventory.LOCAL_STORAGE_TYPE]:
vm_obj.check()
target_host = test_lib.lib_find_random_host(vm_obj.vm)
vol_ops.migrate_volume(vm_obj.get_vm().allVolumes[0].uuid, target_host.uuid)
vm_obj.start()
test_lib.lib_wait_target_up(vm_obj.get_vm().vmNics[0].ip, 22, 300)
else:
test_util.test_fail("FOUND NEW STORAGTE TYPE. FAILED")
def create_snapshot(vm_obj):
vol_obj = zstack_volume_header.ZstackTestVolume()
vol_obj.set_volume(test_lib.lib_get_root_volume(vm_obj.get_vm()))
snapshots_root = zstack_sp_header.ZstackVolumeSnapshot()
snapshots_root.set_utility_vm(vm_obj)
snapshots_root.set_target_volume(vol_obj)
snapshots_root.create_snapshot('create_data_snapshot1')
snapshots_root.check()
sp1 = snapshots_root.get_current_snapshot()
#vm_obj.stop()
#vm_obj.check()
#snapshots_root.use_snapshot(sp1)
#vm_obj.start()
#test_lib.lib_wait_target_up(vm_obj.get_vm().vmNics[0].ip, 22, 300)
def delete_snapshot(vm_obj):
vol_obj = zstack_volume_header.ZstackTestVolume()
vol_obj.set_volume(test_lib.lib_get_root_volume(vm_obj.get_vm()))
snapshots_root = zstack_sp_header.ZstackVolumeSnapshot()
snapshots_root.set_utility_vm(vm_obj)
snapshots_root.set_target_volume(vol_obj)
sp_list = snapshots_root.get_snapshot_list()
if sp_list:
snapshots_root.delete_snapshot(random.choice(sp_list))
def create_image(vm_obj):
volume_uuid = test_lib.lib_get_root_volume(vm_obj.get_vm()).uuid
bs_list = test_lib.lib_get_backup_storage_list_by_vm(vm_obj.vm)
image_option = test_util.ImageOption()
image_option.set_root_volume_uuid(volume_uuid)
image_option.set_name('image_resize_template')
image_option.set_backup_storage_uuid_list([bs_list[0].uuid])
image = img_ops.create_root_volume_template(image_option)
new_image = zstack_image_header.ZstackTestImage()
new_image.set_creation_option(image_option)
new_image.set_image(image)
new_image.check()
new_image.clean()
def resize_rvol(vm_obj):
vol_size = test_lib.lib_get_root_volume(vm_obj.get_vm()).size
volume_uuid = test_lib.lib_get_root_volume(vm_obj.get_vm()).uuid
set_size = 1024 * 1024 * 1024 + int(vol_size)
vol_ops.resize_volume(volume_uuid, set_size)
vm_obj.update()
vol_size_after = test_lib.lib_get_root_volume(vm_obj.get_vm()).size
# if set_size != vol_size_after:
# test_util.test_fail('Resize Root Volume failed, size = %s' % vol_size_after)
# vm_obj.check()
test_lib.lib_wait_target_up(vm_obj.get_vm().vmNics[0].ip, 22, 300)
def change_os(vm_obj):
vm_uuid = vm_obj.get_vm().uuid
last_l3network_uuid = test_lib.lib_get_l3s_uuid_by_vm(vm_obj.get_vm())
last_ps_uuid = test_lib.lib_get_root_volume(vm_obj.get_vm()).primaryStorageUuid
cond = res_ops.gen_query_conditions("system", '=', "false")
cond = res_ops.gen_query_conditions("mediaType", '=', "RootVolumeTemplate", cond)
cond = res_ops.gen_query_conditions("platform", '=', "Linux", cond)
image_uuid = random.choice(res_ops.query_resource(res_ops.IMAGE, cond)).uuid
vm_ops.change_vm_image(vm_uuid, image_uuid)
vm_obj.start()
vm_obj.update()
# check whether the vm is running successfully
test_lib.lib_wait_target_up(vm_obj.get_vm().vmNics[0].ip, 22, 300)
# check whether the network config has changed
l3network_uuid_after = test_lib.lib_get_l3s_uuid_by_vm(vm_obj.get_vm())
if l3network_uuid_after != last_l3network_uuid:
        test_util.test_fail('Change VM Image Failed. The network config has changed.')
# check whether primarystorage has changed
ps_uuid_after = test_lib.lib_get_root_volume(vm_obj.get_vm()).primaryStorageUuid
if ps_uuid_after != last_ps_uuid:
        test_util.test_fail('Change VM Image Failed. The primary storage has changed.')
def back_up(vm_obj):
global backup
cond = res_ops.gen_query_conditions("type", '=', "ImageStoreBackupStorage")
bs = res_ops.query_resource(res_ops.BACKUP_STORAGE, cond)[0]
backup_option = test_util.BackupOption()
backup_option.set_name("test_compare")
backup_option.set_volume_uuid(test_lib.lib_get_root_volume(vm_obj.get_vm()).uuid)
backup_option.set_backupStorage_uuid(bs.uuid)
backup = vol_ops.create_backup(backup_option)
backup_list.append(backup)
def revert_backup(vm_obj):
backup_uuid = backup_list.pop(random.randint(0, len(backup_list)-1)).uuid
vol_ops.revert_volume_from_backup(backup_uuid)
def backup_image(vm_obj):
cond = res_ops.gen_query_conditions("type", '=', "ImageStoreBackupStorage")
bs = res_ops.query_resource(res_ops.BACKUP_STORAGE, cond)[0]
backup = random.choice(backup_list)
image = img_ops.create_root_template_from_backup(bs.uuid, backup.uuid)
def print_path(Path):
print("=" * 43 + "PATH" + "=" * 43)
for i in range(len(Path)):
path = ''
for j in range(len(Path[i])):
if j == len(Path[i]) - 1:
path += Path[i][j]
else:
path += (Path[i][j] + " --> ")
print(path)
print("=" * 90)
def test():
global test_obj_dict, VM_RUNNING_OPS, VM_STOPPED_OPS, VM_STATE_OPS, backup
flavor = case_flavor[os.environ.get('CASE_FLAVOR')]
VM_OP = flavor['vm_op']
STATE_OP = flavor['state_op']
ps = res_ops.query_resource(res_ops.PRIMARY_STORAGE)[0]
if ps.type == "AliyunNAS":
test_util.test_skip("VolumeBackup does not support AliyunNAS for now")
if ps.type != inventory.LOCAL_STORAGE_TYPE and 'VM_TEST_MIGRATE' in VM_OP and "VM_TEST_STOP" in STATE_OP:
test_util.test_skip("Shared Storage does not support migration")
vm_name = "test_vm"
cond = res_ops.gen_query_conditions("system", '=', "false")
cond = res_ops.gen_query_conditions("mediaType", '=', "RootVolumeTemplate", cond)
cond = res_ops.gen_query_conditions("platform", '=', "Linux", cond)
img_name = res_ops.query_resource(res_ops.IMAGE, cond)[0].name
cond = res_ops.gen_query_conditions("category", '=', "Private")
l3_name = res_ops.query_resource(res_ops.L3_NETWORK,cond)[0].name
vm = test_stub.create_vm(vm_name, img_name, l3_name)
vm.check()
test_obj_dict.add_vm(vm)
if "VM_TEST_BACKUP_IMAGE" in VM_OP or "VM_TEST_REVERT_BACKUP" in VM_OP:
vm_op_test(vm, "VM_TEST_BACKUP")
if "RVOL_DEL_SNAPSHOT" in VM_OP:
vm_op_test(vm, "VM_TEST_SNAPSHOT")
for i,j in zip(VM_OP,STATE_OP):
vm_op_test(vm, j)
if vm.state == "Running":
if not backup_list and "VM_TEST_BACKUP_IMAGE" == i:
i = "VM_TEST_NONE"
elif vm.state == "Stopped":
if not backup_list and ("VM_TEST_BACKUP_IMAGE" == i or "VM_TEST_REVERT_BACKUP" == i):
i = "VM_TEST_NONE"
vm_op_test(vm, i)
if vm.state == "Stopped":
vm.start()
vm.check()
if test_lib.lib_is_vm_l3_has_vr(vm.vm):
test_lib.TestHarness = test_lib.TestHarnessVR
cmd = "echo 111 > /home/" + str(int(time.time()))
test_lib.lib_execute_command_in_vm(vm.vm,cmd)
vm.suspend()
# create_snapshot/backup
vm_op_test(vm, "VM_TEST_BACKUP")
# compare vm & image created by backup
if ps.type != inventory.CEPH_PRIMARY_STORAGE_TYPE:
compare(ps, vm, backup)
vm.resume()
print_path(Path)
test_lib.lib_error_cleanup(test_obj_dict)
def error_cleanup():
global test_obj_dict
print_path(Path)
test_lib.lib_error_cleanup(test_obj_dict)
def compare(ps, vm, backup):
test_util.test_logger("-----------------compare----------------")
# find vm_host
host = test_lib.lib_find_host_by_vm(vm.vm)
cond = res_ops.gen_query_conditions("type", '=', "ImageStoreBackupStorage")
bs = res_ops.query_resource(res_ops.BACKUP_STORAGE, cond)[0]
root_volume = test_lib.lib_get_root_volume(vm.get_vm())
vm_path = root_volume.installPath
if ps.type == "SharedBlock":
vm_path = "/dev/" + root_volume.installPath.split("/")[2] + "/" + root_volume.installPath.split("/")[3]
test_util.test_logger(vm_path)
name = backup.backupStorageRefs[0].installPath.split("/")[2]
id = backup.backupStorageRefs[0].installPath.split("/")[3]
# compare vm_root_volume & image
cmd = "mkdir /root/%s;" \
"/usr/local/zstack/imagestore/bin/zstcli " \
"-rootca=/var/lib/zstack/imagestorebackupstorage/package/certs/ca.pem " \
"-url=%s:8000 " \
"pull -installpath /root/%s/old.qcow2 %s:%s;" \
"qemu-img compare %s /root/%s/old.qcow2;" % (id, bs.hostname, id, name, id, vm_path, id)
# clean image
result = test_lib.lib_execute_ssh_cmd(host.managementIp, "root", "password", cmd, timeout=300)
if result != "Images are identical.\n":
test_util.test_fail("compare vm_root_volume & image created by backup")
|
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Utility library for reading user information from an id_token.
This is an experimental library that can temporarily be used to extract
a user from an id_token. The functionality provided by this library
will be provided elsewhere in the future.
"""
import base64
try:
import json
except ImportError:
import simplejson as json
import logging
import os
import re
import time
import urllib
try:
from google.appengine.api import memcache
from google.appengine.api import oauth
from google.appengine.api import urlfetch
from google.appengine.api import users
except ImportError:
from google.appengine.api import memcache
from google.appengine.api import oauth
from google.appengine.api import urlfetch
from google.appengine.api import users
try:
from Crypto.Hash import SHA256
from Crypto.PublicKey import RSA
_CRYPTO_LOADED = True
except ImportError:
_CRYPTO_LOADED = False
__all__ = ['get_current_user',
'InvalidGetUserCall',
'SKIP_CLIENT_ID_CHECK']
SKIP_CLIENT_ID_CHECK = ['*']
_CLOCK_SKEW_SECS = 300
_MAX_TOKEN_LIFETIME_SECS = 86400
_DEFAULT_CERT_URI = ('https://www.googleapis.com/service_accounts/v1/metadata/'
'raw/[email protected]')
_ENV_USE_OAUTH_SCOPE = 'ENDPOINTS_USE_OAUTH_SCOPE'
_ENV_AUTH_EMAIL = 'ENDPOINTS_AUTH_EMAIL'
_ENV_AUTH_DOMAIN = 'ENDPOINTS_AUTH_DOMAIN'
_EMAIL_SCOPE = 'https://www.googleapis.com/auth/userinfo.email'
_TOKENINFO_URL = 'https://www.googleapis.com/oauth2/v1/tokeninfo'
_MAX_AGE_REGEX = re.compile(r'\s*max-age\s*=\s*(\d+)\s*')
_CERT_NAMESPACE = '__verify_jwt'
class _AppIdentityError(Exception):
pass
class InvalidGetUserCall(Exception):
"""Called get_current_user when the environment was not set up for it."""
def get_current_user():
"""Get user information from the id_token or oauth token in the request.
This should only be called from within an Endpoints request handler,
decorated with an @endpoints.method decorator. The decorator should include
the https://www.googleapis.com/auth/userinfo.email scope.
If the current request uses an id_token, this validates and parses the token
against the info in the current request handler and returns the user.
Or, for an Oauth token, this call validates the token against the tokeninfo
endpoint and oauth.get_current_user with the scopes provided in the method's
decorator.
Returns:
None if there is no token or it's invalid. If the token was valid, this
returns a User. Only the user's email field is guaranteed to be set.
Other fields may be empty.
Raises:
InvalidGetUserCall: if the environment variables necessary to determine the
endpoints user are not set. These are typically set when processing a
request using an Endpoints handler. If they are not set, it likely
indicates that this function was called from outside an Endpoints request
handler.
"""
if not _is_auth_info_available():
raise InvalidGetUserCall('No valid endpoints user in environment.')
if _ENV_USE_OAUTH_SCOPE in os.environ:
return oauth.get_current_user(os.environ[_ENV_USE_OAUTH_SCOPE])
if (_ENV_AUTH_EMAIL in os.environ and
_ENV_AUTH_DOMAIN in os.environ):
if not os.environ[_ENV_AUTH_EMAIL]:
return None
return users.User(os.environ[_ENV_AUTH_EMAIL],
os.environ[_ENV_AUTH_DOMAIN] or None)
return None
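# Hedged usage sketch (illustration only; the Greeting message class and the
# exception used below are assumptions, not part of this module):
#
#     @endpoints.method(message_types.VoidMessage, Greeting,
#                       scopes=[_EMAIL_SCOPE])
#     def greet(self, request):
#         user = get_current_user()
#         if user is None:
#             raise endpoints.UnauthorizedException('Invalid token.')
#         return Greeting(message='Hello %s' % user.email())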
def _is_auth_info_available():
"""Check if user auth info has been set in environment variables."""
return ((_ENV_AUTH_EMAIL in os.environ and
_ENV_AUTH_DOMAIN in os.environ) or
_ENV_USE_OAUTH_SCOPE in os.environ)
def _maybe_set_current_user_vars(method, api_info=None, request=None):
"""Get user information from the id_token or oauth token in the request.
Used internally by Endpoints to set up environment variables for user
authentication.
Args:
method: The class method that's handling this request. This method
should be annotated with @endpoints.method.
api_info: An api_config._ApiInfo instance. Optional. If None, will attempt
to parse api_info from the implicit instance of the method.
request: The current request, or None.
"""
if _is_auth_info_available():
return
os.environ[_ENV_AUTH_EMAIL] = ''
os.environ[_ENV_AUTH_DOMAIN] = ''
try:
api_info = api_info or method.im_self.api_info
except AttributeError:
logging.warning('AttributeError when accessing %s.im_self. An unbound '
'method was probably passed as an endpoints handler.',
method.__name__)
scopes = method.method_info.scopes
audiences = method.method_info.audiences
allowed_client_ids = method.method_info.allowed_client_ids
else:
scopes = (method.method_info.scopes
if method.method_info.scopes is not None
else api_info.scopes)
audiences = (method.method_info.audiences
if method.method_info.audiences is not None
else api_info.audiences)
allowed_client_ids = (method.method_info.allowed_client_ids
if method.method_info.allowed_client_ids is not None
else api_info.allowed_client_ids)
if not scopes and not audiences and not allowed_client_ids:
return
token = _get_token(request)
if not token:
return None
if ((scopes == [_EMAIL_SCOPE] or scopes == (_EMAIL_SCOPE,)) and
allowed_client_ids):
logging.debug('Checking for id_token.')
time_now = long(time.time())
user = _get_id_token_user(token, audiences, allowed_client_ids, time_now,
memcache)
if user:
os.environ[_ENV_AUTH_EMAIL] = user.email()
os.environ[_ENV_AUTH_DOMAIN] = user.auth_domain()
return
if scopes:
logging.debug('Checking for oauth token.')
if _is_local_dev():
_set_bearer_user_vars_local(token, allowed_client_ids, scopes)
else:
_set_bearer_user_vars(allowed_client_ids, scopes)
def _get_token(request):
"""Get the auth token for this request.
Auth token may be specified in either the Authorization header or
as a query param (either access_token or bearer_token). We'll check in
this order:
1. Authorization header.
2. bearer_token query param.
3. access_token query param.
Args:
request: The current request, or None.
Returns:
The token in the request or None.
"""
auth_header = os.environ.get('HTTP_AUTHORIZATION')
if auth_header:
allowed_auth_schemes = ('OAuth', 'Bearer')
for auth_scheme in allowed_auth_schemes:
if auth_header.startswith(auth_scheme):
return auth_header[len(auth_scheme) + 1:]
return None
if request:
for key in ('bearer_token', 'access_token'):
token, _ = request.get_unrecognized_field_info(key)
if token:
return token
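# Hedged illustration of the lookup order implemented above:
#
#     os.environ['HTTP_AUTHORIZATION'] = 'Bearer ya29.token-value'
#     _get_token(None)          # -> 'ya29.token-value'
#
# With no Authorization header, the bearer_token / access_token query params
# on the request are consulted instead.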
def _get_id_token_user(token, audiences, allowed_client_ids, time_now, cache):
"""Get a User for the given id token, if the token is valid.
Args:
token: The id_token to check.
audiences: List of audiences that are acceptable.
allowed_client_ids: List of client IDs that are acceptable.
time_now: The current time as a long (eg. long(time.time())).
cache: Cache to use (eg. the memcache module).
Returns:
A User if the token is valid, None otherwise.
"""
try:
parsed_token = _verify_signed_jwt_with_certs(token, time_now, cache)
  except Exception as e:
logging.debug('id_token verification failed: %s', e)
return None
if _verify_parsed_token(parsed_token, audiences, allowed_client_ids):
email = parsed_token['email']
return users.User(email)
def _set_oauth_user_vars(token_info, audiences, allowed_client_ids, scopes,
local_dev):
logging.warning('_set_oauth_user_vars is deprecated and will be removed '
'soon.')
return _set_bearer_user_vars(allowed_client_ids, scopes)
def _set_bearer_user_vars(allowed_client_ids, scopes):
"""Validate the oauth bearer token and set endpoints auth user variables.
If the bearer token is valid, this sets ENDPOINTS_USE_OAUTH_SCOPE. This
provides enough information that our endpoints.get_current_user() function
can get the user.
Args:
allowed_client_ids: List of client IDs that are acceptable.
scopes: List of acceptable scopes.
"""
for scope in scopes:
try:
client_id = oauth.get_client_id(scope)
except oauth.Error:
continue
if (list(allowed_client_ids) != SKIP_CLIENT_ID_CHECK and
client_id not in allowed_client_ids):
logging.warning('Client ID is not allowed: %s', client_id)
return
os.environ[_ENV_USE_OAUTH_SCOPE] = scope
logging.debug('Returning user from matched oauth_user.')
return
logging.debug('Oauth framework user didn\'t match oauth token user.')
return None
def _set_bearer_user_vars_local(token, allowed_client_ids, scopes):
"""Validate the oauth bearer token on the dev server.
Since the functions in the oauth module return only example results in local
development, this hits the tokeninfo endpoint and attempts to validate the
token. If it's valid, we'll set _ENV_AUTH_EMAIL and _ENV_AUTH_DOMAIN so we
can get the user from the token.
Args:
token: String with the oauth token to validate.
allowed_client_ids: List of client IDs that are acceptable.
scopes: List of acceptable scopes.
"""
result = urlfetch.fetch(
'%s?%s' % (_TOKENINFO_URL, urllib.urlencode({'access_token': token})))
if result.status_code != 200:
try:
error_description = json.loads(result.content)['error_description']
except (ValueError, KeyError):
error_description = ''
logging.error('Token info endpoint returned status %s: %s',
result.status_code, error_description)
return
token_info = json.loads(result.content)
if 'email' not in token_info:
logging.warning('Oauth token doesn\'t include an email address.')
return
if not token_info.get('verified_email'):
logging.warning('Oauth token email isn\'t verified.')
return
client_id = token_info.get('issued_to')
if (list(allowed_client_ids) != SKIP_CLIENT_ID_CHECK and
client_id not in allowed_client_ids):
logging.warning('Client ID is not allowed: %s', client_id)
return
token_scopes = token_info.get('scope', '').split(' ')
if not any(scope in scopes for scope in token_scopes):
logging.warning('Oauth token scopes don\'t match any acceptable scopes.')
return
os.environ[_ENV_AUTH_EMAIL] = token_info['email']
os.environ[_ENV_AUTH_DOMAIN] = ''
logging.debug('Local dev returning user from token.')
return
def _is_local_dev():
return os.environ.get('SERVER_SOFTWARE', '').startswith('Development')
def _verify_parsed_token(parsed_token, audiences, allowed_client_ids):
if parsed_token.get('iss') != 'accounts.google.com':
logging.warning('Issuer was not valid: %s', parsed_token.get('iss'))
return False
aud = parsed_token.get('aud')
if not aud:
logging.warning('No aud field in token')
return False
cid = parsed_token.get('azp')
if aud != cid and aud not in audiences:
logging.warning('Audience not allowed: %s', aud)
return False
if list(allowed_client_ids) == SKIP_CLIENT_ID_CHECK:
logging.warning('Client ID check can\'t be skipped for ID tokens. '
'Id_token cannot be verified.')
return False
elif not cid or cid not in allowed_client_ids:
logging.warning('Client ID is not allowed: %s', cid)
return False
if 'email' not in parsed_token:
return False
return True
def _urlsafe_b64decode(b64string):
b64string = b64string.encode('ascii')
padded = b64string + '=' * ((4 - len(b64string)) % 4)
return base64.urlsafe_b64decode(padded)
def _get_cert_expiration_time(headers):
"""Get the expiration time for a cert, given the response headers.
Get expiration time from the headers in the result. If we can't get
a time from the headers, this returns 0, indicating that the cert
shouldn't be cached.
Args:
headers: A dict containing the response headers from the request to get
certs.
Returns:
An integer with the number of seconds the cert should be cached. This
value is guaranteed to be >= 0.
"""
cache_control = headers.get('Cache-Control', '')
for entry in cache_control.split(','):
match = _MAX_AGE_REGEX.match(entry)
if match:
cache_time_seconds = int(match.group(1))
break
else:
return 0
age = headers.get('Age')
if age is not None:
try:
age = int(age)
except ValueError:
age = 0
cache_time_seconds -= age
return max(0, cache_time_seconds)
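# Hedged worked example for the header math above:
#   {'Cache-Control': 'public, max-age=3600', 'Age': '600'}  -> 3600 - 600 = 3000
#   {'Cache-Control': 'no-store'}                            -> 0 (not cached)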
def _get_cached_certs(cert_uri, cache):
certs = cache.get(cert_uri, namespace=_CERT_NAMESPACE)
if certs is None:
logging.debug('Cert cache miss')
try:
result = urlfetch.fetch(cert_uri)
except AssertionError:
return None
if result.status_code == 200:
certs = json.loads(result.content)
expiration_time_seconds = _get_cert_expiration_time(result.headers)
if expiration_time_seconds:
cache.set(cert_uri, certs, time=expiration_time_seconds,
namespace=_CERT_NAMESPACE)
else:
logging.error(
'Certs not available, HTTP request returned %d', result.status_code)
return certs
def _b64_to_long(b):
b = b.encode('ascii')
b += '=' * ((4 - len(b)) % 4)
b = base64.b64decode(b)
return long(b.encode('hex'), 16)
def _verify_signed_jwt_with_certs(
jwt, time_now, cache,
cert_uri=_DEFAULT_CERT_URI):
"""Verify a JWT against public certs.
See http://self-issued.info/docs/draft-jones-json-web-token.html.
The PyCrypto library included with Google App Engine is severely limited and
so you have to use it very carefully to verify JWT signatures. The first
issue is that the library can't read X.509 files, so we make a call to a
special URI that has the public cert in modulus/exponent form in JSON.
The second issue is that the RSA.verify method doesn't work, at least for
how the JWT tokens are signed, so we have to manually verify the signature
of the JWT, which means hashing the signed part of the JWT and comparing
that to the signature that's been encrypted with the public key.
Args:
jwt: string, A JWT.
time_now: The current time, as a long (eg. long(time.time())).
cache: Cache to use (eg. the memcache module).
cert_uri: string, URI to get cert modulus and exponent in JSON format.
Returns:
dict, The deserialized JSON payload in the JWT.
Raises:
_AppIdentityError: if any checks are failed.
"""
segments = jwt.split('.')
if len(segments) != 3:
raise _AppIdentityError('Token is not an id_token (Wrong number of '
'segments)')
signed = '%s.%s' % (segments[0], segments[1])
signature = _urlsafe_b64decode(segments[2])
lsignature = long(signature.encode('hex'), 16)
header_body = _urlsafe_b64decode(segments[0])
try:
header = json.loads(header_body)
except:
raise _AppIdentityError("Can't parse header")
if header.get('alg') != 'RS256':
raise _AppIdentityError('Unexpected encryption algorithm: %r' %
header.get('alg'))
json_body = _urlsafe_b64decode(segments[1])
try:
parsed = json.loads(json_body)
except:
raise _AppIdentityError("Can't parse token body")
certs = _get_cached_certs(cert_uri, cache)
if certs is None:
raise _AppIdentityError(
'Unable to retrieve certs needed to verify the signed JWT')
if not _CRYPTO_LOADED:
raise _AppIdentityError('Unable to load pycrypto library. Can\'t verify '
'id_token signature. See http://www.pycrypto.org '
'for more information on pycrypto.')
local_hash = SHA256.new(signed).hexdigest()
verified = False
for keyvalue in certs['keyvalues']:
try:
modulus = _b64_to_long(keyvalue['modulus'])
exponent = _b64_to_long(keyvalue['exponent'])
key = RSA.construct((modulus, exponent))
hexsig = '%064x' % key.encrypt(lsignature, '')[0]
hexsig = hexsig[-64:]
verified = (hexsig == local_hash)
if verified:
break
except Exception, e:
logging.debug(
"Signature verification error: %s; continue with the next cert.", e)
continue
if not verified:
raise _AppIdentityError('Invalid token signature')
iat = parsed.get('iat')
if iat is None:
raise _AppIdentityError('No iat field in token')
earliest = iat - _CLOCK_SKEW_SECS
exp = parsed.get('exp')
if exp is None:
raise _AppIdentityError('No exp field in token')
if exp >= time_now + _MAX_TOKEN_LIFETIME_SECS:
raise _AppIdentityError('exp field too far in future')
latest = exp + _CLOCK_SKEW_SECS
if time_now < earliest:
raise _AppIdentityError('Token used too early, %d < %d' %
(time_now, earliest))
if time_now > latest:
raise _AppIdentityError('Token used too late, %d > %d' %
(time_now, latest))
return parsed
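# Illustrative sketch (not part of the original module): the PyCrypto call
# key.encrypt(lsignature, '')[0] in the loop above is raw modular
# exponentiation with the public key, so the same check can be expressed with
# Python's built-in pow(). The last 64 hex characters of the result are the
# SHA-256 digest embedded in the PKCS#1 v1.5 padding.
def _example_raw_rsa_check(signed, lsignature, modulus, exponent):
  # Hypothetical helper added only for illustration; mirrors the loop above.
  local_hash = SHA256.new(signed).hexdigest()
  hexsig = '%x' % pow(lsignature, exponent, modulus)
  return hexsig[-64:] == local_hash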
|
|
"""Support for exposing a templated binary sensor."""
import logging
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.components.binary_sensor import (
BinarySensorDevice, ENTITY_ID_FORMAT, PLATFORM_SCHEMA,
DEVICE_CLASSES_SCHEMA)
from homeassistant.const import (
ATTR_FRIENDLY_NAME, ATTR_ENTITY_ID, CONF_VALUE_TEMPLATE,
CONF_ICON_TEMPLATE, CONF_ENTITY_PICTURE_TEMPLATE,
CONF_SENSORS, CONF_DEVICE_CLASS, EVENT_HOMEASSISTANT_START, MATCH_ALL)
from homeassistant.exceptions import TemplateError
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import async_generate_entity_id
from homeassistant.helpers.event import (
async_track_state_change, async_track_same_state)
_LOGGER = logging.getLogger(__name__)
CONF_DELAY_ON = 'delay_on'
CONF_DELAY_OFF = 'delay_off'
SENSOR_SCHEMA = vol.Schema({
vol.Required(CONF_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_ICON_TEMPLATE): cv.template,
vol.Optional(CONF_ENTITY_PICTURE_TEMPLATE): cv.template,
vol.Optional(ATTR_FRIENDLY_NAME): cv.string,
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Optional(CONF_DEVICE_CLASS): DEVICE_CLASSES_SCHEMA,
vol.Optional(CONF_DELAY_ON):
vol.All(cv.time_period, cv.positive_timedelta),
vol.Optional(CONF_DELAY_OFF):
vol.All(cv.time_period, cv.positive_timedelta),
})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_SENSORS): cv.schema_with_slug_keys(SENSOR_SCHEMA),
})
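# Illustrative sketch (not part of the original component): a configuration
# that the schema above would accept. The sensor name, friendly name, and
# delays are hypothetical; delay_on/delay_off debounce the on and off
# transitions.
#
# binary_sensor:
#   - platform: template
#     sensors:
#       sun_up:
#         friendly_name: "Sun is up"
#         device_class: light
#         value_template: "{{ is_state('sun.sun', 'above_horizon') }}"
#         delay_on:
#           seconds: 30
#         delay_off:
#           minutes: 5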
async def async_setup_platform(hass, config, async_add_entities,
discovery_info=None):
"""Set up template binary sensors."""
sensors = []
for device, device_config in config[CONF_SENSORS].items():
value_template = device_config[CONF_VALUE_TEMPLATE]
icon_template = device_config.get(CONF_ICON_TEMPLATE)
entity_picture_template = device_config.get(
CONF_ENTITY_PICTURE_TEMPLATE)
entity_ids = set()
manual_entity_ids = device_config.get(ATTR_ENTITY_ID)
invalid_templates = []
for tpl_name, template in (
(CONF_VALUE_TEMPLATE, value_template),
(CONF_ICON_TEMPLATE, icon_template),
(CONF_ENTITY_PICTURE_TEMPLATE, entity_picture_template),
):
if template is None:
continue
template.hass = hass
if manual_entity_ids is not None:
continue
template_entity_ids = template.extract_entities()
if template_entity_ids == MATCH_ALL:
entity_ids = MATCH_ALL
# Cut off _template from name
invalid_templates.append(tpl_name[:-9])
elif entity_ids != MATCH_ALL:
entity_ids |= set(template_entity_ids)
if manual_entity_ids is not None:
entity_ids = manual_entity_ids
elif entity_ids != MATCH_ALL:
entity_ids = list(entity_ids)
if invalid_templates:
_LOGGER.warning(
'Template binary sensor %s has no entity ids configured to'
' track nor were we able to extract the entities to track'
' from the %s template(s). This entity will only be able'
' to be updated manually.',
device, ', '.join(invalid_templates))
friendly_name = device_config.get(ATTR_FRIENDLY_NAME, device)
device_class = device_config.get(CONF_DEVICE_CLASS)
delay_on = device_config.get(CONF_DELAY_ON)
delay_off = device_config.get(CONF_DELAY_OFF)
sensors.append(
BinarySensorTemplate(
hass, device, friendly_name, device_class, value_template,
icon_template, entity_picture_template, entity_ids,
delay_on, delay_off)
)
if not sensors:
_LOGGER.error("No sensors added")
return False
async_add_entities(sensors)
return True
class BinarySensorTemplate(BinarySensorDevice):
"""A virtual binary sensor that triggers from another sensor."""
def __init__(self, hass, device, friendly_name, device_class,
value_template, icon_template, entity_picture_template,
entity_ids, delay_on, delay_off):
"""Initialize the Template binary sensor."""
self.hass = hass
self.entity_id = async_generate_entity_id(
ENTITY_ID_FORMAT, device, hass=hass)
self._name = friendly_name
self._device_class = device_class
self._template = value_template
self._state = None
self._icon_template = icon_template
self._entity_picture_template = entity_picture_template
self._icon = None
self._entity_picture = None
self._entities = entity_ids
self._delay_on = delay_on
self._delay_off = delay_off
async def async_added_to_hass(self):
"""Register callbacks."""
@callback
def template_bsensor_state_listener(entity, old_state, new_state):
"""Handle the target device state changes."""
self.async_check_state()
@callback
def template_bsensor_startup(event):
"""Update template on startup."""
if self._entities != MATCH_ALL:
# Track state change only for valid templates
async_track_state_change(
self.hass, self._entities, template_bsensor_state_listener)
self.async_check_state()
self.hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_START, template_bsensor_startup)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return self._icon
@property
def entity_picture(self):
"""Return the entity_picture to use in the frontend, if any."""
return self._entity_picture
@property
def is_on(self):
"""Return true if sensor is on."""
return self._state
@property
def device_class(self):
"""Return the sensor class of the sensor."""
return self._device_class
@property
def should_poll(self):
"""No polling needed."""
return False
@callback
def _async_render(self):
"""Get the state of template."""
state = None
try:
state = (self._template.async_render().lower() == 'true')
except TemplateError as ex:
if ex.args and ex.args[0].startswith(
"UndefinedError: 'None' has no attribute"):
# Common during HA startup - so just a warning
_LOGGER.warning("Could not render template %s, "
"the state is unknown", self._name)
return
_LOGGER.error("Could not render template %s: %s", self._name, ex)
for property_name, template in (
('_icon', self._icon_template),
('_entity_picture', self._entity_picture_template)):
if template is None:
continue
try:
setattr(self, property_name, template.async_render())
except TemplateError as ex:
friendly_property_name = property_name[1:].replace('_', ' ')
if ex.args and ex.args[0].startswith(
"UndefinedError: 'None' has no attribute"):
# Common during HA startup - so just a warning
_LOGGER.warning('Could not render %s template %s,'
' the state is unknown.',
friendly_property_name, self._name)
else:
_LOGGER.error('Could not render %s template %s: %s',
friendly_property_name, self._name, ex)
return state
return state
@callback
def async_check_state(self):
"""Update the state from the template."""
state = self._async_render()
    # Return if the state didn't change or is invalid
if state is None or state == self.state:
return
@callback
def set_state():
"""Set state of template binary sensor."""
self._state = state
self.async_schedule_update_ha_state()
    # Set the state immediately when no delay is configured for this transition
if (state and not self._delay_on) or \
(not state and not self._delay_off):
set_state()
return
period = self._delay_on if state else self._delay_off
async_track_same_state(
self.hass, period, set_state, entity_ids=self._entities,
async_check_same_func=lambda *args: self._async_render() == state)
async def async_update(self):
"""Force update of the state from the template."""
self.async_check_state()
|
|
#---------------------------------------------------------------------------
# Copyright 2013 The Open Source Electronic Health Record Agent
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#---------------------------------------------------------------------------
import sys
import os
import time
import TestHelper
from OSEHRAHelper import PROMPT
def startFileman(VistA):
# Start FileMan as the programmer user and set XUMF to 1 which lets the user
# change information in Kernel files
# Starts at the VistA Prompt
VistA.wait(PROMPT)
VistA.write('S DUZ=1 S XUMF=1 D Q^DI')
VistA.wait('Select OPTION:')
def signonZU(VistA,acc_code,ver_code):
# Sign a user into the ZU menu system
# The User must have a valid access code and verify code.
# If the user needs to change the Verify Code, the script will append a "!" to the old code
# and use that as the new one.
# Starts at the VistA prompt.
VistA.wait(PROMPT,60)
VistA.write('D ^ZU')
VistA.wait('ACCESS CODE:')
VistA.write(acc_code)
VistA.wait('VERIFY CODE:')
VistA.write(ver_code)
index = VistA.multiwait(['TYPE NAME','verify code:'])
if index==1:
VistA.write(ver_code)
VistA.wait('VERIFY CODE:')
VistA.write(ver_code+"!")
VistA.wait('right:')
VistA.write(ver_code+"!")
VistA.wait('TYPE NAME:')
VistA.write('')
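# Illustrative sketch (not part of the original script): a typical call with
# hypothetical credentials. If the instance forces a verify-code change, the
# function re-enters the old code and appends "!" to it, so the caller should
# treat ver_code + "!" as the current verify code afterwards, e.g.:
#   signonZU(VistA, 'SM1234', 'SM1234!!')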
def initializeFileman(VistA,site_name,site_number):
# Initializes FileMan via the DINIT routine.
  # The command needs a site name to change to and a local site number.
  # The script uses the value of the CMake variable TEST_VISTA_SETUP_SITE_NAME as the name
# and 6161 as the site number.
VistA.write('D ^DINIT')
VistA.wait('Initialize VA FileMan now?')
VistA.write('Yes')
VistA.wait('SITE NAME:')
VistA.write(site_name)
VistA.wait('SITE NUMBER')
VistA.write(site_number)
# It will also change the operating system file to match the local environment type
# found by the set up.
VistA.wait('Do you want to change the MUMPS OPERATING SYSTEM File?')
VistA.write('Yes')
VistA.wait('TYPE OF MUMPS SYSTEM YOU ARE USING')
if VistA.type=='cache':
VistA.write('CACHE')
else:
VistA.write('GT.M(UNIX)')
VistA.wait(PROMPT,60)
# Use the ZUSET routine to rename the correct ZU* for the system.
VistA.write('D ^ZUSET')
VistA.wait('Rename')
VistA.write('Yes')
def setupPrimaryHFSDir(VistA,hfs_dir):
  # Set up the primary HFS directory in the
  # Kernel System Parameters file.
  #
  # Enter "@" to remove the existing path, or supply a new file path.
startFileman(VistA)
VistA.write('1')
VistA.wait_re('INPUT TO WHAT FILE')
VistA.write('KERNEL SYSTEM PARAMETERS')
VistA.wait('EDIT WHICH FIELD')
VistA.write('PRIMARY HFS DIRECTORY')
VistA.wait('THEN EDIT FIELD')
VistA.write('')
VistA.wait('DOMAIN NAME')
  # `1 is FileMan notation for selecting the entry with internal number 1
VistA.write('`1')
VistA.wait('PRIMARY HFS DIRECTORY')
VistA.write(os.path.normpath(hfs_dir))
# Multiwait to capture the possible outcomes:
# SURE YOU WANT TO DELETE: File has an entry and the @ will delete it
# DOMAIN NAME: Entry was an acceptable response
# PRIMARY HFS DIRECTORY: Response was not accepted, could be due to
# deleting an empty file entry
index = VistA.multiwait(['SURE YOU WANT TO DELETE','DOMAIN NAME','PRIMARY HFS DIRECTORY'])
if index == 0:
VistA.write('Y')
VistA.wait('DOMAIN NAME')
if index == 2:
VistA.write("")
VistA.wait("DOMAIN NAME")
VistA.write('')
VistA.wait('Select OPTION:')
VistA.write('')
def configureNULLDevice(VistA):
# Ensure that the null device is correctly configured by adding
# a $I for the correct platform rather than VMS and removing
# sign-on capabilities
startFileman(VistA)
VistA.write('1')
VistA.wait_re('INPUT TO WHAT FILE')
VistA.write('DEVICE')
VistA.wait('EDIT WHICH FIELD')
VistA.write('$I\rSIGN-ON/SYSTEM DEVICE\r')
VistA.wait('NAME:')
VistA.write('NULL\r1')
VistA.wait('//')
# Path added is dependent on the platform that is being used.
if sys.platform=='win32':
VistA.write('//./nul\rNO\r')
else:
VistA.write('/dev/null\rNO\r')
VistA.wait("Select OPTION")
VistA.write("")
def configureConsoleDevice(VistA):
# Ensure that the console device is correctly configured by adding
# sign-on capabilities
startFileman(VistA)
VistA.write('1')
VistA.wait_re('INPUT TO WHAT FILE')
VistA.write('DEVICE')
VistA.wait('EDIT WHICH FIELD')
VistA.write('SIGN-ON/SYSTEM DEVICE\r')
VistA.wait('NAME:')
VistA.write('/dev/tty')
VistA.wait('SYSTEM DEVICE')
VistA.write('Y\r')
VistA.wait("Select OPTION")
VistA.write("")
def setupVistADomain(VistA,site_name):
# Enter the site name into the DOMAIN file via FileMan
startFileman(VistA)
VistA.write('1')
VistA.wait_re('INPUT TO WHAT FILE')
VistA.write('DOMAIN\r')
VistA.wait('Select DOMAIN NAME')
VistA.write(site_name)
# Multiwait for possible outcomes:
# Are you adding: Domain is new and will add it to the system
# NAME: Domain exists already
index = VistA.multiwait(["Are you adding","NAME"])
if index == 0:
VistA.write("Y")
else:
VistA.write("")
VistA.wait("FLAGS")
VistA.write('^\r\r')
VistA.wait(PROMPT,60)
# christen the domain via the XMUDCHR routine.
VistA.write('D CHRISTEN^XMUDCHR')
VistA.wait('Are you sure you want to change the name of this facility?')
VistA.write('Yes')
VistA.wait('Select DOMAIN NAME')
VistA.write(site_name)
VistA.wait('PARENT')
VistA.write('')
VistA.wait('TIME ZONE')
# Attempts to pull the timezone from the local machine via Python
# If entry is not accepted, will default to EST
VistA.write(time.strftime('%Z').replace(' Time',''))
index = VistA.multiwait([VistA.prompt,'TIME ZONE'])
if index==1:
VistA.write('EST')
VistA.wait(PROMPT,60)
  # Next, find the IEN of the new site name and add entries for the new domain to
  # the Kernel System Parameters and RPC Broker Site Parameters files.
VistA.IEN('DOMAIN',site_name)
VistA.wait(PROMPT,60)
VistA.write('S $P(^XWB(8994.1,1,0),"^")=' + VistA.IENumber)
VistA.write('S $P(^XTV(8989.3,1,0),"^")=' + VistA.IENumber)
# Then, re-index both files with the FileMan Utility.
startFileman(VistA)
VistA.write('UTILITY')
VistA.wait('UTILITY OPTION')
VistA.write('RE')
VistA.wait_re('MODIFY WHAT FILE')
VistA.write('8989.3\rNO\rY\rY')
VistA.wait('UTILITY OPTION')
VistA.write('RE')
VistA.wait_re('MODIFY WHAT FILE')
VistA.write('8994.1\rNO\rY\rY\r')
VistA.wait('Select OPTION')
VistA.write("")
def setupBoxVolPair(VistA,volume_set,site_name,tcp_port):
# Query the instance for the Box-volume pair of the machine
VistA.getenv(volume_set)
# Rename the first Box-volume entry in the Taskman Site Parameters file
# to match what was queried above
startFileman(VistA)
VistA.write('1')
VistA.wait_re('INPUT TO WHAT FILE')
VistA.write('14.7')
VistA.wait('ALL//')
VistA.write('')
VistA.wait('Select TASKMAN SITE PARAMETERS BOX-VOLUME PAIR:')
VistA.write('`1')
VistA.wait('//')
VistA.write(VistA.boxvol)
VistA.wait('RESERVED')
VistA.write('^\r')
#time.sleep(5)
  # Add the Box-volume pair to the RPC Broker parameters for the local domain.
  # Also adds the information for the new-style RPC Broker Listener on the supplied TCP port.
  # On a Cache system, a task is started to launch the Listener and the
  # listener is placed under the Listener Starter's control;
  # on a GT.M system, the information is created but the Listener is not started.
VistA.wait('Select OPTION')
VistA.write('1')
VistA.wait_re('INPUT TO WHAT FILE')
VistA.write('8994.1')
VistA.wait('EDIT WHICH FIELD')
VistA.write('LISTENER')
VistA.wait("SUB-FIELD")
VistA.write("")
VistA.wait("THEN EDIT FIELD")
VistA.write("")
VistA.wait('Select RPC BROKER SITE PARAMETERS DOMAIN NAME')
VistA.write(site_name)
VistA.wait("OK")
VistA.write("Y")
VistA.wait("BOX-VOLUME PAIR")
VistA.write(VistA.boxvol + '\r')
VistA.wait("BOX-VOLUME PAIR")
VistA.write("")
VistA.wait("Select PORT")
VistA.write(tcp_port + '\rY')
if VistA.type=='cache':
VistA.write('1\r1\r1\r')
else:
VistA.write('1\r\r\r')
VistA.wait("Select OPTION")
VistA.write("")
def setupVolumeSet(VistA,site_name,volume_set,namespace=""):
# Rename first entry in the Volume Set file to match
# the CMake value of TEST_VISTA_SETUP_VOLUME_SET.
startFileman(VistA)
VistA.write('1')
VistA.wait_re('INPUT TO WHAT FILE')
VistA.write('14.5\r')
VistA.wait('Select VOLUME SET')
VistA.write('`1')
VistA.wait('VOLUME SET:')
VistA.write(volume_set+ '\r\r\r\r\r')
VistA.wait('TASKMAN FILES UCI')
if VistA.type=='cache':
VistA.write(namespace+'\r\r\r\r\r\r')
else:
VistA.write(volume_set +'\r\r\r\r\r\r')
# Add the Volume set information to the Kernel System Parameters File
VistA.wait('Select OPTION')
VistA.write('1')
VistA.wait_re('INPUT TO WHAT FILE')
VistA.write('KERNEL SYSTEM PARAMETERS\rVOLUME SET\r\r')
VistA.wait('Select KERNEL SYSTEM PARAMETERS DOMAIN NAME:')
VistA.write(site_name + '\r')
VistA.wait('VOLUME SET')
VistA.write(volume_set)
index = VistA.multiwait(['Are you adding','VOLUME SET'])
if index==0:
VistA.write('Y')
elif index==1:
VistA.write('')
# Set up basic information about sign-on to the domain via the Volume Set
VistA.wait('MAX SIGNON ALLOWED')
VistA.write('500')
VistA.wait('LOG SYSTEM RT')
VistA.write('N')
VistA.wait('VOLUME SET')
VistA.write('\r\r')
def scheduleOption(VistA,optionName):
# If using Cache as the M environment, Schedule a task to start the
# XWB Listener Starter on the start up of TaskMan
VistA.wait(PROMPT)
VistA.write('S DUZ=1 D ^XUP')
VistA.wait('Select OPTION NAME')
VistA.write('EVE\r1')
VistA.wait('Systems Manager Menu')
VistA.write('Taskman Management')
VistA.wait('Select Taskman Management')
VistA.write('SCHED')
VistA.wait('reschedule:')
VistA.write(optionName + '\rY')
VistA.wait('COMMAND:')
VistA.write('\r^SPECIAL QUEUEING\rSTARTUP\rS\rE\r')
VistA.wait('Select Taskman Management')
VistA.write('')
VistA.wait('Systems Manager Menu')
VistA.write('')
VistA.wait('Do you really want to halt')
VistA.write('Y')
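# Illustrative sketch (not part of the original script): on a Cache system the
# setup typically schedules the RPC Broker listener starter to run when
# TaskMan starts; the option name below is an assumption based on the comment
# in scheduleOption above, e.g.:
#   scheduleOption(VistA, 'XWB LISTENER STARTER')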
def restartTaskMan(VistA):
# Restarts the TaskMan instance via the Taskman Management Utilities Menu.
VistA.wait(PROMPT)
VistA.write('S DUZ=1 D ^XUP')
VistA.wait('Select OPTION NAME')
VistA.write('EVE\r1')
VistA.wait('Systems Manager Menu')
VistA.write('Taskman Management')
VistA.wait('Select Taskman Management')
VistA.write('Taskman Management Utilities')
VistA.wait('Select Taskman Management Utilities')
VistA.write('Restart Task Manager\rY')
VistA.wait('Select Taskman Management Utilities')
VistA.write('')
VistA.wait('Select Taskman Management')
VistA.write('')
VistA.wait('Select Systems Manager Menu')
VistA.write('')
VistA.wait('Do you really want to halt')
VistA.write('Y')
VistA.wait(PROMPT)
VistA.write('K')
def addSystemManager(VistA):
# Add the super user System Manager via the User Management Menu
  # Set basic information about the user: name, SSN, sex, etc.
VistA.wait(PROMPT,60)
VistA.write('S DUZ=1 D ^XUP')
VistA.wait('Select OPTION NAME')
VistA.write('EVE\r1')
VistA.wait('Systems Manager Menu')
VistA.write('USER MANAGEMENT')
VistA.wait('User Management')
VistA.write('ADD')
VistA.wait('Enter NEW PERSON')
VistA.write('MANAGER,SYSTEM')
index = VistA.multiwait(['Are you adding','Want to reactivate'])
if index == 0:
VistA.write('Y')
VistA.wait('INITIAL:')
VistA.write('SM')
VistA.wait('SSN:')
VistA.write('000000001')
VistA.wait('SEX:')
VistA.write('M')
VistA.wait('NPI')
VistA.write('')
VistA.wait('NAME COMPONENTS')
# A ScreenMan form opens at this point, and the following information is set:
# Primary Menu: EVE
# Secondary Menu: OR PARAM COORDINATOR MENU, TIU IRM MAINTENANCE MENU,
# XPAR MENU TOOLS,DG REGISTER PATIENT
# Access Code: SM1234
# Verify Code: SM1234!!
VistA.write('\r\r\r\r\r^PRIMARY MENU OPTION\rEVE\r1\r^Want to edit ACCESS CODE\rY\rSM1234\rSM1234\r^Want to edit VERIFY CODE\rY\rSM1234!!\rSM1234!!\r^SECONDARY MENU OPTIONS\rOR PARAM COORDINATOR MENU\rY\r\r\r\rTIU IRM MAINTENANCE MENU\rY\r\r\r\rXPAR MENU TOOLS\rY\r\r\r\rDG REGISTER PATIENT\rY\r\r\r\r^MULTIPLE SIGN-ON\r1\r1\r99\r^SERVICE/SECTION\rIRM\rS\rE')
# Exiting the ScreenMan form, Allocate Security Keys
# For Kernel Access: XUMGR, XUPROG, XUPROGMODE
# and Scheduling Access: SD SUPERVISOR, SDWL PARAMETER, SDWL MENU
VistA.wait('User Account Access Letter')
VistA.write('NO')
VistA.wait('wish to allocate security keys?')
VistA.write('Y')
VistA.wait('Allocate key')
VistA.write('XUMGR')
VistA.wait('Another key')
VistA.write('XUPROG\r1')
VistA.wait('Another key')
VistA.write('XUPROGMODE')
VistA.wait('Another key')
VistA.write('SD SUPERVISOR')
VistA.wait('Another key')
VistA.write('SDWL PARAMETER')
VistA.wait('Another key')
VistA.write('SDWL MENU')
VistA.wait('Another key')
VistA.write('')
VistA.wait('Another holder')
VistA.write('')
VistA.wait('YES//')
VistA.write('')
VistA.wait('mail groups?')
VistA.write('\r')
VistA.wait('Systems Manager Menu')
VistA.write('\rY')
VistA.wait(PROMPT,60)
# Get the record number of the user that was just created
VistA.IEN('NEW PERSON','MANAGER,SYSTEM')
VistA.wait(PROMPT,60)
  # Set a piece of the New Person global corresponding to MANAGER,SYSTEM
  # to "@" to tell FileMan that the user is a programmer.
VistA.write('S DUZ=' + VistA.IENumber + ' S $P(^VA(200,DUZ,0),"^",4)="@"')
def addInstitution(VistA,inst_name,station_number):
  # In FileMan, add an entry to the Institution file.
# Pass in the name and number as arguments to allow for
# multiple additions.
startFileman(VistA)
VistA.write('1')
VistA.wait_re('INPUT TO WHAT FILE:')
VistA.write('4')
VistA.wait('EDIT WHICH FIELD')
VistA.write('STATION NUMBER')
VistA.wait('THEN EDIT FIELD')
VistA.write('')
VistA.wait('Select INSTITUTION NAME:')
VistA.write(inst_name)
index = VistA.multiwait(['Are you adding','STATION NUMBER'])
if index==0:
VistA.write('Y')
VistA.wait('STATION NUMBER:')
VistA.write(station_number)
VistA.wait('Select INSTITUTION NAME:')
VistA.write('')
VistA.wait('Select OPTION:')
VistA.write('')
def addDivision(VistA,div_name, facility_number,station_number):
  # Adds a division to the VistA instance via FileMan.
  # Each division needs a name and a facility number. The station number
  # points back to the recently created Institution.
startFileman(VistA)
VistA.write('1')
VistA.wait_re('INPUT TO WHAT FILE:')
VistA.write('40.8')
VistA.wait('EDIT WHICH FIELD')
VistA.write('FACILITY NUMBER')
VistA.wait('THEN EDIT FIELD')
VistA.write('INSTITUTION FILE POINTER')
VistA.wait('THEN EDIT FIELD')
VistA.write('')
VistA.wait('DIVISION NAME')
VistA.write(div_name)
VistA.wait('Are you adding')
VistA.write('Y')
VistA.wait('MEDICAL CENTER DIVISION NUM:')
VistA.write('')
VistA.wait('FACILITY NUMBER')
VistA.write(facility_number)
VistA.write('')
VistA.wait('INSTITUTION FILE POINTER')
VistA.write(station_number)
VistA.wait('DIVISION NAME')
VistA.write('')
VistA.wait('Select OPTION')
VistA.write('')
def setupWard(VistA, division, institution, ward_name, clinic_name, order, specialty='Cardiac Surgery', bed_array = [["1-A","testBed1"]] ):
  # Set up an inpatient ward for lodging patients and prescribing inpatient medications;
  # taken from the ADTActions script of the Registration Roll-and-Scroll testing.
VistA.wait(PROMPT)
VistA.write('S DUZ=1 D ^XUP')
VistA.wait('OPTION NAME:')
# DEFINE THE WARD
VistA.write('WARD DEFINITION ENTRY')
VistA.wait('NAME:')
VistA.write(ward_name)
VistA.wait('No//')
VistA.write('YES')
VistA.wait('POINTER:')
VistA.write(clinic_name)
VistA.wait('ORDER:')
VistA.write(order)
VistA.wait(ward_name)
VistA.write('')
VistA.wait('WRISTBAND:')
VistA.write('YES')
VistA.wait('DIVISION:')
VistA.write(division)
VistA.wait('INSTITUTION:')
VistA.write(institution)
VistA.wait('6100')
VistA.write('')
VistA.wait('BEDSECTION:')
VistA.write('bedselect')
VistA.wait('SPECIALTY:')
VistA.write(specialty)
VistA.wait('SERVICE:')
VistA.write('S')
VistA.wait('LOCATION:')
VistA.write('north')
VistA.wait('WARD:')
VistA.write('1')
VistA.wait('DATE:')
VistA.write('T')
VistA.wait('No//')
VistA.write('YES')
VistA.wait('BEDS:')
VistA.write('20')
VistA.wait('ILL:')
VistA.write('1')
VistA.wait('SYNONYM:')
VistA.write('')
VistA.wait('G&L ORDER:')
VistA.write('')
VistA.wait('TOTALS:')
VistA.write('')
VistA.wait('NAME:')
VistA.write('')
addBedsToWard(VistA, ward_name, bed_array)
def addBedsToWard(VistA, ward_name, bed_array):
VistA.wait(PROMPT)
VistA.write('S DUZ=1 D ^XUP')
# SETUP BEDS
VistA.wait('OPTION NAME:')
VistA.write('ADT SYSTEM')
VistA.wait('Option:')
VistA.write('ADD')
for sitem in bed_array:
VistA.wait('NAME:')
VistA.write(sitem[0])
VistA.wait('No//')
VistA.write('yes')
VistA.wait('NAME:')
VistA.write('')
VistA.wait('DESCRIPTION:')
VistA.write(sitem[1])
VistA.wait('No//')
VistA.write('yes')
VistA.wait('ASSIGN:')
VistA.write(ward_name)
VistA.wait('No//')
VistA.write('yes')
VistA.wait('ASSIGN:')
VistA.write('')
VistA.wait('NAME:')
VistA.write('')
VistA.wait('Option:')
VistA.write('')
VistA.wait('YES//')
VistA.write('')
def modifyDVBParams(VistA):
VistA.wait(PROMPT)
VistA.write('D ^XUP')
# ADD ENTRY TO FILE 395 DVB PARAMETERS
VistA.wait('NAME:')
VistA.write('ZZFILEMAN')
VistA.wait('OPTION:')
VistA.write('1')
VistA.wait_re('INPUT TO WHAT FILE')
VistA.write('395')
VistA.wait('EDIT WHICH FIELD')
VistA.write('ALL')
VistA.wait('Select DVB PARAMETERS ONE:')
VistA.write('1')
VistA.wait('No//')
VistA.write('yes')
VistA.wait('SCREENS?:')
VistA.write('NO')
VistA.wait('DAY:')
VistA.write('^NEW IDCU INTERFACE')
VistA.wait('INTERFACE:')
VistA.write('0')
VistA.wait('Difference:')
VistA.write('')
VistA.wait('DIVISION:')
VistA.write('YES')
VistA.wait('GROUP:')
VistA.write('^')
VistA.wait('Select DVB PARAMETERS ONE:')
VistA.write('')
VistA.wait('OPTION:')
VistA.write('')
def addtoMASParameter(VistA, institution, medical_center):
# ADD ENTRY TO MAS PARAMETER
VistA.wait(PROMPT)
VistA.write('D ^XUP')
VistA.write('1')
VistA.wait('Select OPTION NAME')
VistA.write('ADT SYSTEM')
VistA.wait('ADT System Definition Menu')
VistA.write('MAS Parameter Entry')
VistA.wait('Enter 1-3 to EDIT, or RETURN to QUIT')
VistA.write('1')
VistA.wait('MEDICAL CENTER NAME')
VistA.write(medical_center)
VistA.wait('AFFILIATED')
VistA.write('NO')
VistA.wait('MULTIDIVISION MED CENTER')
VistA.write('NO')
VistA.wait('NURSING HOME WARDS')
VistA.write('')
VistA.wait('DOMICILIARY WARDS')
VistA.write('')
VistA.wait('SYSTEM TIMEOUT')
VistA.write('30')
VistA.wait('AUTOMATIC PTF MESSAGES')
VistA.write('')
VistA.wait('PRINT PTF MESSAGES')
VistA.write('')
VistA.wait('DEFAULT PTF MESSAGE PRINTER')
VistA.write('')
VistA.wait('SHOW STATUS SCREEN')
VistA.write('YES')
VistA.wait('USE HIGH INTENSITY ON SCREENS')
VistA.write('^^')
VistA.wait('Enter 1-3 to EDIT, or RETURN to QUIT')
VistA.write('2')
VistA.wait('DAYS TO UPDATE MEDICAID')
VistA.write('365')
VistA.wait('DAYS TO MAINTAIN G&L CORR')
VistA.write('30')
VistA.wait('TIME FOR LATE DISPOSITION')
VistA.write('30')
VistA.wait('SUPPLEMENTAL 10/10')
VistA.write('0')
VistA.wait(':')
VistA.write('^ASK DEVICE IN REGISTRATION')
VistA.wait('ASK DEVICE IN REGISTRATION')
VistA.write('YES')
VistA.wait('DAYS TO MAINTAIN SENSITIVITY')
VistA.write('30')
VistA.wait(':')
VistA.write('^^')
VistA.wait('Enter 1-3 to EDIT, or RETURN to QUIT')
VistA.write('3')
VistA.wait(':')
VistA.write('^INSTITUTION FILE POINTER')
VistA.wait('INSTITUTION FILE POINTER')
VistA.write(institution)
VistA.wait(':')
VistA.write('^^')
VistA.wait('Enter 1-3 to EDIT, or RETURN to QUIT')
VistA.write('')
VistA.wait('ADT System Definition Menu')
VistA.write('')
VistA.wait('YES//')
VistA.write('')
VistA.wait(PROMPT)
VistA.write('')
def setupNursLocation(VistA, unit_name):
# Set up a NURS LOCATION entity so that BCMA can connect to the system.
startFileman(VistA)
VistA.write('1')
VistA.wait_re('INPUT TO WHAT FILE:')
VistA.write('NURS LOCATION')
VistA.wait('EDIT WHICH FIELD')
VistA.write('')
VistA.wait('NURSING UNIT NAME')
VistA.write(unit_name)
VistA.wait('Are you adding')
VistA.write('Y')
VistA.wait('Are you adding')
VistA.write('Y')
VistA.wait('PRODUCT LINE')
VistA.write('NURSING')
VistA.wait('CARE SETTING')
VistA.write("INPATIENT")
VistA.wait('UNIT TYPE')
VistA.write("CLINICAL")
VistA.wait('INPATIENT DSS DEPARTMENT')
VistA.write('')
VistA.wait('PATIENT CARE FLAG')
VistA.write('A')
VistA.wait('INACTIVE FLAG')
VistA.write('A')
VistA.wait('MAS WARD')
VistA.write('')
VistA.wait('AMIS BED SECTION')
VistA.write('')
VistA.wait('PROFESSIONAL PERCENTAGE')
VistA.write('')
VistA.wait('UNIT EXPERIENCE')
VistA.write('')
VistA.wait('POC DATA ENTRY PERSONNEL')
VistA.write('')
VistA.wait('POC DATA APPROVAL PERSONNEL')
VistA.write('')
VistA.wait('SERVICE DATE')
VistA.write('')
VistA.wait('SERVICE DATE')
VistA.write('')
VistA.wait('STATUS')
VistA.write('')
VistA.wait('NURSING UNIT NAME')
VistA.write('')
VistA.wait('Select OPTION')
VistA.write('')
def setupStrepTest(VistA):
# The Sikuli test for CPRS orders a Streptozyme test for the patient
  # This information ensures the test can be ordered at the VistA Health Care
  # facility.
# Add a NUMERIC IDENTIFIER to the Chemistry ACCESSION Area
# This is necessary to add a laboratory test to an Accession
# area at an Institution.
startFileman(VistA)
VistA.write('1')
VistA.wait_re('INPUT TO WHAT FILE')
VistA.write('ACCESSION\r1')
VistA.wait('EDIT WHICH FIELD')
VistA.write('.4\r')
VistA.wait('Select ACCESSION AREA')
VistA.write('CHEMISTRY')
VistA.wait('NUMERIC IDENTIFIER')
VistA.write('CH\r')
# Change the STREPTOZYME test to be accessioned through the Chemistry
# area at the Vista Health Care institution
VistA.wait('OPTION')
VistA.write('1')
VistA.wait_re('INPUT TO WHAT FILE')
VistA.write('LABORATORY TEST')
VistA.wait('EDIT WHICH FIELD')
VistA.write('ACCESSION AREA\r\r')
VistA.wait('Select LABORATORY TEST NAME')
VistA.write('STREPTOZYME')
VistA.wait('Select INSTITUTION')
VistA.write('VISTA HEALTH CARE')
VistA.wait('ACCESSION AREA')
VistA.write('CHEMISTRY')
VistA.wait('Select LABORATORY TEST NAME')
VistA.write('')
# Change the Package Prefix of the ONCE schedule to be
# used by the Laboratory
VistA.wait('OPTION')
VistA.write('1')
VistA.wait_re('INPUT TO WHAT FILE')
VistA.write('ADMINISTRATION SCHEDULE')
VistA.wait('EDIT WHICH FIELD')
VistA.write('PACKAGE PREFIX\r')
VistA.wait('Select ADMINISTRATION SCHEDULE NAME')
VistA.write('ONCE')
VistA.wait('P')
VistA.write('LR')
VistA.wait('ADMINISTRATION SCHEDULE')
VistA.write('')
VistA.wait('Select OPTION')
VistA.write('')
  # Set up the Quick Order entry for the Strep Throat test.
  # Defaults to a one-time swab collection.
VistA.wait(PROMPT)
VistA.write('K D ^XUP')
VistA.wait("Access Code")
VistA.write("SM1234")
index = VistA.multiwait(['Select OPTION NAME','TERMINAL TYPE NAME'])
if index ==1:
VistA.write("C-VT220")
VistA.wait("Select OPTION NAME")
VistA.write("Systems Manager Menu")
VistA.wait('Systems Manager Menu')
VistA.write('CPRS Configuration')
VistA.wait('CPRS Configuration')
VistA.write('MM')
VistA.wait('Order Menu Management')
VistA.write('QO')
VistA.wait('Select QUICK ORDER NAME')
VistA.write('LRZ STREP TEST')
VistA.wait('Are you adding')
VistA.write('Y')
VistA.wait('TYPE OF QUICK ORDER')
VistA.write('LAB\r')
VistA.wait('DISPLAY TEXT')
VistA.write('STREP TEST')
VistA.wait('VERIFY ORDER')
VistA.write('Y')
VistA.wait('DESCRIPTION')
VistA.write('N\r')
VistA.wait('Lab Test')
VistA.write('STREP\r2')
VistA.wait('Collected By')
VistA.write('SP')
VistA.wait('Collection Sample')
VistA.write('SWAB\r')
VistA.wait('Collection Date/Time')
VistA.write('TODAY\r')
VistA.wait('How often')
VistA.write('ONCE')
VistA.wait('PLACE//')
VistA.write('\r\r')
VistA.wait('Option')
VistA.write('ST')
VistA.wait('Select ORDER SET NAME')
VistA.write('STREP TEST')
VistA.wait('Are you adding')
VistA.write('Y')
VistA.wait('Do you wish to copy')
VistA.write('No\r')
VistA.wait('DISPLAY TEXT')
VistA.write('Strep Test\r\r\r')
VistA.wait('COMPONENT SEQUENCE')
VistA.write('10\r')
VistA.wait('ITEM:')
VistA.write('LRZ STREP TEST\r\r\r\r') # Return to EVE menu
VistA.wait("Systems Manager Menu")
VistA.write("")
VistA.wait("Do you really")
VistA.write("Y")
def registerVitalsCPRS(VistA):
# Register the DLL versions for Vitals and the executable version for
# CPRS through the XPAR Menu. This information should match the versions
# that will be used during testing.
# Files can be downloaded: http://www.osehra.org/document/guis-used-automatic-functional-testing
VistA.wait(PROMPT,60)
VistA.write('S GMVDLL=\"GMV_VITALSVIEWENTER.DLL:v. 08/11/09 15:00\"')
VistA.wait(PROMPT,60)
VistA.write('D EN^XPAR(\"SYS\",\"GMV DLL VERSION\",GMVDLL,1)')
VistA.wait(PROMPT,60)
VistA.write('S GMVDLL=\"GMV_VITALSVIEWENTER.DLL:v. 01/21/11 12:52\"')
VistA.wait(PROMPT,60)
VistA.write('D EN^XPAR(\"SYS\",\"GMV DLL VERSION\",GMVDLL,1)')
VistA.wait(PROMPT,60)
VistA.write('S GMVGUI=\"VITALSMANAGER.EXE:5.0.26.1\"')
VistA.wait(PROMPT,60)
VistA.write('D EN^XPAR(\"SYS\",\"GMV GUI VERSION\",GMVGUI,1)')
VistA.wait(PROMPT,60)
VistA.write('S GMVGUI=\"VITALS.EXE:5.0.26.1\"')
VistA.wait(PROMPT,60)
VistA.write('D EN^XPAR(\"SYS\",\"GMV GUI VERSION\",GMVGUI,1)')
def addDoctor(VistA,name,init,SSN,sex,AC,VC1):
# Adds a Doctor user into the system via the User Management Menu as
# the System Manager.
# Needs:
# Doctor Name, Doctor Initials, SSN, Sex, Access Code, Verify Code
VistA.write('USER MANAGEMENT')
VistA.wait('User Management')
VistA.write('ADD')
VistA.wait('name')
VistA.write(name+'\rY')
VistA.wait('INITIAL:')
VistA.write(init)
VistA.wait('SSN:')
VistA.write(SSN)
VistA.wait('SEX:')
VistA.write(sex)
VistA.wait('NPI')
VistA.write('')
VistA.wait('NAME COMPONENTS')
# A ScreenMan form opens at this point, and the following information is set:
# Primary Menu: XUCORE
# Secondary Menu: PSB GUI CONTEXT, GMPL MGT MENU, OR CPRS GUI CHART, GMV V/M GUI,
# Access Code: <passed as argument>
# Verify Code: <passed as argument>
# No restriction on Patient Selection
# Allowed multiple sign-ons
# Allopathic and Osteopathic Physicians as the Person Class
# Core CPRS Tab access
VistA.write('\r\r\r\r\r^PRIMARY MENU OPTION\rXUCOR\r^SECONDARY MENU OPTIONS\rPSB GUI CONTEXT\rY\r\r\r\rGMPL MGT MENU\rY\r\r\r\rOR CPRS GUI CHART\rY\r\r\r\rGMV V/M GUI\rY\r\r\r\r^Want to edit ACCESS CODE\rY\r'+AC+'\r'+AC+'\r^Want to edit VERIFY CODE\rY\r'+VC1+'\r'+VC1+'\rVISTA HEALTH CARE\rY\r\r\r\r\r^SERVICE/SECTION\rIRM\r^Language\r\r767\rY\rY\rT-1\r\r^RESTRICT PATIENT SELECTION\r0\r\rCOR\rY\rT-1\r\r^MULTIPLE SIGN-ON\r1\r1\r99\r^\rS\rE')
# Exiting the ScreenMan form, Allocate Security Keys
# PROVIDER,GMV MANAGER,LRLAB,LRVERIFY,ORES,SD SUPERVISOR,SDWL PARAMETER,SDWL MENU,
VistA.wait('User Account Access Letter')
VistA.write('NO')
VistA.wait('wish to allocate security keys?')
VistA.write('Y')
VistA.wait('Allocate key')
VistA.write('PROVIDER\r1')
VistA.wait('Another key')
VistA.write('GMV MANAGER')
VistA.wait('Another key')
VistA.write('LRLAB')
VistA.wait('Another key')
VistA.write('LRVERIFY')
VistA.wait('Another key')
VistA.write('ORES')
VistA.wait('Another key')
VistA.write('SD SUPERVISOR')
VistA.wait('Another key')
VistA.write('SDWL PARAMETER')
VistA.wait('Another key')
VistA.write('SDWL MENU')
VistA.wait('Another key')
VistA.write('PSB MANAGER')
VistA.wait('Another key')
VistA.write('')
VistA.wait('Another holder')
VistA.write('')
VistA.wait('Do you wish to proceed')
VistA.write('Yes')
VistA.wait('add this user to mail groups')
VistA.write('NO')
VistA.wait("User Management")
VistA.write("")
def addNurse(VistA,name,init,SSN,sex,AC,VC1):
# Adds a Nurse user into the system via the User Management Menu as
# the System Manager.
# Needs:
# Nurse Name, Nurse Initials, SSN, Sex, Access Code, Verify Code
VistA.wait("Systems Manager Menu")
VistA.write("User Management")
VistA.wait('User Management')
VistA.write('ADD')
VistA.wait('name')
VistA.write(name+'\rY')
VistA.wait('INITIAL:')
VistA.write(init)
VistA.wait('SSN:')
VistA.write(SSN)
VistA.wait('SEX:')
VistA.write(sex)
VistA.wait('NPI')
VistA.write('')
VistA.wait('NAME COMPONENTS')
# A ScreenMan form opens at this point, and the following information is set:
# Primary Menu: XUCORE
# Secondary Menu: PSB GUI CONTEXT, GMPL MGT MENU, OR CPRS GUI CHART, GMV V/M GUI,
# Access Code: <passed as argument>
# Verify Code: <passed as argument>
# No restriction on Patient Selection
# Allowed multiple sign-ons
# Nursing Service Provider as the Person Class
# Core CPRS Tab access
VistA.write('\r\r\r\r\r^PRIMARY MENU OPTION\rXUCOR\r^SECONDARY MENU OPTIONS\rPSB GUI CONTEXT\rY\r\r\r\rGMPL MGT MENU\rY\r\r\r\rOR CPRS GUI CHART\rY\r\r\r\rGMV V/M GUI\rY\r\r\r\r^Want to edit ACCESS CODE\rY\r'+AC+'\r'+AC+'\r^Want to edit VERIFY CODE\rY\r'+VC1+'\r'+VC1+'\rVISTA HEALTH CARE\rY\r\r\r\r\r^SERVICE/SECTION\rIRM\r^Language\r\r289\rY\rY\rT-1\r\r^RESTRICT PATIENT SELECTION\r0\r\rCOR\rY\rT-1\r\r^MULTIPLE SIGN-ON\r1\r1\r99\r^\rS\rE')
# Exiting the ScreenMan form, Allocate Security Keys
# PROVIDER,ORELSE
VistA.wait('User Account Access Letter')
VistA.write('NO')
VistA.wait('wish to allocate security keys?')
VistA.write('Y')
VistA.wait('Allocate key')
VistA.write('PSB MANAGER')
VistA.wait('Another key')
VistA.write('PROVIDER\r1')
VistA.wait('Another key')
VistA.write('ORELSE\r')
VistA.wait('Another holder')
VistA.write('')
VistA.wait('Do you wish to proceed')
VistA.write('Yes')
VistA.wait('add this user to mail groups')
VistA.write('NO')
VistA.wait("User Management")
VistA.write("")
def addClerk(VistA,name,init,SSN,sex,AC,VC1):
# Adds a Clerk user into the system via the User Management Menu as
# the System Manager.
# Needs:
# Clerk Name, Clerk Initials, SSN, Sex, Access Code, Verify Code
VistA.wait("Systems Manager Menu")
VistA.write("User Management")
VistA.wait('User Management')
VistA.write('ADD')
VistA.wait('name')
VistA.write(name+'\rY')
VistA.wait('INITIAL:')
VistA.write(init)
VistA.wait('SSN:')
VistA.write(SSN)
VistA.wait('SEX:')
VistA.write(sex)
VistA.wait('NPI')
VistA.write('')
VistA.wait('NAME COMPONENTS')
# A ScreenMan form opens at this point, and the following information is set:
# Primary Menu: XUCORE
# Secondary Menu: GMPL DATA ENTRY
# Access Code: <passed as argument>
# Verify Code: <passed as argument>
# No restriction on Patient Selection
# Allowed multiple sign-ons
# Core CPRS Tab access
VistA.write('\r\r\r\r\r^PRIMARY MENU OPTION\rXUCOR\r^SECONDARY MENU OPTIONS\rGMPL DATA ENTRY\rY\r\r\r\rOR CPRS GUI CHART\rY\r\r\r\rGMV V/M GUI\rY\r\r\r\r^Want to edit ACCESS CODE\rY\r'+AC+'\r'+AC+'\r^Want to edit VERIFY CODE\rY\r'+VC1+'\r'+VC1+'\rVISTA HEALTH CARE\rY\r\r\r\r\r^SERVICE/SECTION\rIRM\r^RESTRICT PATIENT SELECTION\r0\r\rCOR\rY\rT-1\r\r^MULTIPLE SIGN-ON\r1\r1\r99\r^\rS\rE')
# Exiting the ScreenMan form, Allocate Security Key
# ORELSE
VistA.wait('User Account Access Letter')
VistA.write('NO')
VistA.wait('wish to allocate security keys?')
VistA.write('Y')
VistA.wait('Allocate key')
VistA.write('ORELSE')
VistA.wait('Another key')
VistA.write('')
VistA.wait('Another holder')
VistA.write('')
VistA.wait('Do you wish to proceed')
VistA.write('Yes')
VistA.wait('add this user to mail groups')
VistA.write('NO')
VistA.wait("User Management")
VistA.write("")
def createOrderMenu(VistA):
# Create the Quick Order Menu to have the LRZ Strep Test as a selectable option while
# not removing the old entries.
VistA.wait('Systems Manager Menu')
VistA.write('CPRS Configuration') # We can jump straight to the CPRS (Clin Coord) menu
VistA.wait('CPRS Configuration')
VistA.write('MM') # Order Menu Management
VistA.wait('Order Menu Management')
VistA.write('MN') # Enter/edit order menus
VistA.wait('ORDER MENU:')
VistA.write('ORZ GEN MED WRITE ORDERS LIST') # New menu name
VistA.wait('Are you adding')
VistA.write('Y')
VistA.wait('Do you wish to copy an existing menu')
VistA.write('N')
VistA.wait('DISPLAY TEXT')
VistA.write('') # Ignored by GUI
VistA.wait('Edit') # DESCRIPTION field
VistA.write('N')
#VistA.write('General Medicine Write Orders list') # Menu description
#VistA.wait('2')
#VistA.write('') # End of DESCRIPTION
#VistA.wait('EDIT') # Editor options
#VistA.write('') # We are done with the DESCRIPTION
VistA.wait('COLUMN WIDTH')
VistA.write('80') # Default to 80 characters
VistA.wait('MNEMONIC WIDTH')
VistA.write('') # Ignored by GUI
VistA.wait('PATH SWITCH')
VistA.write('') # Ignored by GUI
VistA.wait('ENTRY ACTION')
VistA.write('') # Shown because we have programmer access - Ignore this field
VistA.wait('EXIT ACTION')
VistA.write('') # Shown because we have programmer access - Ignore this field
# Begin ScreenMan form
VistA.wait('Action')
VistA.write('Add')
VistA.wait('Add')
VistA.write('Menu Items') # Add Menu Items to this Order Menu
# Add items to menu - repeat for each menu item
# Begin 'Add New Orders' menu
VistA.wait('ITEM')
VistA.write('OR ADD MENU CLINICIAN')
VistA.wait('ROW')
VistA.write('1')
VistA.wait('COLUMN')
VistA.write('1')
VistA.wait('DISPLAY TEXT')
VistA.write('')
VistA.wait('MNEMONIC')
VistA.write('')
# End 'Add New Orders'
# Begin 'Allergies' package menu
VistA.wait('ITEM')
VistA.write('GMRAOR ALLERGY ENTER/EDIT')
VistA.wait('ROW')
VistA.write('2')
VistA.wait('COLUMN')
VistA.write('1')
VistA.wait('DISPLAY TEXT')
VistA.write('')
VistA.wait('MNEMONIC')
VistA.write('')
# End 'Allergies'
# Begin 'Diet' package menu
VistA.wait('ITEM')
VistA.write('FHW1')
VistA.wait('ROW')
VistA.write('3')
VistA.wait('COLUMN')
VistA.write('1')
VistA.wait('DISPLAY TEXT')
VistA.write('')
VistA.wait('MNEMONIC')
VistA.write('')
# End 'Diet'
# Begin 'Meds, Inpatient' package menu
VistA.wait('ITEM')
VistA.write('PSJ OR PAT OE')
VistA.wait('ROW')
VistA.write('4')
VistA.wait('COLUMN')
VistA.write('1')
VistA.wait('DISPLAY TEXT')
VistA.write('')
VistA.wait('MNEMONIC')
VistA.write('')
# End 'Meds, Inpatient'
# Begin 'Meds, Non-VA' package menu
VistA.wait('ITEM')
VistA.write('PSH OERR')
VistA.wait('ROW')
VistA.write('5')
VistA.wait('COLUMN')
VistA.write('1')
VistA.wait('DISPLAY TEXT')
VistA.write('')
VistA.wait('MNEMONIC')
VistA.write('')
# End 'Meds, Non-VA'
# Begin 'Meds, Outpatient' package menu
VistA.wait('ITEM')
VistA.write('PSO OERR')
VistA.wait('ROW')
VistA.write('6')
VistA.wait('COLUMN')
VistA.write('1')
VistA.wait('DISPLAY TEXT')
VistA.write('')
VistA.wait('MNEMONIC')
VistA.write('')
# End 'Meds, Outpatient'
# Begin 'IV Fluids' package menu
VistA.wait('ITEM')
VistA.write('PSJI OR PAT FLUID OE')
VistA.wait('ROW')
VistA.write('7')
VistA.wait('COLUMN')
VistA.write('1')
VistA.wait('DISPLAY TEXT')
VistA.write('')
VistA.wait('MNEMONIC')
VistA.write('')
# End 'IV Fluids'
# Begin 'Lab Tests' package menu
VistA.wait('ITEM')
VistA.write('LR OTHER LAB TESTS')
VistA.wait('ROW')
VistA.write('8')
VistA.wait('COLUMN')
VistA.write('1')
VistA.wait('DISPLAY TEXT')
VistA.write('')
VistA.wait('MNEMONIC')
VistA.write('')
# End 'Lab Tests'
# Begin 'Imaging' package menu
VistA.wait('ITEM')
VistA.write('RA OERR EXAM')
VistA.wait('ROW')
VistA.write('9')
VistA.wait('COLUMN')
VistA.write('1')
VistA.wait('DISPLAY TEXT')
VistA.write('')
VistA.wait('MNEMONIC')
VistA.write('')
# End 'Imaging'
# Begin 'Consult' package menu
VistA.wait('ITEM')
VistA.write('GMRCOR CONSULT')
VistA.wait('ROW')
VistA.write('10')
VistA.wait('COLUMN')
VistA.write('1')
VistA.wait('DISPLAY TEXT')
VistA.write('')
VistA.wait('MNEMONIC')
VistA.write('')
# End 'Consult'
# Begin 'Procedure' package menu
VistA.wait('ITEM')
VistA.write('GMRCOR REQUEST')
VistA.wait('ROW')
VistA.write('11')
VistA.wait('COLUMN')
VistA.write('1')
VistA.wait('DISPLAY TEXT')
VistA.write('')
VistA.wait('MNEMONIC')
VistA.write('')
# End 'Procedure'
# Begin 'Vitals' package menu
VistA.wait('ITEM')
VistA.write('GMRVOR')
VistA.wait('CHOOSE') # There is more than one GMRVOR* menu
VistA.write('1') # GMRVOR is the entire menu name and is the first one
VistA.wait('ROW')
VistA.write('12')
VistA.wait('COLUMN')
VistA.write('1')
VistA.wait('DISPLAY TEXT')
VistA.write('')
VistA.wait('MNEMONIC')
VistA.write('')
# End 'Vitals'
# Begin 'Text Only Order' package menu
VistA.wait('ITEM')
VistA.write('OR GXTEXT WORD PROCESSING ORDER')
VistA.wait('ROW')
VistA.write('13')
VistA.wait('COLUMN')
VistA.write('1')
VistA.wait('DISPLAY TEXT')
VistA.write('')
VistA.wait('MNEMONIC')
VistA.write('')
# End 'Text Only Order'
# Begin 'STREP TEST' quick order menu
VistA.wait('ITEM')
VistA.write('LRZ STREP TEST')
VistA.wait('ROW')
VistA.write('14')
VistA.wait('COLUMN')
VistA.write('1')
VistA.wait('DISPLAY TEXT')
VistA.write('')
VistA.wait('MNEMONIC')
VistA.write('')
# End 'STREP TEST'
VistA.wait('ITEM')
VistA.write('') # Done adding menus
VistA.wait('Action')
VistA.write('Quit') # Done editing this menu
VistA.wait('Order Menu Management') # Need to get to CPRS Manager Menu
VistA.write('General Parameter Tools')
VistA.wait('General Parameter Tools') # The System Manager has this as a secondary menu (can jump to it)
VistA.write('EP') # Edit Parameter
VistA.wait('PARAMETER DEFINITION NAME')
VistA.write('ORWDX WRITE ORDERS LIST') # Parameter used to control Write Orders list
VistA.wait('selection')
VistA.write('8') # Set it for the entire System
VistA.wait('Order Dialog')
VistA.write('ORZ GEN MED WRITE ORDERS LIST') # Order menu we want to use
VistA.write('\r\r\r\r') # we are done. Stay at the EVE menu
def addAllergiesPermission(VistA):
# Add permissions for all users to mark an Allergy as "Entered in error"
# in CPRS. Done in the CPRS Configuration menu.
# Start from the Systems Manager Menu
# Exits to Systems Manager Menu
VistA.wait('Systems Manager Menu')
VistA.write('CPRS Configuration')
VistA.wait('CPRS Configuration')
VistA.write('GUI PARAMETERS')
VistA.wait('GUI Parameters')
VistA.write('GUI Mark Allergy Entered in Error')
VistA.wait('Enter selection')
VistA.write('4\rY\r\r')
def addTemplatePermission(VistA,init):
# Add permission for the Nurse to create note templates that can be
# shared in the domain.
VistA.wait('Systems Manager Menu')
VistA.write('TIU Maintenance')
VistA.wait('TIU Maintenance')
VistA.write('User Class Management')
VistA.wait('User Class Management')
VistA.write('List Membership by User')
VistA.wait('Select USER')
VistA.write('MS\rAdd\rClinical Coordinator\rT-1\r\r\r')
VistA.wait('Option')
VistA.write('\r')
def createClinic(VistA,name,abbrv,service):
# Add clinic via the XUP menu to allow scheduling
# Clinic Information:
# Clinic meets at the Facility: Yes
# Non-Count clinic: No
# Stop Code: 301 (General Internal Medicine)
# Allowable consecutive no-shows: 0
# Max # days for booking in future: 90
  # Hour clinic display begins: 8
# Max # days for Auto-rebook: 90
# Maximum Overbooks per day: 0
# Length of Appointment: 30
# Variable Length Appointments?: Y
# Display increments per hour: 2
VistA.wait(PROMPT)
VistA.write('W $$NOSEND^VAFHUTL')
VistA.wait('0')
VistA.write('S DUZ=1 D ^XUP')
VistA.wait('OPTION NAME:')
VistA.write('SDBUILD')
VistA.wait('CLINIC NAME:')
VistA.write(name)
VistA.wait('Are you adding')
VistA.write('Y')
VistA.wait('NAME')
VistA.write('')
VistA.wait('ABBREVIATION')
VistA.write(abbrv)
while True:
index = VistA.multiwait(['SERVICE','CLINIC MEETS','PATIENT FRIENDLY NAME','ALLOW DIRECT PATIENT','DISPLAY CLIN APPT'])
if index == 0:
break;
if index == 2:
VistA.write('')
else:
VistA.write('Y')
VistA.write(service)
VistA.wait('NON-COUNT CLINIC')
VistA.write('N')
VistA.wait('STOP CODE NUMBER')
VistA.write('301\r\r')
VistA.wait('TELEPHONE')
VistA.write('555-555-1414\r\r\r\r\r\r\r\r\r\r\r')
index = VistA.multiwait(['ALLOWABLE CONSECUTIVE NO-SHOWS','WORKLOAD VALIDATION'])
if index == 1:
VistA.write('')
VistA.wait('ALLOWABLE CONSECUTIVE NO-SHOWS')
VistA.write('0')
VistA.wait('FUTURE BOOKING')
VistA.write('90')
VistA.wait('HOUR CLINIC DISPLAY BEGINS')
VistA.write('8\r')
VistA.wait('AUTO-REBOOK')
VistA.write('90\r\r\r\r\r')
VistA.wait('MAXIMUM')
VistA.write('0\r')
VistA.wait('LENGTH OF APP')
VistA.write('30')
VistA.wait('VARIABLE')
VistA.write('Yes')
VistA.wait('DISPLAY INCREMENTS PER HOUR')
VistA.write('2')
# Sets availability for Clinic. Dates below are for a work week (Mon-Fri)
# Sets 4 appointment slots from 8am to 3pm with a half hour lunch break of
# no appointments. This will be set for all week days in future.
dates = ['JUL 2,2012','JUL 3,2012','JUL 4,2012','JUL 5,2012','JUL 6,2012']
for date in dates:
VistA.wait('AVAILABILITY DATE')
VistA.write(date)
VistA.wait('TIME')
VistA.write('0800-1200\r4')
VistA.wait('TIME')
VistA.write('1230-1500\r4')
VistA.wait('TIME')
VistA.write('')
VistA.wait('PATTERN OK')
VistA.write('Yes')
VistA.wait('AVAILABILITY DATE')
VistA.write('')
VistA.wait('CLINIC NAME:')
VistA.write('')
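# Illustrative sketch (not part of the original script): a hypothetical call
# creating a general medicine clinic; the name, abbreviation, and service are
# placeholders, and the stop code, appointment length, and availability
# defaults are set inside createClinic above, e.g.:
#   createClinic(VistA, 'VISTA CLINIC', 'VC', 'MEDICINE')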
def setupElectronicSignature(VistA,AC,VC1,VC2,sigcode):
# Signs a created user into the ZU Menu system to add a signature code for
# document signing. It will force the user to change the verify code,
VistA.wait(PROMPT,60)
VistA.write('D ^ZU')
VistA.wait('ACCESS CODE:')
VistA.write(AC)
VistA.wait('VERIFY CODE:')
VistA.write(VC1)
VistA.wait('verify code:')
VistA.write(VC1)
VistA.wait('VERIFY CODE:')
VistA.write(VC2)
VistA.wait('right:')
VistA.write(VC2)
VistA.wait('TYPE NAME')
VistA.write('')
# then will enter the User's Toolbox to change the signature information.
VistA.wait('Core Applications')
VistA.write('USER\'s TOOLBOX')
VistA.wait('Toolbox')
VistA.write('ELE')
VistA.wait('INITIAL')
VistA.write('')
VistA.wait('SIGNATURE BLOCK PRINTED NAME')
VistA.write('')
VistA.wait('SIGNATURE BLOCK TITLE')
VistA.write('\r\r\r')
VistA.wait('SIGNATURE CODE')
VistA.write(sigcode)
VistA.wait('SIGNATURE CODE FOR VERIFICATION')
VistA.write(sigcode)
VistA.wait('Toolbox')
VistA.write('\r\r\r')
  # Add patients through the Registration menu (Register a Patient option).
  # Function arguments:
  #   VistA, path to a CSV file of patient records (keyed on SSN)
def addPatient(VistA,pfile):
'''Add ALL patients from specified CSV '''
preader = TestHelper.CSVFileReader()
prec = preader.getfiledata(pfile, 'key')
for pitem in prec:
VistA.write('S DUZ=1 D ^XUP')
VistA.wait('Select OPTION NAME')
VistA.write('Core Applications\r')
VistA.wait("Select Core Applications")
VistA.write("ADT Manager Menu")
while True:
index = VistA.multiwait(['to continue','Select ADT Manager Menu',"Select Registration Menu"])
if index == 0:
VistA.write('')
elif index == 1:
VistA.write("Registration Menu")
elif index == 2:
VistA.write('Register a Patient')
break
index = VistA.multiwait(['PATIENT NAME',"Select 1010 printer"])
if index == 1:
VistA.write("NULL")
VistA.wait('PATIENT NAME')
VistA.write(prec[pitem]['fullname'].rstrip().lstrip())
index = VistA.multiwait(['ARE YOU ADDING','Enterprise Search'])
VistA.write('Y')
if index == 1:
while True:
index = VistA.multiwait(['FAMILY','GIVEN','MIDDLE NAME','PREFIX','SUFFIX',
'DEGREE','SOCIAL SECURITY','DATE OF BIRTH','SEX','MAIDEN NAME','CITY','STATE',
'MULTIPLE BIRTH','PHONE NUMBER','ARE YOU ADDING'])
if index == 14:
VistA.write('Y')
break
elif index == 6:
VistA.write(pitem)
elif index == 7:
VistA.write(prec[pitem]['dob'].rstrip().lstrip())
elif index == 8:
VistA.write(prec[pitem]['sex'].rstrip().lstrip())
else:
VistA.write('')
VistA.wait('to continue')
VistA.write('')
VistA.wait('MULTIPLE BIRTH INDICATOR')
VistA.write('')
VistA.wait('MAIDEN NAME:')
VistA.write('')
else:
VistA.wait('SEX')
VistA.write(prec[pitem]['sex'].rstrip().lstrip())
VistA.wait('DATE OF BIRTH')
VistA.write(prec[pitem]['dob'].rstrip().lstrip())
VistA.wait('SOCIAL SECURITY NUMBER')
VistA.write(pitem)
VistA.wait('TYPE')
VistA.write(prec[pitem]['type'].rstrip().lstrip())
VistA.wait('PATIENT VETERAN')
VistA.write(prec[pitem]['veteran'].rstrip().lstrip())
VistA.wait('SERVICE CONNECTED')
VistA.write(prec[pitem]['service'].rstrip().lstrip())
VistA.wait('MULTIPLE BIRTH INDICATOR')
VistA.write(prec[pitem]['twin'].rstrip().lstrip())
index = VistA.multiwait(["Do you still",'FAMILY'])
if index == 0:
VistA.write('Y')
VistA.wait("FAMILY")
VistA.write('^\r')
VistA.wait('MAIDEN NAME:')
VistA.write('')
VistA.wait('[CITY]')
VistA.write(prec[pitem]['cityob'].rstrip().lstrip())
VistA.wait('[STATE]')
VistA.write(prec[pitem]['stateob'].rstrip().lstrip())
VistA.wait('ALIAS')
VistA.write('')
searchArray = ['exit:', VistA.prompt]
if VistA.type=='cache':
searchArray.append(VistA.namespace)
index = VistA.multiwait(searchArray)
if index == 0:
VistA.write('\r')
VistA.wait('Patient Data')
VistA.write('Y')
VistA.wait('QUIT')
VistA.write('4')
VistA.wait('COUNTRY')
VistA.write('')
VistA.wait('STREET ADDRESS')
VistA.write('834 Ocean Vista Avenue\r')
VistA.wait('ZIP')
VistA.write('90401')
VistA.wait('CITY')
VistA.write('1')
VistA.wait('PHONE NUMBER')
VistA.write('310-555-2233\r\r')
VistA.wait('changes')
VistA.write('Y\r')
VistA.wait('QUIT')
VistA.write('\r\r')
VistA.wait('QUIT')
VistA.write('1')
VistA.wait('PRIMARY NOK')
VistA.write('Carter,David J Sr')
VistA.wait('RELATIONSHIP')
VistA.write('FATHER')
VistA.wait('ADDRESS')
VistA.write('Y')
VistA.wait('WORK PHONE')
VistA.write('310-555-9876\r^')
VistA.wait('condition')
VistA.write('N')
VistA.wait('today')
VistA.write('Y')
VistA.wait('Registration login')
VistA.write('NOW')
VistA.wait(PROMPT)
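# Illustrative sketch (not part of the original script): the CSV consumed by
# addPatient is keyed on the 'key' column (used as the SSN) and, judging from
# the fields read above, looks roughly like the following. The column names
# and the sample row are assumptions inferred from the code, not original
# test data.
#
#   key,fullname,type,veteran,service,dob,sex,twin,cityob,stateob
#   666000111,"CARTER,DAVID J JR",NON-VETERAN (OTHER),NO,NO,4/7/1990,M,NO,SANTA MONICA,CALIFORNIA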
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.contrib.all_reduce.python..all_reduce."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.contrib.all_reduce.python import all_reduce as ar
from tensorflow.core.framework import types_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
class AllReduceTest(test_util.TensorFlowTestCase):
def testRingPermutations(self):
# 0 devices
pred_by_c_d, rank_by_c_d = ar._ring_permutations(1, 0, [])
self.assertEqual(pred_by_c_d, [])
self.assertEqual(rank_by_c_d, [])
# 1 worker, 1 subchunk cases
pred_by_c_d, rank_by_c_d = ar._ring_permutations(1, 1, [0])
self.assertEqual(pred_by_c_d, [[0]])
self.assertEqual(rank_by_c_d, [[0]])
pred_by_c_d, rank_by_c_d = ar._ring_permutations(1, 1, [0, 1, 2])
self.assertEqual(pred_by_c_d, [[2, 0, 1]])
self.assertEqual(rank_by_c_d, [[0, 1, 2]])
# multiple workers, 1 subchunk cases
pred_by_c_d, rank_by_c_d = ar._ring_permutations(2, 1, [0, 1, 2])
self.assertEqual(pred_by_c_d, [[5, 0, 1, 2, 3, 4]])
self.assertEqual(rank_by_c_d, [[0, 1, 2, 3, 4, 5]])
pred_by_c_d, rank_by_c_d = ar._ring_permutations(3, 1, [0, 1, 2])
self.assertEqual(pred_by_c_d, [[8, 0, 1, 2, 3, 4, 5, 6, 7]])
self.assertEqual(rank_by_c_d, [[0, 1, 2, 3, 4, 5, 6, 7, 8]])
pred_by_c_d, rank_by_c_d = ar._ring_permutations(2, 1, [2, 1, 0])
self.assertEqual(pred_by_c_d, [[1, 2, 3, 4, 5, 0]])
self.assertEqual(rank_by_c_d, [[2, 1, 0, 5, 4, 3]])
# 1 worker, multiple subchunk cases
pred_by_c_d, rank_by_c_d = ar._ring_permutations(1, 2, [0, 1, 2, 3])
self.assertEqual(pred_by_c_d, [[3, 0, 1, 2], [3, 0, 1, 2]])
self.assertEqual(rank_by_c_d, [[0, 1, 2, 3], [2, 3, 0, 1]])
pred_by_c_d, rank_by_c_d = ar._ring_permutations(1, 4, [0, 1, 2, 3])
self.assertEqual(pred_by_c_d, [[3, 0, 1, 2], [3, 0, 1, 2],
[3, 0, 1, 2], [3, 0, 1, 2]])
self.assertEqual(rank_by_c_d, [[0, 1, 2, 3], [3, 0, 1, 2],
[2, 3, 0, 1], [1, 2, 3, 0]])
# multiple worker, multiple subchunk cases
pred_by_c_d, rank_by_c_d = ar._ring_permutations(2, 2, [0, 1, 2, 3])
self.assertEqual(pred_by_c_d, [[7, 0, 1, 2, 3, 4, 5, 6],
[3, 0, 5, 2, 7, 4, 1, 6]])
self.assertEqual(rank_by_c_d, [[0, 1, 2, 3, 4, 5, 6, 7],
[2, 3, 0, 1, 6, 7, 4, 5]])
pred_by_c_d, rank_by_c_d = ar._ring_permutations(2, 2, [0, 3, 2, 1])
self.assertEqual(pred_by_c_d, [[5, 2, 3, 0, 1, 6, 7, 4],
[1, 2, 7, 0, 5, 6, 3, 4]])
self.assertEqual(rank_by_c_d, [[0, 3, 2, 1, 4, 7, 6, 5],
[2, 1, 0, 3, 6, 5, 4, 7]])
def _buildInput(self, num_workers, num_gpus):
t8 = constant_op.constant(
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
types_pb2.DT_FLOAT)
input_tensors = []
device_names = []
for w in range(0, num_workers):
for d in range(0, num_gpus):
dn = "/replica:0/task:%d/device:GPU:%d" % (w, d % num_gpus)
device_names.append(dn)
with ops.device(dn):
input_tensors.append(array_ops.identity(t8))
return input_tensors, device_names
def testBuildRingGatherPassStructure(self):
# 1 worker, 1 device
input_tensors, device_names = self._buildInput(1, 1)
pred_by_c_d, rank_by_c_d = ar._ring_permutations(1, 1, [0])
output_tensors = ar._build_ring_gather(input_tensors, device_names, 1,
pred_by_c_d, rank_by_c_d,
math_ops.add)
self.assertEqual(output_tensors, input_tensors)
# 1 worker, 4 devices, 2 subchunks
input_tensors, device_names = self._buildInput(1, 4)
pred_by_c_d, rank_by_c_d = ar._ring_permutations(1, 2, [0, 1, 2, 3])
output_tensors, pad_len = ar._build_ring_gather(
input_tensors, device_names, 2, pred_by_c_d, rank_by_c_d, math_ops.add)
self.assertEqual(0, pad_len)
# same number outputs as inputs
self.assertEqual(len(output_tensors), len(input_tensors))
num_chunks = 2 * len(input_tensors)
tlen = input_tensors[0].shape[0].value
for otl in output_tensors:
self.assertEqual(len(otl), num_chunks)
for ot in otl:
self.assertEqual(ot.shape, [tlen/num_chunks])
def _buildInitialVars(self, shape, dev_list):
values = []
num_devices = len(dev_list)
dim = np.prod(shape)
for d in range(0, num_devices):
with ops.device(dev_list[d]):
npt = np.zeros(shape).astype(np.float32)
alias = np.frombuffer(npt.data, dtype=np.float32)
for i in range(0, dim):
alias[i] = i + 0.01 * d
var = state_ops.variable_op(shape, types_pb2.DT_FLOAT)
state_ops.init_variable(var, npt).op.run()
values.append(var)
return values
# pylint: disable=g-long-lambda
def _buildRing(self, num_workers, num_gpus, subdiv):
gpu_perm = range(0, num_gpus)
return lambda x, un_op: ar.build_ring_all_reduce(
x, num_workers, subdiv, gpu_perm, math_ops.add, un_op)
def _testAllReduce(self, num_workers, num_gpus, shape, build_f):
# Use local CPU as device for all inputs.
num_devices = num_workers * num_gpus
dev_list = ["/replica:0/task:0/device:CPU:0"
for _ in range(num_devices)]
with self.test_session():
input_tensors = self._buildInitialVars(shape, dev_list)
un_op = lambda x: math_ops.div(
x, constant_op.constant(num_devices, dtype=types_pb2.DT_FLOAT))
simple_sum = math_ops.add_n(input_tensors)
simple_sum.op.run()
output_tensors = build_f(input_tensors, un_op)
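      # Each all-reduced output equals the element-wise sum of the inputs
      # divided by num_devices (via un_op), i.e. their mean. Summing those
      # outputs across devices should therefore reproduce the plain sum of
      # the inputs, which is what the final assertion checks.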
sum_reduced = math_ops.add_n(output_tensors)
sum_reduced.op.run()
self.assertAllClose(sum_reduced.eval(), simple_sum.eval())
def _testRingAllReduce(self, num_workers, num_gpus, shape, subdiv):
start_time = time.time()
build_f = self._buildRing(num_workers, num_gpus, subdiv)
self._testAllReduce(num_workers, num_gpus, shape, build_f)
elapsed = time.time() - start_time
tf_logging.info("RingAllReduce num_workers=%d num_gpus=%d shape=%s "
"subdiv=%d elapsed=%f" %
(num_workers, num_gpus, shape, subdiv, elapsed))
def testRingAllReduce(self):
self._testRingAllReduce(1, 2, [8], 1)
self._testRingAllReduce(1, 2, [4, 4], 1)
self._testRingAllReduce(6, 1, [8], 1)
self._testRingAllReduce(1, 8, [32], 1)
self._testRingAllReduce(1, 8, [120], 1)
self._testRingAllReduce(2, 8, [7, 13], 1)
self._testRingAllReduce(2, 8, [8, 8], 2)
self._testRingAllReduce(2, 8, [8, 8], 4)
# TODO(tucker): The following test is surprisingly slow.
# Diagnose and fix before re-enabling.
# self._testRingAllReduce(4, 8, [8, 8, 2], 4)
def _buildShuffle(self, num_workers, num_gpus, num_shards):
# Use local CPU for all shuffle shards
gather_devices = ["/replica:0/task:0/device:CPU:0"
for _ in range(num_shards)]
return lambda x, un_op: ar.build_shuffle_all_reduce(
x, gather_devices, math_ops.add_n, un_op)
def _testShuffleAllReduce(self, num_workers, num_gpus, shape, num_shards):
start_time = time.time()
build_f = self._buildShuffle(num_workers, num_gpus, num_shards)
self._testAllReduce(num_workers, num_gpus, shape, build_f)
elapsed = time.time() - start_time
tf_logging.info("ShuffleAllReduce num_workers=%d num_gpus=%d shape=%s "
"elapsed=%f" % (num_workers, num_gpus, shape, elapsed))
def testShuffleAllReduce(self):
self._testShuffleAllReduce(1, 2, [8], 1)
self._testShuffleAllReduce(1, 2, [4, 4], 1)
self._testShuffleAllReduce(1, 8, [32], 1)
self._testShuffleAllReduce(1, 8, [120], 1)
self._testShuffleAllReduce(2, 8, [7, 13], 3)
self._testShuffleAllReduce(2, 8, [8, 8], 2)
self._testShuffleAllReduce(2, 8, [8, 8], 4)
self._testShuffleAllReduce(4, 8, [8, 8, 2], 4)
def _buildRecursiveHD(self, num_workers, num_gpus):
return lambda x, un_op: ar.build_recursive_hd_all_reduce(
x, math_ops.add, un_op)
# pylint: enable=g-long-lambda
def _testRecursiveHDAllReduce(self, num_workers, num_gpus, shape):
start_time = time.time()
build_f = self._buildRecursiveHD(num_workers, num_gpus)
self._testAllReduce(num_workers, num_gpus, shape, build_f)
elapsed = time.time() - start_time
tf_logging.info("RecursiveHDAllReduce num_workers=%d num_gpus=%d "
"shape=%s elapsed=%f" %
(num_workers, num_gpus, shape, elapsed))
def testRecursiveHDAllReduce(self):
self._testRecursiveHDAllReduce(1, 2, [8])
self._testRecursiveHDAllReduce(1, 2, [4, 4])
self._testRecursiveHDAllReduce(1, 8, [32])
self._testRecursiveHDAllReduce(1, 8, [120])
self._testRecursiveHDAllReduce(2, 8, [8, 8])
self._testRecursiveHDAllReduce(4, 8, [8, 8, 2])
if __name__ == "__main__":
test.main()
|
|
"""
This module provides functions to convert
NetworkX graphs to and from other formats.
The preferred way of converting data to a NetworkX graph
is through the graph constructor. The constructor calls
the to_networkx_graph() function which attempts to guess the
input type and convert it automatically.
Examples
--------
Create a 10 node random graph from a numpy matrix
>>> import numpy
>>> a=numpy.reshape(numpy.random.random_integers(0,1,size=100),(10,10))
>>> D=nx.DiGraph(a)
or equivalently
>>> D=nx.to_networkx_graph(a,create_using=nx.DiGraph())
Create a graph with a single edge from a dictionary of dictionaries
>>> d={0: {1: 1}} # dict-of-dicts single edge (0,1)
>>> G=nx.Graph(d)
See Also
--------
nx_pygraphviz, nx_pydot
"""
__author__ = """\n""".join(['Aric Hagberg ([email protected])',
'Pieter Swart ([email protected])',
'Dan Schult([email protected])'])
# Copyright (C) 2006-2011 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import warnings
import networkx as nx
__all__ = ['to_networkx_graph',
'from_dict_of_dicts', 'to_dict_of_dicts',
'from_dict_of_lists', 'to_dict_of_lists',
'from_edgelist', 'to_edgelist',
'from_numpy_matrix', 'to_numpy_matrix',
'to_numpy_recarray',
'from_scipy_sparse_matrix', 'to_scipy_sparse_matrix']
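# A minimal usage sketch (not part of the original module; the data and the
# helper name are illustrative only). It shows the two equivalent entry points
# described in the module docstring: the Graph constructor and
# to_networkx_graph().
def _example_round_trip():
    dol = {0: [1, 2], 1: [2]}            # dict-of-lists adjacency
    G1 = nx.Graph(dol)                   # constructor guesses the input type
    G2 = to_networkx_graph(dol)          # explicit conversion, same result
    assert sorted(G1.edges()) == sorted(G2.edges())
    return to_dict_of_lists(G1)          # and back to a dict of lists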
def _prep_create_using(create_using):
"""Return a graph object ready to be populated.
    If create_using is None, return the default (just networkx.Graph()).
    If create_using.clear() works, assume it is a graph object and return it.
Otherwise raise an exception because create_using is not a networkx graph.
"""
if create_using is None:
G=nx.Graph()
else:
G=create_using
try:
G.clear()
except:
raise TypeError("Input graph is not a networkx graph type")
return G
def to_networkx_graph(data,create_using=None,multigraph_input=False):
"""Make a NetworkX graph from a known data structure.
The preferred way to call this is automatically
from the class constructor
>>> d={0: {1: {'weight':1}}} # dict-of-dicts single edge (0,1)
>>> G=nx.Graph(d)
instead of the equivalent
>>> G=nx.from_dict_of_dicts(d)
Parameters
----------
    data : an object to be converted
Current known types are:
any NetworkX graph
dict-of-dicts
         dict-of-lists
list of edges
numpy matrix
numpy ndarray
scipy sparse matrix
pygraphviz agraph
create_using : NetworkX graph
Use specified graph for result. Otherwise a new graph is created.
multigraph_input : bool (default False)
If True and data is a dict_of_dicts,
try to create a multigraph assuming dict_of_dict_of_lists.
If data and create_using are both multigraphs then create
a multigraph from a multigraph.
"""
# NX graph
if hasattr(data,"adj"):
try:
result= from_dict_of_dicts(data.adj,\
create_using=create_using,\
multigraph_input=data.is_multigraph())
if hasattr(data,'graph') and isinstance(data.graph,dict):
result.graph=data.graph.copy()
if hasattr(data,'node') and isinstance(data.node,dict):
result.node=dict( (n,dd.copy()) for n,dd in data.node.items() )
return result
except:
raise nx.NetworkXError("Input is not a correct NetworkX graph.")
# pygraphviz agraph
if hasattr(data,"is_strict"):
try:
return nx.from_agraph(data,create_using=create_using)
except:
raise nx.NetworkXError("Input is not a correct pygraphviz graph.")
# dict of dicts/lists
if isinstance(data,dict):
try:
return from_dict_of_dicts(data,create_using=create_using,\
multigraph_input=multigraph_input)
except:
try:
return from_dict_of_lists(data,create_using=create_using)
except:
raise TypeError("Input is not known type.")
# list or generator of edges
if (isinstance(data,list)
or hasattr(data,'next')
or hasattr(data, '__next__')):
try:
return from_edgelist(data,create_using=create_using)
except:
raise nx.NetworkXError("Input is not a valid edge list")
# numpy matrix or ndarray
try:
import numpy
if isinstance(data,numpy.matrix) or \
isinstance(data,numpy.ndarray):
try:
return from_numpy_matrix(data,create_using=create_using)
except:
raise nx.NetworkXError(\
"Input is not a correct numpy matrix or array.")
except ImportError:
warnings.warn('numpy not found, skipping conversion test.',
ImportWarning)
# scipy sparse matrix - any format
try:
import scipy
if hasattr(data,"format"):
try:
return from_scipy_sparse_matrix(data,create_using=create_using)
except:
raise nx.NetworkXError(\
"Input is not a correct scipy sparse matrix type.")
except ImportError:
warnings.warn('scipy not found, skipping conversion test.',
ImportWarning)
raise nx.NetworkXError(\
"Input is not a known data type for conversion.")
return
def convert_to_undirected(G):
"""Return a new undirected representation of the graph G."""
return G.to_undirected()
def convert_to_directed(G):
"""Return a new directed representation of the graph G."""
return G.to_directed()
def to_dict_of_lists(G,nodelist=None):
"""Return adjacency representation of graph as a dictionary of lists.
Parameters
----------
G : graph
A NetworkX graph
nodelist : list
Use only nodes specified in nodelist
Notes
-----
Completely ignores edge data for MultiGraph and MultiDiGraph.
"""
if nodelist is None:
nodelist=G
d = {}
for n in nodelist:
d[n]=[nbr for nbr in G.neighbors(n) if nbr in nodelist]
return d
def from_dict_of_lists(d,create_using=None):
"""Return a graph from a dictionary of lists.
Parameters
----------
d : dictionary of lists
A dictionary of lists adjacency representation.
create_using : NetworkX graph
Use specified graph for result. Otherwise a new graph is created.
Examples
--------
>>> dol= {0:[1]} # single edge (0,1)
>>> G=nx.from_dict_of_lists(dol)
or
>>> G=nx.Graph(dol) # use Graph constructor
"""
G=_prep_create_using(create_using)
G.add_nodes_from(d)
if G.is_multigraph() and not G.is_directed():
# a dict_of_lists can't show multiedges. BUT for undirected graphs,
# each edge shows up twice in the dict_of_lists.
# So we need to treat this case separately.
seen={}
for node,nbrlist in d.items():
for nbr in nbrlist:
if nbr not in seen:
G.add_edge(node,nbr)
seen[node]=1 # don't allow reverse edge to show up
else:
G.add_edges_from( ((node,nbr) for node,nbrlist in d.items()
for nbr in nbrlist) )
return G
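# Illustrative sketch (not part of the original source; the helper name is
# hypothetical). In an undirected dict-of-lists each edge appears under both
# endpoints, so without the `seen` bookkeeping above a MultiGraph would record
# every edge twice.
def _example_dict_of_lists_multigraph():
    dol = {0: [1], 1: [0]}                # the single edge (0, 1), listed twice
    G = from_dict_of_lists(dol, create_using=nx.MultiGraph())
    assert G.number_of_edges() == 1       # not 2, thanks to the `seen` dict
    return G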
def to_dict_of_dicts(G,nodelist=None,edge_data=None):
"""Return adjacency representation of graph as a dictionary of dictionaries.
Parameters
----------
G : graph
A NetworkX graph
nodelist : list
Use only nodes specified in nodelist
edge_data : list, optional
If provided, the value of the dictionary will be
set to edge_data for all edges. This is useful to make
an adjacency matrix type representation with 1 as the edge data.
        If edge_data is None, the edge data in G is used to fill the values.
        If G is a multigraph, the edge data is a dict for each pair (u,v).
"""
dod={}
if nodelist is None:
if edge_data is None:
for u,nbrdict in G.adjacency_iter():
dod[u]=nbrdict.copy()
else: # edge_data is not None
for u,nbrdict in G.adjacency_iter():
dod[u]=dod.fromkeys(nbrdict, edge_data)
else: # nodelist is not None
if edge_data is None:
for u in nodelist:
dod[u]={}
for v,data in ((v,data) for v,data in G[u].items() if v in nodelist):
dod[u][v]=data
else: # nodelist and edge_data are not None
for u in nodelist:
dod[u]={}
for v in ( v for v in G[u] if v in nodelist):
dod[u][v]=edge_data
return dod
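# Illustrative sketch (not part of the original source; the helper name is
# hypothetical). Passing edge_data=1 yields an adjacency-matrix-like
# dict-of-dicts, as described in the docstring above.
def _example_to_dict_of_dicts():
    G = nx.Graph([(0, 1), (1, 2)])
    dod = to_dict_of_dicts(G, edge_data=1)
    # dod == {0: {1: 1}, 1: {0: 1, 2: 1}, 2: {1: 1}}
    return dod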
def from_dict_of_dicts(d,create_using=None,multigraph_input=False):
"""Return a graph from a dictionary of dictionaries.
Parameters
----------
d : dictionary of dictionaries
A dictionary of dictionaries adjacency representation.
create_using : NetworkX graph
Use specified graph for result. Otherwise a new graph is created.
multigraph_input : bool (default False)
When True, the values of the inner dict are assumed
to be containers of edge data for multiple edges.
Otherwise this routine assumes the edge data are singletons.
Examples
--------
>>> dod= {0: {1:{'weight':1}}} # single edge (0,1)
>>> G=nx.from_dict_of_dicts(dod)
or
>>> G=nx.Graph(dod) # use Graph constructor
"""
G=_prep_create_using(create_using)
G.add_nodes_from(d)
# is dict a MultiGraph or MultiDiGraph?
if multigraph_input:
# make a copy of the list of edge data (but not the edge data)
if G.is_directed():
if G.is_multigraph():
G.add_edges_from( (u,v,key,data)
for u,nbrs in d.items()
for v,datadict in nbrs.items()
for key,data in datadict.items()
)
else:
G.add_edges_from( (u,v,data)
for u,nbrs in d.items()
for v,datadict in nbrs.items()
for key,data in datadict.items()
)
else: # Undirected
if G.is_multigraph():
seen=set() # don't add both directions of undirected graph
for u,nbrs in d.items():
for v,datadict in nbrs.items():
if (u,v) not in seen:
G.add_edges_from( (u,v,key,data)
for key,data in datadict.items()
)
seen.add((v,u))
else:
seen=set() # don't add both directions of undirected graph
for u,nbrs in d.items():
for v,datadict in nbrs.items():
if (u,v) not in seen:
G.add_edges_from( (u,v,data)
for key,data in datadict.items() )
seen.add((v,u))
else: # not a multigraph to multigraph transfer
if G.is_multigraph() and not G.is_directed():
# d can have both representations u-v, v-u in dict. Only add one.
# We don't need this check for digraphs since we add both directions,
# or for Graph() since it is done implicitly (parallel edges not allowed)
seen=set()
for u,nbrs in d.items():
for v,data in nbrs.items():
if (u,v) not in seen:
G.add_edge(u,v,attr_dict=data)
seen.add((v,u))
else:
G.add_edges_from( ( (u,v,data)
for u,nbrs in d.items()
for v,data in nbrs.items()) )
return G
def to_edgelist(G,nodelist=None):
"""Return a list of edges in the graph.
Parameters
----------
G : graph
A NetworkX graph
nodelist : list
Use only nodes specified in nodelist
"""
if nodelist is None:
return G.edges(data=True)
else:
return G.edges(nodelist,data=True)
def from_edgelist(edgelist,create_using=None):
"""Return a graph from a list of edges.
Parameters
----------
edgelist : list or iterator
Edge tuples
create_using : NetworkX graph
Use specified graph for result. Otherwise a new graph is created.
Examples
--------
>>> edgelist= [(0,1)] # single edge (0,1)
>>> G=nx.from_edgelist(edgelist)
or
>>> G=nx.Graph(edgelist) # use Graph constructor
"""
G=_prep_create_using(create_using)
G.add_edges_from(edgelist)
return G
def to_numpy_matrix(G, nodelist=None, dtype=None, order=None,
multigraph_weight=sum, weight='weight'):
"""Return the graph adjacency matrix as a NumPy matrix.
Parameters
----------
G : graph
The NetworkX graph used to construct the NumPy matrix.
nodelist : list, optional
The rows and columns are ordered according to the nodes in `nodelist`.
If `nodelist` is None, then the ordering is produced by G.nodes().
dtype : NumPy data type, optional
A valid single NumPy data type used to initialize the array.
This must be a simple type such as int or numpy.float64 and
not a compound data type (see to_numpy_recarray)
If None, then the NumPy default is used.
order : {'C', 'F'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory. If None, then the NumPy default
is used.
multigraph_weight : {sum, min, max}, optional
An operator that determines how weights in multigraphs are handled.
The default is to sum the weights of the multiple edges.
weight : string or None optional (default='weight')
The edge attribute that holds the numerical value used for
the edge weight. If None then all edge weights are 1.
Returns
-------
M : NumPy matrix
Graph adjacency matrix.
See Also
--------
to_numpy_recarray, from_numpy_matrix
Notes
-----
The matrix entries are assigned with weight edge attribute. When
an edge does not have the weight attribute, the value of the entry is 1.
For multiple edges, the values of the entries are the sums of the edge
attributes for each edge.
When `nodelist` does not contain every node in `G`, the matrix is built
from the subgraph of `G` that is induced by the nodes in `nodelist`.
Examples
--------
>>> G = nx.MultiDiGraph()
>>> G.add_edge(0,1,weight=2)
>>> G.add_edge(1,0)
>>> G.add_edge(2,2,weight=3)
>>> G.add_edge(2,2)
>>> nx.to_numpy_matrix(G, nodelist=[0,1,2])
matrix([[ 0., 2., 0.],
[ 1., 0., 0.],
[ 0., 0., 4.]])
"""
try:
import numpy as np
except ImportError:
raise ImportError(\
"to_numpy_matrix() requires numpy: http://scipy.org/ ")
if nodelist is None:
nodelist = G.nodes()
nodeset = set(nodelist)
if len(nodelist) != len(nodeset):
msg = "Ambiguous ordering: `nodelist` contained duplicates."
raise nx.NetworkXError(msg)
nlen=len(nodelist)
undirected = not G.is_directed()
index=dict(zip(nodelist,range(nlen)))
if G.is_multigraph():
# Handle MultiGraphs and MultiDiGraphs
        # array of nans to start with; any leftover nans will be converted to 0
        # nans are used so we can use sum, min, max for multigraphs
M = np.zeros((nlen,nlen), dtype=dtype, order=order)+np.nan
# use numpy nan-aware operations
operator={sum:np.nansum, min:np.nanmin, max:np.nanmax}
try:
op=operator[multigraph_weight]
except:
raise ValueError('multigraph_weight must be sum, min, or max')
for u,v,attrs in G.edges_iter(data=True):
if (u in nodeset) and (v in nodeset):
i,j = index[u],index[v]
e_weight = attrs.get(weight, 1)
M[i,j] = op([e_weight,M[i,j]])
if undirected:
M[j,i] = M[i,j]
# convert any nans to zeros
M = np.asmatrix(np.nan_to_num(M))
else:
# Graph or DiGraph, this is much faster than above
M = np.zeros((nlen,nlen), dtype=dtype, order=order)
for u,nbrdict in G.adjacency_iter():
for v,d in nbrdict.items():
try:
M[index[u],index[v]]=d.get(weight,1)
except KeyError:
pass
M = np.asmatrix(M)
return M
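# Illustrative sketch (not part of the original source; the helper name is
# hypothetical). The nan-initialized matrix above lets parallel edges be
# combined with nansum/nanmin/nanmax; e.g. two parallel edges of weight 2 and
# 3 produce a single entry of 5 under the default multigraph_weight=sum.
def _example_multigraph_weights():
    G = nx.MultiGraph()
    G.add_edge(0, 1, weight=2)
    G.add_edge(0, 1, weight=3)
    return to_numpy_matrix(G)             # matrix([[ 0., 5.], [ 5., 0.]])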
def from_numpy_matrix(A,create_using=None):
"""Return a graph from numpy matrix.
The numpy matrix is interpreted as an adjacency matrix for the graph.
Parameters
----------
A : numpy matrix
An adjacency matrix representation of a graph
create_using : NetworkX graph
Use specified graph for result. The default is Graph()
Notes
-----
If the numpy matrix has a single data type for each matrix entry it
will be converted to an appropriate Python data type.
If the numpy matrix has a user-specified compound data type the names
of the data fields will be used as attribute keys in the resulting
NetworkX graph.
See Also
--------
to_numpy_matrix, to_numpy_recarray
Examples
--------
Simple integer weights on edges:
>>> import numpy
>>> A=numpy.matrix([[1,1],[2,1]])
>>> G=nx.from_numpy_matrix(A)
User defined compound data type on edges:
>>> import numpy
>>> dt=[('weight',float),('cost',int)]
>>> A=numpy.matrix([[(1.0,2)]],dtype=dt)
>>> G=nx.from_numpy_matrix(A)
>>> G.edges(data=True)
[(0, 0, {'cost': 2, 'weight': 1.0})]
"""
kind_to_python_type={'f':float,
'i':int,
'u':int,
'b':bool,
'c':complex,
'S':str,
'V':'void'}
try: # Python 3.x
blurb = chr(1245) # just to trigger the exception
kind_to_python_type['U']=str
except ValueError: # Python 2.6+
kind_to_python_type['U']=unicode
# This should never fail if you have created a numpy matrix with numpy...
try:
import numpy as np
except ImportError:
raise ImportError(\
"from_numpy_matrix() requires numpy: http://scipy.org/ ")
G=_prep_create_using(create_using)
n,m=A.shape
if n!=m:
raise nx.NetworkXError("Adjacency matrix is not square.",
"nx,ny=%s"%(A.shape,))
dt=A.dtype
try:
python_type=kind_to_python_type[dt.kind]
except:
raise TypeError("Unknown numpy data type: %s"%dt)
# make sure we get isolated nodes
G.add_nodes_from(range(n))
# get a list of edges
x,y=np.asarray(A).nonzero()
# handle numpy constructed data type
    if python_type == 'void':
fields=sorted([(offset,dtype,name) for name,(dtype,offset) in
A.dtype.fields.items()])
for (u,v) in zip(x,y):
attr={}
for (offset,dtype,name),val in zip(fields,A[u,v]):
attr[name]=kind_to_python_type[dtype.kind](val)
G.add_edge(u,v,attr)
else: # basic data type
G.add_edges_from( ((u,v,{'weight':python_type(A[u,v])})
for (u,v) in zip(x,y)) )
return G
def to_numpy_recarray(G,nodelist=None,
dtype=[('weight',float)],
order=None):
"""Return the graph adjacency matrix as a NumPy recarray.
Parameters
----------
G : graph
The NetworkX graph used to construct the NumPy matrix.
nodelist : list, optional
The rows and columns are ordered according to the nodes in `nodelist`.
If `nodelist` is None, then the ordering is produced by G.nodes().
dtype : NumPy data-type, optional
A valid NumPy named dtype used to initialize the NumPy recarray.
The data type names are assumed to be keys in the graph edge attribute
dictionary.
order : {'C', 'F'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory. If None, then the NumPy default
is used.
Returns
-------
M : NumPy recarray
The graph with specified edge data as a Numpy recarray
Notes
-----
When `nodelist` does not contain every node in `G`, the matrix is built
from the subgraph of `G` that is induced by the nodes in `nodelist`.
Examples
--------
>>> G = nx.Graph()
>>> G.add_edge(1,2,weight=7.0,cost=5)
>>> A=nx.to_numpy_recarray(G,dtype=[('weight',float),('cost',int)])
>>> print(A.weight)
[[ 0. 7.]
[ 7. 0.]]
>>> print(A.cost)
[[0 5]
[5 0]]
"""
try:
import numpy as np
except ImportError:
raise ImportError(\
"to_numpy_matrix() requires numpy: http://scipy.org/ ")
if G.is_multigraph():
raise nx.NetworkXError("Not implemented for multigraphs.")
if nodelist is None:
nodelist = G.nodes()
nodeset = set(nodelist)
if len(nodelist) != len(nodeset):
msg = "Ambiguous ordering: `nodelist` contained duplicates."
raise nx.NetworkXError(msg)
nlen=len(nodelist)
undirected = not G.is_directed()
index=dict(zip(nodelist,range(nlen)))
M = np.zeros((nlen,nlen), dtype=dtype, order=order)
names=M.dtype.names
for u,v,attrs in G.edges_iter(data=True):
if (u in nodeset) and (v in nodeset):
i,j = index[u],index[v]
values=tuple([attrs[n] for n in names])
M[i,j] = values
if undirected:
M[j,i] = M[i,j]
return M.view(np.recarray)
def to_scipy_sparse_matrix(G, nodelist=None, dtype=None,
weight='weight', format='csr'):
"""Return the graph adjacency matrix as a SciPy sparse matrix.
Parameters
----------
G : graph
The NetworkX graph used to construct the NumPy matrix.
nodelist : list, optional
The rows and columns are ordered according to the nodes in `nodelist`.
If `nodelist` is None, then the ordering is produced by G.nodes().
dtype : NumPy data-type, optional
A valid NumPy dtype used to initialize the array. If None, then the
NumPy default is used.
weight : string or None optional (default='weight')
The edge attribute that holds the numerical value used for
the edge weight. If None then all edge weights are 1.
format : str in {'bsr', 'csr', 'csc', 'coo', 'lil', 'dia', 'dok'}
The type of the matrix to be returned (default 'csr'). For
some algorithms different implementations of sparse matrices
can perform better. See [1]_ for details.
Returns
-------
M : SciPy sparse matrix
Graph adjacency matrix.
Notes
-----
The matrix entries are populated using the edge attribute held in
parameter weight. When an edge does not have that attribute, the
value of the entry is 1.
For multiple edges the matrix values are the sums of the edge weights.
When `nodelist` does not contain every node in `G`, the matrix is built
from the subgraph of `G` that is induced by the nodes in `nodelist`.
Uses coo_matrix format. To convert to other formats specify the
format= keyword.
Examples
--------
>>> G = nx.MultiDiGraph()
>>> G.add_edge(0,1,weight=2)
>>> G.add_edge(1,0)
>>> G.add_edge(2,2,weight=3)
>>> G.add_edge(2,2)
>>> S = nx.to_scipy_sparse_matrix(G, nodelist=[0,1,2])
>>> print(S.todense())
[[0 2 0]
[1 0 0]
[0 0 4]]
References
----------
.. [1] Scipy Dev. References, "Sparse Matrices",
http://docs.scipy.org/doc/scipy/reference/sparse.html
"""
try:
from scipy import sparse
except ImportError:
raise ImportError(\
"to_scipy_sparse_matrix() requires scipy: http://scipy.org/ ")
if nodelist is None:
nodelist = G
nlen = len(nodelist)
if nlen == 0:
raise nx.NetworkXError("Graph has no nodes or edges")
if len(nodelist) != len(set(nodelist)):
msg = "Ambiguous ordering: `nodelist` contained duplicates."
raise nx.NetworkXError(msg)
index = dict(zip(nodelist,range(nlen)))
if G.number_of_edges() == 0:
row,col,data=[],[],[]
else:
row,col,data=zip(*((index[u],index[v],d.get(weight,1))
for u,v,d in G.edges_iter(nodelist, data=True)
if u in index and v in index))
if G.is_directed():
M = sparse.coo_matrix((data,(row,col)),shape=(nlen,nlen), dtype=dtype)
else:
# symmetrize matrix
M = sparse.coo_matrix((data+data,(row+col,col+row)),shape=(nlen,nlen),
dtype=dtype)
try:
return M.asformat(format)
except AttributeError:
raise nx.NetworkXError("Unknown sparse matrix format: %s"%format)
def from_scipy_sparse_matrix(A,create_using=None):
"""Return a graph from scipy sparse matrix adjacency list.
Parameters
----------
A : scipy sparse matrix
An adjacency matrix representation of a graph
create_using : NetworkX graph
Use specified graph for result. The default is Graph()
Examples
--------
>>> import scipy.sparse
>>> A=scipy.sparse.eye(2,2,1)
>>> G=nx.from_scipy_sparse_matrix(A)
"""
G=_prep_create_using(create_using)
# convert all formats to lil - not the most efficient way
AA=A.tolil()
n,m=AA.shape
if n!=m:
raise nx.NetworkXError(\
"Adjacency matrix is not square. nx,ny=%s"%(A.shape,))
G.add_nodes_from(range(n)) # make sure we get isolated nodes
for i,row in enumerate(AA.rows):
for pos,j in enumerate(row):
G.add_edge(i,j,**{'weight':AA.data[i][pos]})
return G
# fixture for nose tests
def setup_module(module):
from nose import SkipTest
try:
import numpy
except:
raise SkipTest("NumPy not available")
try:
import scipy
except:
raise SkipTest("SciPy not available")
|
|
#!/usr/bin/env python
# encoding: utf-8
"""Azkaban test module."""
from azkaban import *
from ConfigParser import RawConfigParser
from nose.tools import eq_, ok_, raises, nottest
from nose.plugins.skip import SkipTest
from os.path import relpath, abspath
from time import sleep, time
class TestFlatten(object):
def test_empty(self):
eq_(flatten({}), {})
def test_simple(self):
dct = {'a': 1, 'B': 2}
eq_(flatten(dct), dct)
def test_nested(self):
dct = {'a': 1, 'b': {'c': 3}}
eq_(flatten(dct), {'a': 1, 'b.c': 3})
class TestProject(object):
def setup(self):
self.project = Project('foo')
def test_add_file(self):
self.project.add_file(__file__, 'bar')
eq_(self.project._files, {__file__: 'bar'})
@raises(AzkabanError)
def test_missing_file(self):
self.project.add_file('bar')
  # omitted because it runs counter to what is needed.
# @raises(AzkabanError)
# def test_relative_file(self):
# self.project.add_file(relpath(__file__))
def test_add_duplicate_file(self):
self.project.add_file(__file__)
self.project.add_file(__file__)
eq_(self.project._files, {__file__: None})
@raises(AzkabanError)
def test_add_inconsistent_duplicate_file(self):
self.project.add_file(__file__)
self.project.add_file(__file__, 'this.py')
def test_add_job(self):
class OtherJob(Job):
test = None
def on_add(self, project, name):
self.test = (project.name, name)
job = OtherJob()
self.project.add_job('bar', job)
eq_(job.test, ('foo', 'bar'))
def test_add_job_with_path(self):
class OtherPathJob(Job):
test = None
def on_add(self, project, name, path):
self.test = (project.name, name, 'path')
job = OtherPathJob()
self.project.add_job('bar', job, 'path')
eq_(job.test, ('path/foo', 'bar'))
@raises(AzkabanError)
def test_add_duplicate_job(self):
self.project.add_job('bar', Job())
self.project.add_job('bar', Job())
def test_merge_project(self):
job_bar = Job()
self.project.add_job('bar', job_bar)
file_bar = __file__
self.project.add_file(file_bar, 'bar')
project2 = Project('qux')
job_baz = Job()
project2.add_job('baz', job_baz)
file_baz = abspath('azkaban.py')
project2.add_file(file_baz, 'baz')
self.project.merge(project2)
eq_(self.project.name, 'foo')
eq_(self.project._jobs, {'bar': job_bar, 'baz': job_baz})
eq_(self.project._files, {file_bar: 'bar', file_baz: 'baz'})
@raises(AzkabanError)
def test_build_empty(self):
with temppath() as path:
self.project.build(path)
def test_build_single_job(self):
class OtherJob(Job):
test = None
def on_build(self, project, name):
self.test = (project.name, name)
job = OtherJob({'a': 2})
self.project.add_job('bar', job)
with temppath() as path:
self.project.build(path)
eq_(job.test, ('foo', 'bar'))
reader = ZipFile(path)
try:
ok_('bar.job' in reader.namelist())
eq_(reader.read('bar.job'), 'a=2\n')
finally:
reader.close()
def test_build_with_file(self):
self.project.add_file(__file__.rstrip('c'), 'this.py')
with temppath() as path:
self.project.build(path)
reader = ZipFile(path)
try:
ok_('this.py' in reader.namelist())
eq_(reader.read('this.py').split('\n')[5], 'from azkaban import *')
finally:
reader.close()
def test_build_multiple_jobs(self):
self.project.add_job('foo', Job({'a': 2}))
self.project.add_job('bar', Job({'b': 3}))
self.project.add_file(__file__, 'this.py')
with temppath() as path:
self.project.build(path)
reader = ZipFile(path)
try:
ok_('foo.job' in reader.namelist())
ok_('bar.job' in reader.namelist())
ok_('this.py' in reader.namelist())
eq_(reader.read('foo.job'), 'a=2\n')
finally:
reader.close()
@raises(AzkabanError)
def test_missing_alias(self):
self.project.upload(alias='bar')
class TestJob(object):
def test_options(self):
eq_(Job().options, ())
eq_(Job({'foo': 1}).options, ({'foo': 1}, ))
eq_(Job({'foo': 1}, {}).options, ({'foo': 1}, {}))
def test_build_options(self):
eq_(Job().build_options, {})
eq_(Job({'foo': 1}, {}).build_options, {'foo': '1'})
eq_(Job({'foo': 1}, {'foo': 2}).build_options, {'foo': '2'})
def test_generate_simple(self):
job = Job({'a': 1, 'b': {'c': 2, 'd': 3}})
with temppath() as path:
job.build(path)
with open(path) as reader:
eq_(reader.read(), 'a=1\nb.c=2\nb.d=3\n')
def test_generate_with_defaults(self):
defaults = {'b': {'d': 4}, 'e': 5}
job = Job(defaults, {'a': 1, 'b': {'c': 2, 'd': 3}})
with temppath() as path:
job.build(path)
with open(path) as reader:
eq_(reader.read(), 'a=1\nb.c=2\nb.d=3\ne=5\n')
def test_generate_with_dependencies(self):
foo = Job()
bar = Job({'a': 3})
job = Job({'a': 2, 'dependencies': 'bar,foo'})
with temppath() as path:
job.build(path)
with open(path) as reader:
eq_(reader.read(), 'a=2\ndependencies=bar,foo\n')
class TestPigJob(object):
def test_init(self):
with temppath() as path:
with open(path, 'w') as writer:
writer.write('-- pig script')
job = PigJob(path, {'a': 2}, {'a': 3, 'b': 4}, {'type': 'noop'})
with temppath() as tpath:
job.build(tpath)
with open(tpath) as reader:
eq_(
reader.read(),
'a=3\nb=4\npig.script=%s\ntype=noop\n' % (path.lstrip('/'), )
)
def test_type(self):
class OtherPigJob(PigJob):
type = 'foo'
with temppath() as path:
with open(path, 'w') as writer:
writer.write('-- pig script')
job = OtherPigJob(path, {'type': 'bar'})
with temppath() as tpath:
job.build(tpath)
with open(tpath) as reader:
eq_(
reader.read(),
'pig.script=%s\ntype=bar\n' % (path.lstrip('/'), )
)
@raises(AzkabanError)
def test_missing(self):
PigJob('foo.pig')
def test_on_add(self):
project = Project('pj')
with temppath() as path:
with open(path, 'w') as writer:
writer.write('-- pig script')
project.add_job('foo', PigJob(path))
eq_(project._files, {path: None})
class TestUpload(object):
# requires valid credentials and an 'azkabancli' project on the server
last_request = time()
url = None
valid_alias = None
@classmethod
def setup_class(cls):
# skip tests if no valid credentials found
parser = RawConfigParser()
parser.read(Project.rcpath)
for section in parser.sections():
url = parser.get(section, 'url').rstrip('/')
if parser.has_option(section, 'session_id'):
session_id = parser.get(section, 'session_id')
if not post(
'%s/manager' % (url, ),
{'session.id': session_id},
verify=False
).text:
cls.url = url
cls.valid_alias = section
return
def wait(self, ms=2000):
# wait before making a request
delay = time() - self.last_request
sleep(max(0, ms * 1e-3 - delay))
self.last_request = time()
def setup(self):
if not self.valid_alias:
raise SkipTest
self.wait()
self.project = Project('azkabancli')
@raises(ValueError)
def test_bad_parameters(self):
self.project.upload(user='bar')
@raises(AzkabanError)
def test_invalid_project(self):
project = Project('foobarzz')
project.upload(alias=self.valid_alias)
@raises(AzkabanError)
def test_bad_url(self):
self.project._get_credentials('http://foo', password='bar')
@raises(AzkabanError)
def test_missing_protocol(self):
self.project._get_credentials('foo', password='bar')
@raises(AzkabanError)
def test_bad_password(self):
self.project._get_credentials(self.url, password='bar')
def test_upload_simple(self):
self.project.add_job('test', Job({'type': 'noop'}))
res = self.project.upload(alias=self.valid_alias)
eq_(['projectId', 'version'], res.keys())
@raises(AzkabanError)
def test_upload_missing_type(self):
self.project.add_job('test', Job())
self.project.upload(alias=self.valid_alias)
def test_upload_pig_job(self):
with temppath() as path:
with open(path, 'w') as writer:
writer.write('-- pig script')
self.project.add_job('foo', PigJob(path))
res = self.project.upload(alias=self.valid_alias)
eq_(['projectId', 'version'], res.keys())
|
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import re
import time
from pylib import flag_changer
from pylib.base import base_test_result
from pylib.base import test_run
from pylib.constants import keyevent
from pylib.device import device_errors
from pylib.local.device import local_device_test_run
TIMEOUT_ANNOTATIONS = [
('Manual', 10 * 60 * 60),
('IntegrationTest', 30 * 60),
('External', 10 * 60),
('EnormousTest', 10 * 60),
('LargeTest', 5 * 60),
('MediumTest', 3 * 60),
('SmallTest', 1 * 60),
]
# TODO(jbudorick): Make this private once the instrumentation test_runner is
# deprecated.
def DidPackageCrashOnDevice(package_name, device):
# Dismiss any error dialogs. Limit the number in case we have an error
# loop or we are failing to dismiss.
try:
for _ in xrange(10):
package = _DismissCrashDialog(device)
if not package:
return False
# Assume test package convention of ".test" suffix
if package in package_name:
return True
except device_errors.CommandFailedError:
logging.exception('Error while attempting to dismiss crash dialog.')
return False
_CURRENT_FOCUS_CRASH_RE = re.compile(
r'\s*mCurrentFocus.*Application (Error|Not Responding): (\S+)}')
def _DismissCrashDialog(device):
# TODO(jbudorick): Try to grep the output on the device instead of using
# large_output if/when DeviceUtils exposes a public interface for piped
# shell command handling.
for l in device.RunShellCommand(
['dumpsys', 'window', 'windows'], check_return=True, large_output=True):
m = re.match(_CURRENT_FOCUS_CRASH_RE, l)
if m:
device.SendKeyEvent(keyevent.KEYCODE_DPAD_RIGHT)
device.SendKeyEvent(keyevent.KEYCODE_DPAD_RIGHT)
device.SendKeyEvent(keyevent.KEYCODE_ENTER)
return m.group(2)
return None
class LocalDeviceInstrumentationTestRun(
local_device_test_run.LocalDeviceTestRun):
def __init__(self, env, test_instance):
super(LocalDeviceInstrumentationTestRun, self).__init__(env, test_instance)
self._flag_changers = {}
def TestPackage(self):
return None
def SetUp(self):
def substitute_external_storage(d, external_storage):
if not d:
return external_storage
elif isinstance(d, list):
return '/'.join(p if p else external_storage for p in d)
else:
return d
def individual_device_set_up(dev, host_device_tuples):
dev.Install(self._test_instance.apk_under_test)
dev.Install(self._test_instance.test_apk)
external_storage = dev.GetExternalStoragePath()
host_device_tuples = [
(h, substitute_external_storage(d, external_storage))
for h, d in host_device_tuples]
logging.info('instrumentation data deps:')
for h, d in host_device_tuples:
logging.info('%r -> %r', h, d)
dev.PushChangedFiles(host_device_tuples)
if self._test_instance.flags:
if not self._test_instance.package_info:
logging.error("Couldn't set flags: no package info")
elif not self._test_instance.package_info.cmdline_file:
logging.error("Couldn't set flags: no cmdline_file")
else:
self._flag_changers[str(dev)] = flag_changer.FlagChanger(
dev, self._test_instance.package_info.cmdline_file)
logging.debug('Attempting to set flags: %r',
self._test_instance.flags)
self._flag_changers[str(dev)].AddFlags(self._test_instance.flags)
self._env.parallel_devices.pMap(
individual_device_set_up,
self._test_instance.GetDataDependencies())
def TearDown(self):
def individual_device_tear_down(dev):
if str(dev) in self._flag_changers:
self._flag_changers[str(dev)].Restore()
self._env.parallel_devices.pMap(individual_device_tear_down)
#override
def _CreateShards(self, tests):
return tests
#override
def _GetTests(self):
return self._test_instance.GetTests()
#override
def _GetTestName(self, test):
return '%s#%s' % (test['class'], test['method'])
#override
def _RunTest(self, device, test):
extras = self._test_instance.GetHttpServerEnvironmentVars()
if isinstance(test, list):
if not self._test_instance.driver_apk:
raise Exception('driver_apk does not exist. '
'Please build it and try again.')
def name_and_timeout(t):
n = self._GetTestName(t)
i = self._GetTimeoutFromAnnotations(t['annotations'], n)
return (n, i)
test_names, timeouts = zip(*(name_and_timeout(t) for t in test))
test_name = ','.join(test_names)
target = '%s/%s' % (
self._test_instance.driver_package,
self._test_instance.driver_name)
extras.update(
self._test_instance.GetDriverEnvironmentVars(
test_list=test_names))
timeout = sum(timeouts)
else:
test_name = self._GetTestName(test)
target = '%s/%s' % (
self._test_instance.test_package, self._test_instance.test_runner)
extras['class'] = test_name
timeout = self._GetTimeoutFromAnnotations(test['annotations'], test_name)
logging.info('preparing to run %s: %s' % (test_name, test))
time_ms = lambda: int(time.time() * 1e3)
start_ms = time_ms()
output = device.StartInstrumentation(
target, raw=True, extras=extras, timeout=timeout, retries=0)
duration_ms = time_ms() - start_ms
# TODO(jbudorick): Make instrumentation tests output a JSON so this
# doesn't have to parse the output.
logging.debug('output from %s:', test_name)
for l in output:
logging.debug(' %s', l)
result_code, result_bundle, statuses = (
self._test_instance.ParseAmInstrumentRawOutput(output))
results = self._test_instance.GenerateTestResults(
result_code, result_bundle, statuses, start_ms, duration_ms)
if DidPackageCrashOnDevice(self._test_instance.test_package, device):
for r in results:
if r.GetType() == base_test_result.ResultType.UNKNOWN:
r.SetType(base_test_result.ResultType.CRASH)
return results
#override
def _ShouldShard(self):
return True
@staticmethod
def _GetTimeoutFromAnnotations(annotations, test_name):
    for k, v in TIMEOUT_ANNOTATIONS:
      if k in annotations:
        timeout = v
        break
    else:
      logging.warning('Using default 1 minute timeout for %s', test_name)
      timeout = 60
try:
scale = int(annotations.get('TimeoutScale', 1))
except ValueError as e:
logging.warning("Non-integer value of TimeoutScale ignored. (%s)", str(e))
scale = 1
timeout *= scale
return timeout
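# Illustrative sketch (not part of the original file; the helper name and the
# annotation values are hypothetical). Given the annotation table above, a test
# annotated with 'MediumTest' and 'TimeoutScale': '2' resolves to
# 3 * 60 * 2 = 360 seconds: the first matching annotation wins and the result
# is then multiplied by TimeoutScale.
def _example_timeout_lookup():
  annotations = {'MediumTest': None, 'TimeoutScale': '2'}
  return LocalDeviceInstrumentationTestRun._GetTimeoutFromAnnotations(
      annotations, 'org.chromium.Foo#testBar')  # -> 360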
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops.matrix_inverse."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import stateless_random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
def _AddTest(test_class, op_name, testcase_name, fn):
test_name = "_".join(["test", op_name, testcase_name])
if hasattr(test_class, test_name):
raise RuntimeError("Test %s defined more than once" % test_name)
setattr(test_class, test_name, fn)
class SvdOpTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes(use_gpu=True)
def testWrongDimensions(self):
# The input to svd should be a tensor of at least rank 2.
scalar = constant_op.constant(1.)
with self.assertRaisesRegex((ValueError, errors_impl.InvalidArgumentError),
"rank.* 2.*0"):
linalg_ops.svd(scalar)
vector = constant_op.constant([1., 2.])
with self.assertRaisesRegex((ValueError, errors_impl.InvalidArgumentError),
"rank.* 2.*1"):
linalg_ops.svd(vector)
@test_util.run_in_graph_and_eager_modes(use_gpu=True)
def DISABLED_testBadInputs(self):
# TODO(b/185822300): re-enable after the bug is fixed in CUDA-11.x
# The input to svd should be a tensor of at least rank 2.
for bad_val in [np.nan, np.inf]:
matrix = np.array([[1, bad_val], [0, 1]])
s, u, v = linalg_ops.svd(matrix, compute_uv=True)
s, u, v = self.evaluate([s, u, v])
for i in range(2):
self.assertTrue(np.isnan(s[i]))
for j in range(2):
self.assertTrue(np.isnan(u[i, j]))
self.assertTrue(np.isnan(v[i, j]))
@test_util.run_in_graph_and_eager_modes(use_gpu=True)
def testExecuteMultipleWithoutError(self):
all_ops = []
shape = [6, 5]
seed = [42, 24]
for compute_uv_ in True, False:
for full_matrices_ in True, False:
matrix1 = stateless_random_ops.stateless_random_normal(shape, seed)
matrix2 = stateless_random_ops.stateless_random_normal(shape, seed)
self.assertAllEqual(matrix1, matrix2)
if compute_uv_:
s1, u1, v1 = linalg_ops.svd(
matrix1, compute_uv=compute_uv_, full_matrices=full_matrices_)
s2, u2, v2 = linalg_ops.svd(
matrix2, compute_uv=compute_uv_, full_matrices=full_matrices_)
all_ops += [s1, s2, u1, u2, v1, v2]
else:
s1 = linalg_ops.svd(
matrix1, compute_uv=compute_uv_, full_matrices=full_matrices_)
s2 = linalg_ops.svd(
matrix2, compute_uv=compute_uv_, full_matrices=full_matrices_)
all_ops += [s1, s2]
val = self.evaluate(all_ops)
for i in range(0, len(val), 2):
self.assertAllEqual(val[i], val[i + 1])
def _GetSvdOpTest(dtype_, shape_, use_static_shape_, compute_uv_,
full_matrices_):
def CompareSingularValues(self, x, y, tol):
atol = (x[0] + y[0]) * tol if len(x) else tol
self.assertAllClose(x, y, atol=atol)
def CompareSingularVectors(self, x, y, rank, tol):
# We only compare the first 'rank' singular vectors since the
# remainder form an arbitrary orthonormal basis for the
# (row- or column-) null space, whose exact value depends on
# implementation details. Notice that since we check that the
# matrices of singular vectors are unitary elsewhere, we do
# implicitly test that the trailing vectors of x and y span the
# same space.
x = x[..., 0:rank]
y = y[..., 0:rank]
# Singular vectors are only unique up to sign (complex phase factor for
# complex matrices), so we normalize the sign first.
sum_of_ratios = np.sum(np.divide(y, x), -2, keepdims=True)
phases = np.divide(sum_of_ratios, np.abs(sum_of_ratios))
x *= phases
self.assertAllClose(x, y, atol=2 * tol)
def CheckApproximation(self, a, u, s, v, full_matrices_, tol):
# Tests that a ~= u*diag(s)*transpose(v).
batch_shape = a.shape[:-2]
m = a.shape[-2]
n = a.shape[-1]
diag_s = math_ops.cast(array_ops.matrix_diag(s), dtype=dtype_)
if full_matrices_:
if m > n:
zeros = array_ops.zeros(batch_shape + (m - n, n), dtype=dtype_)
diag_s = array_ops.concat([diag_s, zeros], a.ndim - 2)
elif n > m:
zeros = array_ops.zeros(batch_shape + (m, n - m), dtype=dtype_)
diag_s = array_ops.concat([diag_s, zeros], a.ndim - 1)
a_recon = math_ops.matmul(u, diag_s)
a_recon = math_ops.matmul(a_recon, v, adjoint_b=True)
self.assertAllClose(a_recon, a, rtol=tol, atol=tol)
def CheckUnitary(self, x, tol):
# Tests that x[...,:,:]^H * x[...,:,:] is close to the identity.
xx = math_ops.matmul(x, x, adjoint_a=True)
identity = array_ops.matrix_band_part(array_ops.ones_like(xx), 0, 0)
self.assertAllClose(identity, xx, atol=tol)
@test_util.run_in_graph_and_eager_modes(use_gpu=True)
def Test(self):
if not use_static_shape_ and context.executing_eagerly():
return
is_complex = dtype_ in (np.complex64, np.complex128)
is_single = dtype_ in (np.float32, np.complex64)
tol = 3e-4 if is_single else 1e-12
if test.is_gpu_available():
# The gpu version returns results that are much less accurate.
tol *= 100
np.random.seed(42)
x_np = np.random.uniform(
low=-1.0, high=1.0, size=np.prod(shape_)).reshape(shape_).astype(dtype_)
if is_complex:
x_np += 1j * np.random.uniform(
low=-1.0, high=1.0,
size=np.prod(shape_)).reshape(shape_).astype(dtype_)
if use_static_shape_:
x_tf = constant_op.constant(x_np)
else:
x_tf = array_ops.placeholder(dtype_)
if compute_uv_:
s_tf, u_tf, v_tf = linalg_ops.svd(
x_tf, compute_uv=compute_uv_, full_matrices=full_matrices_)
if use_static_shape_:
s_tf_val, u_tf_val, v_tf_val = self.evaluate([s_tf, u_tf, v_tf])
else:
with self.session() as sess:
s_tf_val, u_tf_val, v_tf_val = sess.run(
[s_tf, u_tf, v_tf], feed_dict={x_tf: x_np})
else:
s_tf = linalg_ops.svd(
x_tf, compute_uv=compute_uv_, full_matrices=full_matrices_)
if use_static_shape_:
s_tf_val = self.evaluate(s_tf)
else:
with self.session() as sess:
s_tf_val = sess.run(s_tf, feed_dict={x_tf: x_np})
if compute_uv_:
u_np, s_np, v_np = np.linalg.svd(
x_np, compute_uv=compute_uv_, full_matrices=full_matrices_)
else:
s_np = np.linalg.svd(
x_np, compute_uv=compute_uv_, full_matrices=full_matrices_)
# We explicitly avoid the situation where numpy eliminates a first
# dimension that is equal to one.
s_np = np.reshape(s_np, s_tf_val.shape)
CompareSingularValues(self, s_np, s_tf_val, tol)
if compute_uv_:
CompareSingularVectors(self, u_np, u_tf_val, min(shape_[-2:]), tol)
CompareSingularVectors(self, np.conj(np.swapaxes(v_np, -2, -1)), v_tf_val,
min(shape_[-2:]), tol)
CheckApproximation(self, x_np, u_tf_val, s_tf_val, v_tf_val,
full_matrices_, tol)
CheckUnitary(self, u_tf_val, tol)
CheckUnitary(self, v_tf_val, tol)
return Test
class SvdGradOpTest(test.TestCase):
pass # Filled in below
def _NormalizingSvd(tf_a, full_matrices_):
tf_s, tf_u, tf_v = linalg_ops.svd(
tf_a, compute_uv=True, full_matrices=full_matrices_)
# Singular vectors are only unique up to an arbitrary phase. We normalize
# the vectors such that the first component of u (if m >=n) or v (if n > m)
# have phase 0.
m = tf_a.shape[-2]
n = tf_a.shape[-1]
if m >= n:
top_rows = tf_u[..., 0:1, :]
else:
top_rows = tf_v[..., 0:1, :]
if tf_u.dtype.is_complex:
angle = -math_ops.angle(top_rows)
phase = math_ops.complex(math_ops.cos(angle), math_ops.sin(angle))
else:
phase = math_ops.sign(top_rows)
tf_u *= phase[..., :m]
tf_v *= phase[..., :n]
return tf_s, tf_u, tf_v
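# Illustrative numpy-only sketch (not part of the original test; the helper
# name is hypothetical, and it assumes a real matrix with at least as many rows
# as columns). It applies the same phase normalization to a plain numpy SVD,
# fixing the sign of the first row of u so that results from different
# implementations become directly comparable.
def _example_normalize_numpy_svd(a):
  u, s, vh = np.linalg.svd(a, full_matrices=False)
  phase = np.sign(u[..., 0:1, :])   # real case; complex would use the angle
  # Flipping column j of u and row j of v^H by the same sign leaves
  # u * diag(s) * v^H unchanged while making the first row of u non-negative.
  return s, u * phase, vh * phase.swapaxes(-2, -1)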
def _GetSvdGradOpTest(dtype_, shape_, compute_uv_, full_matrices_):
@test_util.run_in_graph_and_eager_modes(use_gpu=True)
def Test(self):
def RandomInput():
np.random.seed(42)
a = np.random.uniform(low=-1.0, high=1.0, size=shape_).astype(dtype_)
if dtype_ in [np.complex64, np.complex128]:
a += 1j * np.random.uniform(
low=-1.0, high=1.0, size=shape_).astype(dtype_)
return a
# Optimal stepsize for central difference is O(epsilon^{1/3}).
# See Equation (21) in:
# http://www.karenkopecky.net/Teaching/eco613614/Notes_NumericalDifferentiation.pdf
# TODO(rmlarsen): Move step size control to gradient checker.
epsilon = np.finfo(dtype_).eps
delta = 0.25 * epsilon**(1.0 / 3.0)
if dtype_ in [np.float32, np.complex64]:
tol = 3e-2
else:
tol = 1e-6
if compute_uv_:
funcs = [
lambda a: _NormalizingSvd(a, full_matrices_)[0],
lambda a: _NormalizingSvd(a, full_matrices_)[1],
lambda a: _NormalizingSvd(a, full_matrices_)[2]
]
else:
funcs = [lambda a: linalg_ops.svd(a, compute_uv=False)]
for f in funcs:
theoretical, numerical = gradient_checker_v2.compute_gradient(
f, [RandomInput()], delta=delta)
self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)
return Test
class SvdGradGradOpTest(test.TestCase):
pass # Filled in below
def _GetSvdGradGradOpTest(dtype_, shape_, compute_uv_, full_matrices_):
@test_util.run_v1_only("b/120545219")
def Test(self):
np.random.seed(42)
a = np.random.uniform(low=-1.0, high=1.0, size=shape_).astype(dtype_)
if dtype_ in [np.complex64, np.complex128]:
a += 1j * np.random.uniform(
low=-1.0, high=1.0, size=shape_).astype(dtype_)
# Optimal stepsize for central difference is O(epsilon^{1/3}).
# See Equation (21) in:
# http://www.karenkopecky.net/Teaching/eco613614/Notes_NumericalDifferentiation.pdf
# TODO(rmlarsen): Move step size control to gradient checker.
epsilon = np.finfo(dtype_).eps
delta = 0.1 * epsilon**(1.0 / 3.0)
tol = 1e-5
with self.session():
tf_a = constant_op.constant(a)
if compute_uv_:
tf_s, tf_u, tf_v = _NormalizingSvd(tf_a, full_matrices_)
outputs = [tf_s, tf_u, tf_v]
else:
tf_s = linalg_ops.svd(tf_a, compute_uv=False)
outputs = [tf_s]
outputs_sums = [math_ops.reduce_sum(o) for o in outputs]
tf_func_outputs = math_ops.add_n(outputs_sums)
grad = gradients_impl.gradients(tf_func_outputs, tf_a)[0]
x_init = np.random.uniform(
low=-1.0, high=1.0, size=shape_).astype(dtype_)
if dtype_ in [np.complex64, np.complex128]:
x_init += 1j * np.random.uniform(
low=-1.0, high=1.0, size=shape_).astype(dtype_)
theoretical, numerical = gradient_checker.compute_gradient(
tf_a,
tf_a.get_shape().as_list(),
grad,
grad.get_shape().as_list(),
x_init_value=x_init,
delta=delta)
self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)
return Test
class SVDBenchmark(test.Benchmark):
shapes = [
(4, 4),
(8, 8),
(16, 16),
(101, 101),
(256, 256),
(1024, 1024),
(2048, 2048),
(1, 8, 8),
(10, 8, 8),
(100, 8, 8),
(1000, 8, 8),
(1, 32, 32),
(10, 32, 32),
(100, 32, 32),
(1000, 32, 32),
(1, 256, 256),
(10, 256, 256),
(100, 256, 256),
]
def benchmarkSVDOp(self):
for shape_ in self.shapes:
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device("/cpu:0"):
matrix_value = np.random.uniform(
low=-1.0, high=1.0, size=shape_).astype(np.float32)
matrix = variables.Variable(matrix_value)
u, s, v = linalg_ops.svd(matrix)
self.evaluate(variables.global_variables_initializer())
self.run_op_benchmark(
sess,
control_flow_ops.group(u, s, v),
min_iters=25,
name="SVD_cpu_{shape}".format(shape=shape_))
if test.is_gpu_available(True):
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device("/device:GPU:0"):
matrix_value = np.random.uniform(
low=-1.0, high=1.0, size=shape_).astype(np.float32)
matrix = variables.Variable(matrix_value)
u, s, v = linalg_ops.svd(matrix)
self.evaluate(variables.global_variables_initializer())
self.run_op_benchmark(
sess,
control_flow_ops.group(u, s, v),
min_iters=25,
name="SVD_gpu_{shape}".format(shape=shape_))
if __name__ == "__main__":
dtypes_to_test = [np.float32, np.float64, np.complex64, np.complex128]
for compute_uv in False, True:
for full_matrices in False, True:
for dtype in dtypes_to_test:
for rows in 0, 1, 2, 5, 10, 32, 100:
for cols in 0, 1, 2, 5, 10, 32, 100:
for batch_dims in [(), (3,)] + [(3, 2)] * (max(rows, cols) < 10):
full_shape = batch_dims + (rows, cols)
for use_static_shape in set([True, False]):
name = "%s_%s_static_shape_%s__compute_uv_%s_full_%s" % (
dtype.__name__, "_".join(map(str, full_shape)),
use_static_shape, compute_uv, full_matrices)
_AddTest(
SvdOpTest, "Svd", name,
_GetSvdOpTest(dtype, full_shape, use_static_shape,
compute_uv, full_matrices))
for compute_uv in False, True:
for full_matrices in False, True:
dtypes = ([np.float32, np.float64] + [np.complex64, np.complex128] *
(not compute_uv))
for dtype in dtypes:
mat_shapes = [(10, 11), (11, 10), (11, 11), (2, 2, 2, 3)]
if not full_matrices or not compute_uv:
mat_shapes += [(5, 11), (11, 5)]
for mat_shape in mat_shapes:
for batch_dims in [(), (3,)]:
full_shape = batch_dims + mat_shape
name = "%s_%s_compute_uv_%s_full_%s" % (dtype.__name__, "_".join(
map(str, full_shape)), compute_uv, full_matrices)
_AddTest(
SvdGradOpTest, "SvdGrad", name,
_GetSvdGradOpTest(dtype, full_shape, compute_uv, full_matrices))
# The results are too inaccurate for float32.
if dtype in (np.float64, np.complex128):
_AddTest(
SvdGradGradOpTest, "SvdGradGrad", name,
_GetSvdGradGradOpTest(dtype, full_shape, compute_uv,
full_matrices))
test.main()
|
|
#!/usr/bin/env python
"""
SMILE Encode
"""
import re
import sys
import struct
import decimal
import copy
import logging
import json
import json.encoder
from pysmile.constants import *
from pysmile import util
log = logging.getLogger()
if not log.handlers:
log.addHandler(logging.NullHandler())
__author__ = 'Jonathan Hosmer'
def _utf_8_encode(s):
try:
return s.encode('UTF-8')
except UnicodeEncodeError:
return s
class SMILEEncodeError(StandardError):
pass
class SharedStringNode(object):
"""
Helper class used for keeping track of possibly shareable String references (for field names
and/or short String values)
"""
def __init__(self, value, index, nxt):
self.value = value
self.index = index
self.next = nxt
class SmileGenerator(object):
"""
To simplify certain operations, we require output buffer length
to allow outputting of contiguous 256 character UTF-8 encoded String
value. Length of the longest UTF-8 code point (from Java char) is 3 bytes,
and we need both initial token byte and single-byte end marker
so we get following value.
Note: actually we could live with shorter one; absolute minimum would be for encoding
64-character Strings.
"""
def __init__(self, shared_keys=True, shared_values=True, encode_as_7bit=True):
"""
SmileGenerator Initializer
:param bool encode_as_7bit: (optional - Default: `True`) Encode raw data as 7-bit
:param bool shared_keys: (optional - Default: `True`) Shared Key String References
:param bool shared_values: (optional - Default: `True`) Shared Value String References
"""
# Encoded data
self.output = bytearray()
# Shared Key Strings
self.shared_keys = []
# Shared Value Strings
self.shared_values = []
self.share_keys = bool(shared_keys)
self.share_values = bool(shared_values)
self.encode_as_7bit = bool(encode_as_7bit)
def write_header(self):
"""
Method that can be called to explicitly write Smile document header.
Note that usually you do not need to call this for first document to output,
but rather only if you intend to write multiple root-level documents
with same generator (and even in that case this is optional thing to do).
As a result usually only {@link SmileFactory} calls this method.
"""
last = HEADER_BYTE_4
if self.share_keys:
last |= HEADER_BIT_HAS_SHARED_NAMES
if self.share_values:
last |= HEADER_BIT_HAS_SHARED_STRING_VALUES
if not self.encode_as_7bit:
last |= HEADER_BIT_HAS_RAW_BINARY
self.write_bytes(HEADER_BYTE_1, HEADER_BYTE_2, HEADER_BYTE_3, int(last))
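    # Illustrative usage sketch (not part of the original module), kept as
    # comments so the class body is unchanged; the values are hypothetical:
    #   gen = SmileGenerator()
    #   gen.write_header()
    #   gen.write_start_object()
    #   gen.write_string_field('hello', 'world')
    #   gen.write_end_object()
    #   bytes(gen.output)   # the encoded Smile document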
def write_end_marker(self):
"""Write optional end marker (BYTE_MARKER_END_OF_CONTENT - 0xFF)"""
self.write_byte(BYTE_MARKER_END_OF_CONTENT)
def write_field_name(self, name):
"""
Write Field Name
:param str name: Name
"""
str_len = len(name)
if not name:
return self.write_byte(TOKEN_KEY_EMPTY_STRING)
# First: is it something we can share?
if self.share_keys:
ix = self._find_seen_name(name)
if ix >= 0:
return self.write_shared_name_reference(ix)
if str_len > MAX_SHORT_NAME_UNICODE_BYTES:
# can not be a 'short' String; off-line (rare case)
return self.write_non_short_field_name(name)
if isinstance(name, unicode):
utf_8_name = name.encode('utf-8')
if len(utf_8_name) == str_len:
return self.write_field_name(utf_8_name)
if len(utf_8_name) <= MAX_SHORT_NAME_UNICODE_BYTES:
# yes, is short indeed
# note: since 2 is the smallest allowed length here, the offset (-2) differs from the one used for short ASCII names (-1)
type_token = int(((TOKEN_PREFIX_KEY_UNICODE - 2) + len(utf_8_name)))
self.write_bytes(type_token, utf_8_name)
else:
self.write_bytes(TOKEN_KEY_LONG_STRING, utf_8_name, BYTE_MARKER_END_OF_STRING)
if self.share_keys:
self._add_seen_name(utf_8_name)
else: # if isinstance(name, str):
if str_len <= MAX_SHORT_NAME_ASCII_BYTES:
self.write_bytes(int(((TOKEN_PREFIX_KEY_ASCII - 1) + str_len)), name)
else:
self.write_bytes(TOKEN_KEY_LONG_STRING, name, BYTE_MARKER_END_OF_STRING)
if self.share_keys:
self._add_seen_name(name)
def write_non_short_field_name(self, name):
"""
Write a field name that is too long for the short-name encodings
:param basestring name: Name
"""
self.write_byte(TOKEN_KEY_LONG_STRING)
try:
utf_8_name = name.encode('utf-8')
except UnicodeEncodeError:
utf_8_name = name
self.write_bytes(utf_8_name)
if self.share_keys:
self._add_seen_name(name)
self.write_byte(BYTE_MARKER_END_OF_STRING)
def write_string_field(self, name, value):
"""
Write String Field
:param str name: Name
:param str value: Value
"""
self.write_field_name(name)
self.write_string(value)
def write_string(self, text):
"""
Write String
:param str text: String text
"""
if text is None:
return self.write_null()
if not text:
return self.write_byte(TOKEN_LITERAL_EMPTY_STRING)
# Longer string handling off-lined
if len(text) > MAX_SHARED_STRING_LENGTH_BYTES:
return self.write_non_shared_string(text)
# Then: is it something we can share?
if self.share_values:
ix = self._find_seen_string_value(text)
if ix >= 0:
return self.write_shared_string_value_reference(ix)
if isinstance(text, unicode):
utf_8_text = text.encode('utf-8')
if len(utf_8_text) <= MAX_SHORT_VALUE_STRING_BYTES:
if self.share_values:
self._add_seen_string_value(text)
if len(utf_8_text) == len(text):
self.write_byte(int((TOKEN_PREFIX_TINY_ASCII - 1) + len(utf_8_text)))
else:
self.write_byte(int((TOKEN_PREFIX_TINY_UNICODE - 2) + len(utf_8_text)))
self.write_bytes(utf_8_text)
else:
if len(utf_8_text) == len(text):
self.write_byte(TOKEN_BYTE_LONG_STRING_ASCII)
else:
self.write_byte(TOKEN_MISC_LONG_TEXT_UNICODE)
self.write_bytes(utf_8_text, BYTE_MARKER_END_OF_STRING)
else:
if len(text) <= MAX_SHORT_VALUE_STRING_BYTES:
if self.share_values:
self._add_seen_string_value(text)
self.write_bytes(int((TOKEN_PREFIX_TINY_ASCII - 1) + len(text)), text)
else:
self.write_bytes(TOKEN_BYTE_LONG_STRING_ASCII, text, BYTE_MARKER_END_OF_STRING)
def write_start_array(self):
"""Write start array token"""
self.write_byte(TOKEN_LITERAL_START_ARRAY)
def write_end_array(self):
"""Write end array token"""
self.write_byte(TOKEN_LITERAL_END_ARRAY)
def write_start_object(self):
"""Write start object token"""
self.write_byte(TOKEN_LITERAL_START_OBJECT)
def write_end_object(self):
"""Write end object token"""
self.write_byte(TOKEN_LITERAL_END_OBJECT)
def write_shared_name_reference(self, ix):
"""
Write Shared Name Ref
:param int ix: Index
"""
if ix > len(self.shared_keys) - 1:
raise ValueError(
'Trying to write shared name with index {} but have only seen {}!'.format(
ix, len(self.shared_keys)))
if ix < 64:
self.write_byte(int((TOKEN_PREFIX_KEY_SHARED_SHORT + ix)))
else:
self.write_bytes((int((TOKEN_PREFIX_KEY_SHARED_LONG + (ix >> 8)))), int(ix & 0xFF))
def write_shared_string_value_reference(self, ix):
"""
Write shared string
:param int ix: Index
"""
if ix > len(self.shared_values)-1:
raise ValueError(
'Internal error: trying to write shared String value with index {}; but have '
'only seen {} so far!'.format(ix, len(self.shared_values)))
if ix < 31:
# add 1, as byte 0 is omitted
self.write_byte(TOKEN_PREFIX_SHARED_STRING_SHORT + 1 + ix)
else:
self.write_bytes(TOKEN_PREFIX_SHARED_STRING_LONG + (ix >> 8), int(ix & 0xFF))
def write_non_shared_string(self, text):
"""
Helper method called to handle cases where String value to write is known to be long
enough not to be shareable.
:param str text: Text
"""
if isinstance(text, unicode):
utf_8_text = text.encode('utf-8')
if len(utf_8_text) <= MAX_SHORT_VALUE_STRING_BYTES:
if len(utf_8_text) == len(text):
self.write_byte(int((TOKEN_PREFIX_TINY_ASCII - 1) + len(utf_8_text)))
else:
self.write_byte(int((TOKEN_PREFIX_TINY_UNICODE - 2) + len(utf_8_text)))
self.write_bytes(utf_8_text)
else:
if len(utf_8_text) == len(text):
self.write_byte(TOKEN_MISC_LONG_TEXT_ASCII)
else:
self.write_byte(TOKEN_MISC_LONG_TEXT_UNICODE)
self.write_bytes(utf_8_text, BYTE_MARKER_END_OF_STRING)
else:
if len(text) <= MAX_SHORT_VALUE_STRING_BYTES:
self.write_bytes(int((TOKEN_PREFIX_TINY_ASCII - 1) + len(text)), text)
else:
self.write_bytes(TOKEN_MISC_LONG_TEXT_ASCII, text, BYTE_MARKER_END_OF_STRING)
def write_binary(self, data):
"""
Write binary data (7-bit encoded or raw, depending on encode_as_7bit)
:param bytes|bytearray data: Raw binary data
"""
if data is None:
return self.write_null()
if self.encode_as_7bit:
self.write_byte(TOKEN_MISC_BINARY_7BIT)
self.write_7bit_binary(data)
else:
self.write_byte(TOKEN_MISC_BINARY_RAW)
self.write_positive_vint(len(data))
self.write_bytes(data)
def write_true(self):
"""Write True Value"""
self.write_byte(TOKEN_LITERAL_TRUE)
def write_false(self):
"""Write True Value"""
self.write_byte(TOKEN_LITERAL_FALSE)
def write_boolean(self, state):
"""
Write Boolean
:param bool state: Bool state
"""
self.write_byte(state and TOKEN_LITERAL_TRUE or TOKEN_LITERAL_FALSE)
def write_null(self):
""" generated source for method writeNull """
self.write_byte(TOKEN_LITERAL_NULL)
def write_number(self, i):
"""
Write Number
:param int|long|float|str i: number
"""
if isinstance(i, int):
# First things first: let's zigzag encode number
i = util.zigzag_encode(i)
# tiny (single byte) or small (type + 6-bit value) number?
if 0x3F >= i >= 0:
if i <= 0x1F:
self.write_byte(int((TOKEN_PREFIX_SMALL_INT + i)))
return
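# e.g. write_number(1): assuming the protobuf-style zigzag mapping, 1 zigzags
# to 2, which is <= 0x1F, so the single byte TOKEN_PREFIX_SMALL_INT + 2 was
# written above.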
# nope, just small, 2 bytes (type, 1-byte zigzag value) for 6 bit value
self.write_bytes(TOKEN_BYTE_INT_32, int((0x80 + i)))
return
# Ok: let's find minimal representation then
b0 = int((0x80 + (i & 0x3F)))
i >>= 6
if i <= 0x7F:
# 13 bits is enough (== 3 byte total encoding)
self.write_bytes(TOKEN_BYTE_INT_32, int(i), b0)
return
b1 = int((i & 0x7F))
i >>= 7
if i <= 0x7F:
self.write_bytes(TOKEN_BYTE_INT_32, int(i), b1, b0)
return
b2 = int((i & 0x7F))
i >>= 7
if i <= 0x7F:
self.write_bytes(TOKEN_BYTE_INT_32, int(i), b2, b1, b0)
return
# no, need all 5 bytes
b3 = int((i & 0x7F))
self.write_bytes(TOKEN_BYTE_INT_32, int((i >> 7)), b3, b2, b1, b0)
elif isinstance(i, long):
# First: maybe 32 bits is enough?
if MAX_INT_AS_LONG >= i >= MIN_INT_AS_LONG:
return self.write_number(int(i))
# Then let's zigzag encode it
l = util.zigzag_encode(i)
# Ok, well, we do know that 5 lowest-significant bytes are needed
i = int(l)
# 4 can be extracted from lower int
b0 = int((0x80 + (i & 0x3F)))
# sign bit set in the last byte
b1 = int(((i >> 6) & 0x7F))
b2 = int(((i >> 13) & 0x7F))
b3 = int(((i >> 20) & 0x7F))
# fifth one is split between ints; the zigzag-encoded value is non-negative,
# so a plain right shift doubles as the unsigned shift needed here
l >>= 27
b4 = int(((int(l)) & 0x7F))
# which may be enough?
i = int((l >> 7))
if i == 0:
self.write_bytes(TOKEN_BYTE_INT_64, b4, b3, b2, b1, b0)
return
if i <= 0x7F:
self.write_bytes(TOKEN_BYTE_INT_64, int(i), b4, b3, b2, b1, b0)
return
b5 = int((i & 0x7F))
i >>= 7
if i <= 0x7F:
self.write_bytes(TOKEN_BYTE_INT_64, int(i), b5, b4, b3, b2, b1, b0)
return
b6 = int((i & 0x7F))
i >>= 7
if i <= 0x7F:
self.write_bytes(TOKEN_BYTE_INT_64, int(i), b6, b5, b4, b3, b2, b1, b0)
return
b7 = int((i & 0x7F))
i >>= 7
if i <= 0x7F:
self.write_bytes(TOKEN_BYTE_INT_64, int(i), b7, b6, b5, b4, b3, b2, b1, b0)
return
b8 = int((i & 0x7F))
i >>= 7
# must be done, with 10 bytes! (9 * 7 + 6 == 69 bits; only need 63)
self.write_bytes(TOKEN_BYTE_INT_64, int(i), b8, b7, b6, b5, b4, b3, b2, b1, b0)
elif isinstance(i, basestring):
if not i:
self.write_null()
return
neg = i.startswith('-')
i = i.strip('-')
if i.isdigit():
self.write_integral_number(i, neg)
else:
self.write_decimal_number(i)
elif isinstance(i, (float, decimal.Decimal)):
if isinstance(i, decimal.Decimal) and isinstance(int(float(i)), long):
self.write_byte(TOKEN_BYTE_BIG_DECIMAL)
scale = i.as_tuple().exponent
self.write_signed_vint(scale)
self.write_7bit_binary(bytearray(str(i.to_integral_value())))
else:
i = float(i)
try:
i = util.float_to_bits(i)
self.write_byte(TOKEN_BYTE_FLOAT_32)
self.write_byte(int(i & 0x7F))
for _ in xrange(4):
i >>= 7
self.write_byte(int(i & 0x7F))
except struct.error:
i = util.float_to_raw_long_bits(i)
self.write_byte(TOKEN_BYTE_FLOAT_64)
self.write_byte(int(i & 0x7F))
for _ in xrange(9):
i >>= 7
self.write_byte(int(i & 0x7F))
def write_big_number(self, i):
"""
Write Big Number
:param i: Big Number
"""
if i is None:
return self.write_null()
self.write_byte(TOKEN_BYTE_BIG_INTEGER)
self.write_7bit_binary(bytearray(str(i)))
def write_integral_number(self, num, neg=False):
"""
Write Int
:param str num: String of an integral number
:param bool neg: Is the value negative
"""
if num is None:
return self.write_null()
num_len = len(num)
if neg:
num_len -= 1
# try:
if num_len <= 9:
self.write_number(int(num))
elif num_len <= 18:
self.write_number(long(num))
else:
self.write_big_number(num)
def write_decimal_number(self, num):
"""
Write decimal
:param str num: String of a decimal number
"""
if num is None:
return self.write_null()
self.write_number(decimal.Decimal(num))
def write_byte(self, c):
"""
Write byte
:param int|long|float|basestring c: byte
"""
if isinstance(c, basestring):
if isinstance(c, unicode):
c = c.encode('utf-8')
elif isinstance(c, float):
c = str(c)
elif isinstance(c, (int, long)):
try:
c = chr(c)
except ValueError:
c = str(c)
else:
raise ValueError('Invalid type for param "c"!')
self.output.extend(c)
def write_bytes(self, *args):
"""
Write bytes
:param args: args
"""
for arg in args:
self.write_byte(arg)
def write_positive_vint(self, i):
"""
Helper method for writing a 32-bit positive (really 31-bit then) value.
Value is NOT zigzag encoded (since there is no sign bit to worry about)
:param int i: Int
"""
# At most 5 bytes (4 * 7 + 6 bits == 34 bits)
b0 = int((0x80 + (i & 0x3F)))
i >>= 6
if i <= 0x7F:
# 6 or 13 bits is enough (== 2 or 3 byte total encoding)
if i > 0:
self.write_byte(int(i))
self.write_byte(b0)
return
b1 = int((i & 0x7F))
i >>= 7
if i <= 0x7F:
self.write_bytes(int(i), b1, b0)
else:
b2 = int((i & 0x7F))
i >>= 7
if i <= 0x7F:
self.write_bytes(int(i), b2, b1, b0)
else:
b3 = int((i & 0x7F))
self.write_bytes(int((i >> 7)), b3, b2, b1, b0)
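# e.g. write_positive_vint(300): the low 6 bits go into the final byte with its
# high bit set (0x80 + 0x2C == 0xAC), the remaining bits (300 >> 6 == 4) are
# written first, giving the two bytes 0x04 0xAC.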
def write_signed_vint(self, i):
"""
Helper method for writing 32-bit signed value, using
"zig zag encoding" (see protocol buffers for explanation -- basically,
sign bit is moved as LSB, rest of value shifted left by one)
coupled with basic variable length encoding
:param int i: Signed int
"""
self.write_positive_vint(util.zigzag_encode(i))
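# Zigzag mapping referenced above (assuming util.zigzag_encode follows the
# protobuf scheme): 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, 2 -> 4, so small
# magnitudes of either sign stay small before the vint split.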
def write_7bit_binary(self, data, offset=0):
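"""
Write binary data in SMILE "safe" 7-bit encoding: the length is written as a
positive vint, then every group of 7 input bytes is expanded into 8 output
bytes, each carrying 7 payload bits with the high bit clear.
:param bytearray data: Raw bytes to encode
:param int offset: (optional - Default: `0`) Starting offset into data
"""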
l = len(data)
self.write_positive_vint(l)
while l >= 7:
i = data[offset]
offset += 1
for x in xrange(1, 7):
self.write_byte(int(((i >> x) & 0x7F)))
i = (i << 8) | (data[offset] & 0xFF)
offset += 1
self.write_bytes(int(((i >> 7) & 0x7F)), int((i & 0x7F)))
l -= 7
# and then partial piece, if any
if l > 0:
i = data[offset]
offset += 1
self.write_byte(int(((i >> 1) & 0x7F)))
if l > 1:
i = ((i & 0x01) << 8) | (data[offset] & 0xFF)
offset += 1
# 2nd
self.write_byte(int(((i >> 2) & 0x7F)))
if l > 2:
i = ((i & 0x03) << 8) | (data[offset] & 0xFF)
offset += 1
# 3rd
self.write_byte(int(((i >> 3) & 0x7F)))
if l > 3:
i = ((i & 0x07) << 8) | (data[offset] & 0xFF)
offset += 1
# 4th
self.write_byte(int(((i >> 4) & 0x7F)))
if l > 4:
i = ((i & 0x0F) << 8) | (data[offset] & 0xFF)
offset += 1
# 5th
self.write_byte(int(((i >> 5) & 0x7F)))
if l > 5:
i = ((i & 0x1F) << 8) | (data[offset] & 0xFF)
offset += 1
# 6th
self.write_byte(int(((i >> 6) & 0x7F)))
self.write_byte(int((i & 0x3F)))
# last 6 bits
else:
self.write_byte(int((i & 0x1F)))
# last 5 bits
else:
self.write_byte(int((i & 0x0F)))
# last 4 bits
else:
self.write_byte(int((i & 0x07)))
# last 3 bits
else:
self.write_byte(int((i & 0x03)))
# last 2 bits
else:
self.write_byte(int((i & 0x01)))
# last bit
def _find_seen_name(self, name):
n_hash = util.hash_string(name)
try:
head = self.shared_keys[n_hash & (len(self.shared_keys) - 1)]
except IndexError:
return -1
if head is None:
return -1
if head.value is name:
return head.index
node = head
while node:
if node.value is name:
return node.index
node = node.next
node = head
while node:
if node.value == name and util.hash_string(node.value) == n_hash:
return node.index
node = node.next
return -1
def _add_seen_name(self, name):
if self.seen_name_count == len(self.shared_keys):
if len(self.shared_keys) == MAX_SHARED_NAMES:
self.seen_name_count = 0
self.shared_keys = [None] * len(self.shared_keys)
else:
old = copy.copy(self.shared_keys)
self.shared_keys = [None] * MAX_SHARED_NAMES
mask = MAX_SHARED_NAMES - 1
for node in old:
while node:
ix = util.hash_string(node.value) & mask
next_node = node.next
try:
node.next = self.shared_keys[ix]
except IndexError:
node.next = None
self.shared_keys[ix] = node
node = next_node
ref = self.seen_name_count
if _is_valid_back_ref(ref):
ix = util.hash_string(name) & (len(self.shared_keys) - 1)
self.shared_keys[ix] = SharedStringNode(name, ref, self.shared_keys[ix])
self.seen_name_count = ref + 1
def _find_seen_string_value(self, text):
hash_ = util.hash_string(text)
try:
head = self.shared_values[hash_ & (len(self.shared_values) - 1)]
except IndexError:
return -1
if head is None:
return -1
node = head
while node:
if node.value is text:
return node.index
node = node.next
node = head
while node:
if util.hash_string(node.value) == hash_ and node.value == text:
return node.index
node = node.next
return -1
def _add_seen_string_value(self, text):
if self.seen_string_count == len(self.shared_values):
if self.seen_string_count == MAX_SHARED_STRING_VALUES:
self.seen_string_count = 0
self.shared_values = [None] * len(self.shared_values)
else:
old = copy.copy(self.shared_values)
self.shared_values = [None] * MAX_SHARED_STRING_VALUES
mask = MAX_SHARED_STRING_VALUES - 1
for node in old:
while node:
ix = util.hash_string(node.value) & mask
next_node = node.next
try:
node.next = self.shared_values[ix]
except IndexError:
node.next = None
self.shared_values[ix] = node
node = next_node
ref = self.seen_string_count
if _is_valid_back_ref(ref):
ix = util.hash_string(text) & (len(self.shared_values) - 1)
self.shared_values[ix] = SharedStringNode(text, ref, self.shared_values[ix])
self.seen_string_count = ref + 1
def _is_valid_back_ref(index):
"""
Helper method used to ensure that we do not use back-reference values
that would produce illegal byte sequences (ones with byte 0xFE or 0xFF).
Note that we do not try to avoid null byte (0x00) by default, although
it would be technically possible as well.
:param int index: Index
:returns: Valid back ref
:rtype: bool
"""
return (index & 0xFF) < 0xFE
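# e.g. back-reference indices 254, 255, 510, 511, ... are rejected because
# their low byte would encode as 0xFE or 0xFF.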
def encode(py_obj, header=True, ender=False, shared_keys=True, shared_vals=True, bin_7bit=True):
"""
SMILE Encode object
:param list|dict py_obj: The object to be encoded
:param bool header: (optional - Default: `True`) Write the 4-byte SMILE header first
:param bool ender: (optional - Default: `False`) Write the optional end-of-content marker (0xFF) last
:param bool bin_7bit: (optional - Default: `True`) Encode raw data as 7-bit
:param bool shared_keys: (optional - Default: `True`) Shared Key String References
:param bool shared_vals: (optional - Default: `True`) Shared Value String References
:returns: SMILE encoded data
:rtype: str
"""
if isinstance(py_obj, (tuple, set)):
py_obj = list(py_obj)
elif not isinstance(py_obj, (list, dict)):
raise ValueError('Invalid type for "py_obj" parameter. Must be dict, list, tuple, or set.')
sg = SmileGenerator(shared_keys, shared_vals, bin_7bit)
if header:
sg.write_header()
def _floatstr(f):
"""
Convert a Python float into a JSON float string
:param float f: Floating point number
:returns: JSON String representation of the float
:rtype: str
"""
_inf = float('inf')
if f != f:
text = 'NaN'
elif f == _inf:
text = 'Infinity'
elif f == -_inf:
text = '-Infinity'
else:
return repr(f)
return text
def _iterencode(obj):
if isinstance(obj, basestring):
sg.write_string(obj)
elif obj is None:
sg.write_null()
elif obj is True:
sg.write_true()
elif obj is False:
sg.write_false()
elif isinstance(obj, float):
sg.write_number(obj)
elif isinstance(obj, (int, long)):
sg.write_number(obj)
elif isinstance(obj, (list, tuple, set)):
sg.write_start_array()
for v in list(obj):
_iterencode(v)
sg.write_end_array()
elif isinstance(obj, dict):
sg.write_start_object()
for key, val in obj.iteritems():
if key is True:
key = 'true'
elif key is False:
key = 'false'
elif key is None:
key = 'null'
elif isinstance(key, (int, long)):
key = str(key)
elif isinstance(key, float):
key = _floatstr(key)
elif not isinstance(key, basestring):
raise TypeError('Key ' + repr(key) + ' is not a string')
sg.write_field_name(key)
_iterencode(val)
sg.write_end_object()
else:
raise TypeError(repr(obj) + ' is not SMILE serializable')
_iterencode(py_obj)
if ender:
sg.write_end_marker()
return str(sg.output)
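# A minimal usage sketch (assumes this module is importable as pysmile.encode
# and that pysmile.constants / pysmile.util are available):
#
#     from pysmile import encode as smile_encode
#     blob = smile_encode.encode({'name': 'value', 'count': 3})
#     # blob is a byte string starting with the ':)\n' SMILE header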
if __name__ == '__main__':
a = ':)\n\x03\xfa\x80a@1\x80c\xf8\xc6\xf9\x80b\xc4\x80e(fL\x19\x04\x04\x80d\xc1\xfb'
b = encode({'a': '1', 'b': 2, 'c': [3], 'd': -1, 'e': 4.20})
if a != b:
print repr(a)
print repr(b)
a = ':)\n\x03\xfa\x80a\xfa\x80b\xfa\x80c\xfa\x80d\xf8@e\xf9\xfb\xfb\xfb\xfb'
b = encode({'a': {'b': {'c': {'d': ['e']}}}})
if a != b:
print repr(a)
print repr(b)
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.texttospeech_v1.services.text_to_speech import TextToSpeechAsyncClient
from google.cloud.texttospeech_v1.services.text_to_speech import TextToSpeechClient
from google.cloud.texttospeech_v1.services.text_to_speech import transports
from google.cloud.texttospeech_v1.types import cloud_tts
from google.oauth2 import service_account
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert TextToSpeechClient._get_default_mtls_endpoint(None) is None
assert (
TextToSpeechClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
)
assert (
TextToSpeechClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
TextToSpeechClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
TextToSpeechClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert TextToSpeechClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
@pytest.mark.parametrize("client_class", [TextToSpeechClient, TextToSpeechAsyncClient,])
def test_text_to_speech_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "texttospeech.googleapis.com:443"
@pytest.mark.parametrize(
"transport_class,transport_name",
[
(transports.TextToSpeechGrpcTransport, "grpc"),
(transports.TextToSpeechGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
def test_text_to_speech_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize("client_class", [TextToSpeechClient, TextToSpeechAsyncClient,])
def test_text_to_speech_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "texttospeech.googleapis.com:443"
def test_text_to_speech_client_get_transport_class():
transport = TextToSpeechClient.get_transport_class()
available_transports = [
transports.TextToSpeechGrpcTransport,
]
assert transport in available_transports
transport = TextToSpeechClient.get_transport_class("grpc")
assert transport == transports.TextToSpeechGrpcTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(TextToSpeechClient, transports.TextToSpeechGrpcTransport, "grpc"),
(
TextToSpeechAsyncClient,
transports.TextToSpeechGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
@mock.patch.object(
TextToSpeechClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TextToSpeechClient)
)
@mock.patch.object(
TextToSpeechAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(TextToSpeechAsyncClient),
)
def test_text_to_speech_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(TextToSpeechClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(TextToSpeechClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class(transport=transport_name)
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class(transport=transport_name)
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(TextToSpeechClient, transports.TextToSpeechGrpcTransport, "grpc", "true"),
(
TextToSpeechAsyncClient,
transports.TextToSpeechGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(TextToSpeechClient, transports.TextToSpeechGrpcTransport, "grpc", "false"),
(
TextToSpeechAsyncClient,
transports.TextToSpeechGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
TextToSpeechClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TextToSpeechClient)
)
@mock.patch.object(
TextToSpeechAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(TextToSpeechAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_text_to_speech_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
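# Expected resolution exercised below:
#   GOOGLE_API_USE_CLIENT_CERTIFICATE == "true" and a cert source available -> DEFAULT_MTLS_ENDPOINT plus that cert source
#   GOOGLE_API_USE_CLIENT_CERTIFICATE == "false"                            -> DEFAULT_ENDPOINT and no client cert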
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize("client_class", [TextToSpeechClient, TextToSpeechAsyncClient])
@mock.patch.object(
TextToSpeechClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TextToSpeechClient)
)
@mock.patch.object(
TextToSpeechAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(TextToSpeechAsyncClient),
)
def test_text_to_speech_client_get_mtls_endpoint_and_cert_source(client_class):
mock_client_cert_source = mock.Mock()
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source == mock_client_cert_source
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
mock_client_cert_source = mock.Mock()
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=mock_client_cert_source,
):
(
api_endpoint,
cert_source,
) = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(TextToSpeechClient, transports.TextToSpeechGrpcTransport, "grpc"),
(
TextToSpeechAsyncClient,
transports.TextToSpeechGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_text_to_speech_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(
TextToSpeechClient,
transports.TextToSpeechGrpcTransport,
"grpc",
grpc_helpers,
),
(
TextToSpeechAsyncClient,
transports.TextToSpeechGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_text_to_speech_client_client_options_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_text_to_speech_client_client_options_from_dict():
with mock.patch(
"google.cloud.texttospeech_v1.services.text_to_speech.transports.TextToSpeechGrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
client = TextToSpeechClient(client_options={"api_endpoint": "squid.clam.whelk"})
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(
TextToSpeechClient,
transports.TextToSpeechGrpcTransport,
"grpc",
grpc_helpers,
),
(
TextToSpeechAsyncClient,
transports.TextToSpeechGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_text_to_speech_client_create_channel_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# test that the credentials from file are saved and used as the credentials.
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel"
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
file_creds = ga_credentials.AnonymousCredentials()
load_creds.return_value = (file_creds, None)
adc.return_value = (creds, None)
client = client_class(client_options=options, transport=transport_name)
create_channel.assert_called_with(
"texttospeech.googleapis.com:443",
credentials=file_creds,
credentials_file=None,
quota_project_id=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
scopes=None,
default_host="texttospeech.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize("request_type", [cloud_tts.ListVoicesRequest, dict,])
def test_list_voices(request_type, transport: str = "grpc"):
client = TextToSpeechClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_voices), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cloud_tts.ListVoicesResponse()
response = client.list_voices(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cloud_tts.ListVoicesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cloud_tts.ListVoicesResponse)
def test_list_voices_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TextToSpeechClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_voices), "__call__") as call:
client.list_voices()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cloud_tts.ListVoicesRequest()
@pytest.mark.asyncio
async def test_list_voices_async(
transport: str = "grpc_asyncio", request_type=cloud_tts.ListVoicesRequest
):
client = TextToSpeechAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_voices), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cloud_tts.ListVoicesResponse()
)
response = await client.list_voices(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cloud_tts.ListVoicesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cloud_tts.ListVoicesResponse)
@pytest.mark.asyncio
async def test_list_voices_async_from_dict():
await test_list_voices_async(request_type=dict)
def test_list_voices_flattened():
client = TextToSpeechClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_voices), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cloud_tts.ListVoicesResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_voices(language_code="language_code_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].language_code
mock_val = "language_code_value"
assert arg == mock_val
def test_list_voices_flattened_error():
client = TextToSpeechClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_voices(
cloud_tts.ListVoicesRequest(), language_code="language_code_value",
)
@pytest.mark.asyncio
async def test_list_voices_flattened_async():
client = TextToSpeechAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_voices), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cloud_tts.ListVoicesResponse()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cloud_tts.ListVoicesResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_voices(language_code="language_code_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].language_code
mock_val = "language_code_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_voices_flattened_error_async():
client = TextToSpeechAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_voices(
cloud_tts.ListVoicesRequest(), language_code="language_code_value",
)
@pytest.mark.parametrize("request_type", [cloud_tts.SynthesizeSpeechRequest, dict,])
def test_synthesize_speech(request_type, transport: str = "grpc"):
client = TextToSpeechClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.synthesize_speech), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cloud_tts.SynthesizeSpeechResponse(
audio_content=b"audio_content_blob",
)
response = client.synthesize_speech(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cloud_tts.SynthesizeSpeechRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cloud_tts.SynthesizeSpeechResponse)
assert response.audio_content == b"audio_content_blob"
def test_synthesize_speech_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TextToSpeechClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.synthesize_speech), "__call__"
) as call:
client.synthesize_speech()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cloud_tts.SynthesizeSpeechRequest()
@pytest.mark.asyncio
async def test_synthesize_speech_async(
transport: str = "grpc_asyncio", request_type=cloud_tts.SynthesizeSpeechRequest
):
client = TextToSpeechAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.synthesize_speech), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cloud_tts.SynthesizeSpeechResponse(audio_content=b"audio_content_blob",)
)
response = await client.synthesize_speech(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cloud_tts.SynthesizeSpeechRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cloud_tts.SynthesizeSpeechResponse)
assert response.audio_content == b"audio_content_blob"
@pytest.mark.asyncio
async def test_synthesize_speech_async_from_dict():
await test_synthesize_speech_async(request_type=dict)
def test_synthesize_speech_flattened():
client = TextToSpeechClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.synthesize_speech), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cloud_tts.SynthesizeSpeechResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.synthesize_speech(
input=cloud_tts.SynthesisInput(text="text_value"),
voice=cloud_tts.VoiceSelectionParams(language_code="language_code_value"),
audio_config=cloud_tts.AudioConfig(
audio_encoding=cloud_tts.AudioEncoding.LINEAR16
),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].input
mock_val = cloud_tts.SynthesisInput(text="text_value")
assert arg == mock_val
arg = args[0].voice
mock_val = cloud_tts.VoiceSelectionParams(language_code="language_code_value")
assert arg == mock_val
arg = args[0].audio_config
mock_val = cloud_tts.AudioConfig(
audio_encoding=cloud_tts.AudioEncoding.LINEAR16
)
assert arg == mock_val
def test_synthesize_speech_flattened_error():
client = TextToSpeechClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.synthesize_speech(
cloud_tts.SynthesizeSpeechRequest(),
input=cloud_tts.SynthesisInput(text="text_value"),
voice=cloud_tts.VoiceSelectionParams(language_code="language_code_value"),
audio_config=cloud_tts.AudioConfig(
audio_encoding=cloud_tts.AudioEncoding.LINEAR16
),
)
@pytest.mark.asyncio
async def test_synthesize_speech_flattened_async():
client = TextToSpeechAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.synthesize_speech), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cloud_tts.SynthesizeSpeechResponse()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cloud_tts.SynthesizeSpeechResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.synthesize_speech(
input=cloud_tts.SynthesisInput(text="text_value"),
voice=cloud_tts.VoiceSelectionParams(language_code="language_code_value"),
audio_config=cloud_tts.AudioConfig(
audio_encoding=cloud_tts.AudioEncoding.LINEAR16
),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].input
mock_val = cloud_tts.SynthesisInput(text="text_value")
assert arg == mock_val
arg = args[0].voice
mock_val = cloud_tts.VoiceSelectionParams(language_code="language_code_value")
assert arg == mock_val
arg = args[0].audio_config
mock_val = cloud_tts.AudioConfig(
audio_encoding=cloud_tts.AudioEncoding.LINEAR16
)
assert arg == mock_val
@pytest.mark.asyncio
async def test_synthesize_speech_flattened_error_async():
client = TextToSpeechAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.synthesize_speech(
cloud_tts.SynthesizeSpeechRequest(),
input=cloud_tts.SynthesisInput(text="text_value"),
voice=cloud_tts.VoiceSelectionParams(language_code="language_code_value"),
audio_config=cloud_tts.AudioConfig(
audio_encoding=cloud_tts.AudioEncoding.LINEAR16
),
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.TextToSpeechGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = TextToSpeechClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.TextToSpeechGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = TextToSpeechClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide an api_key and a transport instance.
transport = transports.TextToSpeechGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
options = client_options.ClientOptions()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = TextToSpeechClient(client_options=options, transport=transport,)
# It is an error to provide an api_key and a credential.
options = mock.Mock()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = TextToSpeechClient(
client_options=options, credentials=ga_credentials.AnonymousCredentials()
)
# It is an error to provide scopes and a transport instance.
transport = transports.TextToSpeechGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = TextToSpeechClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.TextToSpeechGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = TextToSpeechClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.TextToSpeechGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.TextToSpeechGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize(
"transport_class",
[
transports.TextToSpeechGrpcTransport,
transports.TextToSpeechGrpcAsyncIOTransport,
],
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = TextToSpeechClient(credentials=ga_credentials.AnonymousCredentials(),)
assert isinstance(client.transport, transports.TextToSpeechGrpcTransport,)
def test_text_to_speech_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.TextToSpeechTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_text_to_speech_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.texttospeech_v1.services.text_to_speech.transports.TextToSpeechTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.TextToSpeechTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"list_voices",
"synthesize_speech",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
def test_text_to_speech_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.texttospeech_v1.services.text_to_speech.transports.TextToSpeechTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.TextToSpeechTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
def test_text_to_speech_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.texttospeech_v1.services.text_to_speech.transports.TextToSpeechTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.TextToSpeechTransport()
adc.assert_called_once()
def test_text_to_speech_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
TextToSpeechClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[
transports.TextToSpeechGrpcTransport,
transports.TextToSpeechGrpcAsyncIOTransport,
],
)
def test_text_to_speech_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.TextToSpeechGrpcTransport, grpc_helpers),
(transports.TextToSpeechGrpcAsyncIOTransport, grpc_helpers_async),
],
)
def test_text_to_speech_transport_create_channel(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"texttospeech.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
scopes=["1", "2"],
default_host="texttospeech.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class",
[transports.TextToSpeechGrpcTransport, transports.TextToSpeechGrpcAsyncIOTransport],
)
def test_text_to_speech_grpc_transport_client_cert_source_for_mtls(transport_class):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds,
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback,
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert, private_key=expected_key
)
def test_text_to_speech_host_no_port():
client = TextToSpeechClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="texttospeech.googleapis.com"
),
)
assert client.transport._host == "texttospeech.googleapis.com:443"
def test_text_to_speech_host_with_port():
client = TextToSpeechClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="texttospeech.googleapis.com:8000"
),
)
assert client.transport._host == "texttospeech.googleapis.com:8000"
def test_text_to_speech_grpc_transport_channel():
channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.TextToSpeechGrpcTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_text_to_speech_grpc_asyncio_transport_channel():
channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.TextToSpeechGrpcAsyncIOTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[transports.TextToSpeechGrpcTransport, transports.TextToSpeechGrpcAsyncIOTransport],
)
def test_text_to_speech_transport_channel_mtls_with_client_cert_source(transport_class):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[transports.TextToSpeechGrpcTransport, transports.TextToSpeechGrpcAsyncIOTransport],
)
def test_text_to_speech_transport_channel_mtls_with_adc(transport_class):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_common_billing_account_path():
billing_account = "squid"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = TextToSpeechClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "clam",
}
path = TextToSpeechClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = TextToSpeechClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "whelk"
expected = "folders/{folder}".format(folder=folder,)
actual = TextToSpeechClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "octopus",
}
path = TextToSpeechClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = TextToSpeechClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "oyster"
expected = "organizations/{organization}".format(organization=organization,)
actual = TextToSpeechClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "nudibranch",
}
path = TextToSpeechClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = TextToSpeechClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "cuttlefish"
expected = "projects/{project}".format(project=project,)
actual = TextToSpeechClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "mussel",
}
path = TextToSpeechClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = TextToSpeechClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "winkle"
location = "nautilus"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = TextToSpeechClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "scallop",
"location": "abalone",
}
path = TextToSpeechClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = TextToSpeechClient.parse_common_location_path(path)
assert expected == actual
def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.TextToSpeechTransport, "_prep_wrapped_messages"
) as prep:
client = TextToSpeechClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.TextToSpeechTransport, "_prep_wrapped_messages"
) as prep:
transport_class = TextToSpeechClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
client = TextToSpeechAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
with mock.patch.object(
type(getattr(client.transport, "grpc_channel")), "close"
) as close:
async with client:
close.assert_not_called()
close.assert_called_once()
def test_transport_close():
transports = {
"grpc": "_grpc_channel",
}
for transport, close_name in transports.items():
client = TextToSpeechClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
with mock.patch.object(
type(getattr(client.transport, close_name)), "close"
) as close:
with client:
close.assert_not_called()
close.assert_called_once()
def test_client_ctx():
transports = [
"grpc",
]
for transport in transports:
client = TextToSpeechClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()
@pytest.mark.parametrize(
"client_class,transport_class",
[
(TextToSpeechClient, transports.TextToSpeechGrpcTransport),
(TextToSpeechAsyncClient, transports.TextToSpeechGrpcAsyncIOTransport),
],
)
def test_api_key_credentials(client_class, transport_class):
with mock.patch.object(
google.auth._default, "get_api_key_credentials", create=True
) as get_api_key_credentials:
mock_cred = mock.Mock()
get_api_key_credentials.return_value = mock_cred
options = client_options.ClientOptions()
options.api_key = "api_key"
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=mock_cred,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
|
|
# Copyright (c) 2015 Yubico AB
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import functools
import click
import sys
from typing import NoReturn
from yubikit.core.otp import OtpConnection
from yubikit.core.smartcard import SmartCardConnection
from yubikit.core.fido import FidoConnection
from yubikit.oath import parse_b32_key
from collections import OrderedDict
from collections.abc import MutableMapping
from cryptography.hazmat.primitives import serialization
from contextlib import contextmanager
from threading import Timer
class EnumChoice(click.Choice):
"""
Use an enum's member names as the definition for a choice option.
Enum member names MUST be all uppercase. Options are not case sensitive.
Underscores in enum names are translated to dashes in the option choice.
"""
    def __init__(self, choices_enum, hidden=()):
super().__init__(
[v.name.replace("_", "-") for v in choices_enum if v not in hidden],
case_sensitive=False,
)
self.choices_enum = choices_enum
def convert(self, value, param, ctx):
if isinstance(value, self.choices_enum):
return value
name = super().convert(value, param, ctx).replace("-", "_")
return self.choices_enum[name]
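# Illustrative sketch (not part of the original module): how EnumChoice maps
# dashed, case-insensitive option values back to enum members. The _Connector
# enum and the --connector option below are hypothetical examples.
def _example_enum_choice_option(command):
    import enum

    class _Connector(enum.Enum):
        USB_A = 1
        USB_C = 2

    # "usb-a", "USB-A", etc. on the command line all convert to _Connector.USB_A.
    return click.option("--connector", type=EnumChoice(_Connector))(command)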
class _YkmanCommand(click.Command):
def __init__(self, name=None, **attrs):
self.interfaces = attrs.pop("interfaces", None)
click.Command.__init__(self, name, **attrs)
class _YkmanGroup(click.Group):
"""click.Group which returns commands before subgroups in list_commands."""
def __init__(self, name=None, commands=None, **attrs):
self.connections = attrs.pop("connections", None)
click.Group.__init__(self, name, commands, **attrs)
def list_commands(self, ctx):
return sorted(
self.commands, key=lambda c: (isinstance(self.commands[c], click.Group), c)
)
def ykman_group(
connections=[SmartCardConnection, OtpConnection, FidoConnection], *args, **kwargs
):
if not isinstance(connections, list):
connections = [connections] # Single type
return click.group(
cls=_YkmanGroup,
*args,
connections=connections,
**kwargs,
) # type: ignore
def ykman_command(interfaces, *args, **kwargs):
return click.command(
cls=_YkmanCommand,
*args,
interfaces=interfaces,
**kwargs,
) # type: ignore
def click_callback(invoke_on_missing=False):
def wrap(f):
@functools.wraps(f)
def inner(ctx, param, val):
if not invoke_on_missing and not param.required and val is None:
return None
try:
return f(ctx, param, val)
except ValueError as e:
ctx.fail(f'Invalid value for "{param.name}": {str(e)}')
return inner
return wrap
@click_callback()
def click_parse_format(ctx, param, val):
if val == "PEM":
return serialization.Encoding.PEM
elif val == "DER":
return serialization.Encoding.DER
else:
raise ValueError(val)
click_force_option = click.option(
"-f", "--force", is_flag=True, help="Confirm the action without prompting."
)
click_format_option = click.option(
"-F",
"--format",
type=click.Choice(["PEM", "DER"], case_sensitive=False),
default="PEM",
show_default=True,
help="Encoding format.",
callback=click_parse_format,
)
class YkmanContextObject(MutableMapping):
def __init__(self):
self._objects = OrderedDict()
self._resolved = False
def add_resolver(self, key, f):
if self._resolved:
f = f()
self._objects[key] = f
def resolve(self):
if not self._resolved:
self._resolved = True
for k, f in self._objects.copy().items():
self._objects[k] = f()
def __getitem__(self, key):
self.resolve()
return self._objects[key]
def __setitem__(self, key, value):
if not self._resolved:
raise ValueError("BUG: Attempted to set item when unresolved.")
self._objects[key] = value
def __delitem__(self, key):
del self._objects[key]
def __len__(self):
return len(self._objects)
def __iter__(self):
return iter(self._objects)
def click_postpone_execution(f):
@functools.wraps(f)
def inner(*args, **kwargs):
click.get_current_context().obj.add_resolver(str(f), lambda: f(*args, **kwargs))
return inner
@click_callback()
def click_parse_b32_key(ctx, param, val):
return parse_b32_key(val)
def click_prompt(prompt, err=True, **kwargs):
"""Replacement for click.prompt to better work when piping input to the command.
Note that we change the default of err to be True, since that's how we typically
use it.
"""
if not sys.stdin.isatty(): # Piped from stdin, see if there is data
line = sys.stdin.readline()
if line:
return line.rstrip("\n")
# No piped data, use standard prompt
return click.prompt(prompt, err=err, **kwargs)
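# Illustrative sketch (not part of the original module): click_prompt reads a
# single line from piped stdin when available, otherwise it falls back to an
# interactive click.prompt written to stderr. The password prompt below is a
# hypothetical example.
def _example_prompt_for_password():
    # Piped invocations (e.g. `echo secret | ykman ...`) return "secret" here
    # without showing the prompt; interactive runs prompt on stderr instead.
    return click_prompt("Enter your password", hide_input=True)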
def prompt_for_touch():
try:
click.echo("Touch your YubiKey...", err=True)
except Exception:
sys.stderr.write("Touch your YubiKey...\n")
@contextmanager
def prompt_timeout(timeout=0.5):
timer = Timer(timeout, prompt_for_touch)
try:
timer.start()
yield None
finally:
timer.cancel()
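# Illustrative sketch (not part of the original module): prompt_timeout only
# prints the touch prompt if the wrapped operation is still running after
# `timeout` seconds, so operations that finish quickly stay silent.
# `perform_touch_operation` is a hypothetical callable.
def _example_prompt_timeout(perform_touch_operation):
    with prompt_timeout(timeout=0.5):
        return perform_touch_operation()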
def cli_fail(message: str, code: int = 1) -> NoReturn:
click.echo(f"Error: {message}", err=True)
sys.exit(code)
|
|
"""The tests for Nest device triggers."""
from google_nest_sdm.device import Device
from google_nest_sdm.event import EventMessage
import pytest
import homeassistant.components.automation as automation
from homeassistant.components.device_automation import DeviceAutomationType
from homeassistant.components.device_automation.exceptions import (
InvalidDeviceAutomationConfig,
)
from homeassistant.components.nest import DOMAIN
from homeassistant.components.nest.events import NEST_EVENT
from homeassistant.helpers import device_registry as dr, entity_registry as er
from homeassistant.setup import async_setup_component
from homeassistant.util.dt import utcnow
from .common import async_setup_sdm_platform
from tests.common import (
assert_lists_same,
async_get_device_automations,
async_mock_service,
)
DEVICE_ID = "some-device-id"
DEVICE_NAME = "My Camera"
DATA_MESSAGE = {"message": "service-called"}
def make_camera(device_id, name=DEVICE_NAME, traits={}):
"""Create a nest camera."""
traits = traits.copy()
traits.update(
{
"sdm.devices.traits.Info": {
"customName": name,
},
"sdm.devices.traits.CameraLiveStream": {
"maxVideoResolution": {
"width": 640,
"height": 480,
},
"videoCodecs": ["H264"],
"audioCodecs": ["AAC"],
},
}
)
return Device.MakeDevice(
{
"name": device_id,
"type": "sdm.devices.types.CAMERA",
"traits": traits,
},
auth=None,
)
async def async_setup_camera(hass, devices=None):
"""Set up the platform and prerequisites for testing available triggers."""
if not devices:
devices = {DEVICE_ID: make_camera(device_id=DEVICE_ID)}
return await async_setup_sdm_platform(hass, "camera", devices)
async def setup_automation(hass, device_id, trigger_type):
"""Set up an automation trigger for testing triggering."""
return await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": device_id,
"type": trigger_type,
},
"action": {
"service": "test.automation",
"data": DATA_MESSAGE,
},
},
]
},
)
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
async def test_get_triggers(hass):
"""Test we get the expected triggers from a nest."""
camera = make_camera(
device_id=DEVICE_ID,
traits={
"sdm.devices.traits.CameraMotion": {},
"sdm.devices.traits.CameraPerson": {},
},
)
await async_setup_camera(hass, {DEVICE_ID: camera})
device_registry = dr.async_get(hass)
device_entry = device_registry.async_get_device({("nest", DEVICE_ID)})
expected_triggers = [
{
"platform": "device",
"domain": DOMAIN,
"type": "camera_motion",
"device_id": device_entry.id,
},
{
"platform": "device",
"domain": DOMAIN,
"type": "camera_person",
"device_id": device_entry.id,
},
]
triggers = await async_get_device_automations(
hass, DeviceAutomationType.TRIGGER, device_entry.id
)
assert_lists_same(triggers, expected_triggers)
async def test_multiple_devices(hass):
"""Test we get the expected triggers from a nest."""
camera1 = make_camera(
device_id="device-id-1",
name="Camera 1",
traits={
"sdm.devices.traits.CameraSound": {},
},
)
camera2 = make_camera(
device_id="device-id-2",
name="Camera 2",
traits={
"sdm.devices.traits.DoorbellChime": {},
},
)
await async_setup_camera(hass, {"device-id-1": camera1, "device-id-2": camera2})
registry = er.async_get(hass)
entry1 = registry.async_get("camera.camera_1")
assert entry1.unique_id == "device-id-1-camera"
entry2 = registry.async_get("camera.camera_2")
assert entry2.unique_id == "device-id-2-camera"
triggers = await async_get_device_automations(
hass, DeviceAutomationType.TRIGGER, entry1.device_id
)
assert len(triggers) == 1
assert triggers[0] == {
"platform": "device",
"domain": DOMAIN,
"type": "camera_sound",
"device_id": entry1.device_id,
}
triggers = await async_get_device_automations(
hass, DeviceAutomationType.TRIGGER, entry2.device_id
)
assert len(triggers) == 1
assert triggers[0] == {
"platform": "device",
"domain": DOMAIN,
"type": "doorbell_chime",
"device_id": entry2.device_id,
}
async def test_triggers_for_invalid_device_id(hass):
"""Get triggers for a device not found in the API."""
camera = make_camera(
device_id=DEVICE_ID,
traits={
"sdm.devices.traits.CameraMotion": {},
"sdm.devices.traits.CameraPerson": {},
},
)
await async_setup_camera(hass, {DEVICE_ID: camera})
device_registry = dr.async_get(hass)
device_entry = device_registry.async_get_device({("nest", DEVICE_ID)})
assert device_entry is not None
# Create an additional device that does not exist. Fetching supported
# triggers for an unknown device will fail.
assert len(device_entry.config_entries) == 1
config_entry_id = next(iter(device_entry.config_entries))
device_entry_2 = device_registry.async_get_or_create(
config_entry_id=config_entry_id, identifiers={(DOMAIN, "some-unknown-nest-id")}
)
assert device_entry_2 is not None
with pytest.raises(InvalidDeviceAutomationConfig):
await async_get_device_automations(
hass, DeviceAutomationType.TRIGGER, device_entry_2.id
)
async def test_no_triggers(hass):
"""Test we get the expected triggers from a nest."""
camera = make_camera(device_id=DEVICE_ID, traits={})
await async_setup_camera(hass, {DEVICE_ID: camera})
registry = er.async_get(hass)
entry = registry.async_get("camera.my_camera")
assert entry.unique_id == "some-device-id-camera"
triggers = await async_get_device_automations(
hass, DeviceAutomationType.TRIGGER, entry.device_id
)
assert triggers == []
async def test_fires_on_camera_motion(hass, calls):
"""Test camera_motion triggers firing."""
assert await setup_automation(hass, DEVICE_ID, "camera_motion")
message = {"device_id": DEVICE_ID, "type": "camera_motion", "timestamp": utcnow()}
hass.bus.async_fire(NEST_EVENT, message)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data == DATA_MESSAGE
async def test_fires_on_camera_person(hass, calls):
"""Test camera_person triggers firing."""
assert await setup_automation(hass, DEVICE_ID, "camera_person")
message = {"device_id": DEVICE_ID, "type": "camera_person", "timestamp": utcnow()}
hass.bus.async_fire(NEST_EVENT, message)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data == DATA_MESSAGE
async def test_fires_on_camera_sound(hass, calls):
"""Test camera_person triggers firing."""
assert await setup_automation(hass, DEVICE_ID, "camera_sound")
message = {"device_id": DEVICE_ID, "type": "camera_sound", "timestamp": utcnow()}
hass.bus.async_fire(NEST_EVENT, message)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data == DATA_MESSAGE
async def test_fires_on_doorbell_chime(hass, calls):
"""Test doorbell_chime triggers firing."""
assert await setup_automation(hass, DEVICE_ID, "doorbell_chime")
message = {"device_id": DEVICE_ID, "type": "doorbell_chime", "timestamp": utcnow()}
hass.bus.async_fire(NEST_EVENT, message)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data == DATA_MESSAGE
async def test_trigger_for_wrong_device_id(hass, calls):
"""Test for turn_on and turn_off triggers firing."""
assert await setup_automation(hass, DEVICE_ID, "camera_motion")
message = {
"device_id": "wrong-device-id",
"type": "camera_motion",
"timestamp": utcnow(),
}
hass.bus.async_fire(NEST_EVENT, message)
await hass.async_block_till_done()
assert len(calls) == 0
async def test_trigger_for_wrong_event_type(hass, calls):
"""Test for turn_on and turn_off triggers firing."""
assert await setup_automation(hass, DEVICE_ID, "camera_motion")
message = {
"device_id": DEVICE_ID,
"type": "wrong-event-type",
"timestamp": utcnow(),
}
hass.bus.async_fire(NEST_EVENT, message)
await hass.async_block_till_done()
assert len(calls) == 0
async def test_subscriber_automation(hass, calls):
"""Test end to end subscriber triggers automation."""
camera = make_camera(
device_id=DEVICE_ID,
traits={
"sdm.devices.traits.CameraMotion": {},
},
)
subscriber = await async_setup_camera(hass, {DEVICE_ID: camera})
device_registry = dr.async_get(hass)
device_entry = device_registry.async_get_device({("nest", DEVICE_ID)})
assert await setup_automation(hass, device_entry.id, "camera_motion")
# Simulate a pubsub message received by the subscriber with a motion event
event = EventMessage(
{
"eventId": "some-event-id",
"timestamp": "2019-01-01T00:00:01Z",
"resourceUpdate": {
"name": DEVICE_ID,
"events": {
"sdm.devices.events.CameraMotion.Motion": {
"eventSessionId": "CjY5Y3VKaTZwR3o4Y19YbTVfMF...",
"eventId": "FWWVQVUdGNUlTU2V4MGV2aTNXV...",
},
},
},
},
auth=None,
)
await subscriber.async_receive_event(event)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data == DATA_MESSAGE
|
|
# Import dependencies
import os
import logging
import json
import math
from datasource import TrainingInstance as tri
import numpy as np
from utils import feature_extractor as fe
import pickle
import random
def generate_label_sequences(labels, n_instances, l_range=(1, 30),
print_every=100):
'''
Generates a given number of label sequences
:param labels: a list of available unique labels
:param n_instances: number of instances (sequences) to generate
    :param l_range: A tuple that holds the min and max length of sequences
    :param print_every: log progress every `print_every` sequences
    :return: list of sequences and list of their corresponding lengths
'''
seqs = []
seq_lengths = []
for i in np.arange(l_range[0], l_range[1]+1):
logging.info('Generating sequences of length ' + str(i))
for j in range(n_instances):
if j % print_every == 0:
logging.info('Generating ' + str(j) + 'th sequence of length '
+ str(i))
seqs.append(random.sample(labels, i))
seq_lengths.append(i)
return seqs, seq_lengths
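# Illustrative sketch (not part of the original module): generate two random
# label sequences per length for lengths 1..3 from a small label set.
def _example_generate_label_sequences():
    labels = ['0', '1', '2', '3', '4']
    seqs, seq_lengths = generate_label_sequences(labels, 2, l_range=(1, 3))
    # seqs holds 6 sequences (2 per length) and seq_lengths is [1, 1, 2, 2, 3, 3]
    return seqs, seq_lengths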
def generate_data_sequences(codebook, label_seqs, print_every=100):
    '''
    Generates a data sequence for every given label sequence
    :param codebook: A codebook (dict obj) that holds all the labels as keys
    and all the corresponding data instances as values
    :param label_seqs: Sequences of labels for which data sequences need to
    be generated
    :param print_every: log progress every `print_every` sequences
    :return: list of data sequences, list of their transposes, and the
    average, minimum and maximum sequence lengths
    '''
    seqs = []
    t_seqs = []
    min_len = 10000000
    max_len = 0
    avg_len = 0.0
    for j, label_seq in enumerate(label_seqs):
        if j % print_every == 0:
logging.info('Generating ' + str(j) + 'th data sequence of length '
+ str(len(label_seq)))
d_seq_buf = None
for lbl in label_seq:
if d_seq_buf is None:
sampled_data = np.array(random.sample(codebook[lbl], 1))
sampled_data = np.reshape(sampled_data, (sampled_data.shape[1],
sampled_data.shape[2]))
d_seq_buf = np.array(sampled_data)
else:
rand_lbl_sample = np.array(random.sample(codebook[lbl], 1))
rand_lbl_sample = np.reshape(rand_lbl_sample, (rand_lbl_sample.shape[1],
rand_lbl_sample.shape[2]))
d_seq_buf = np.concatenate((d_seq_buf, rand_lbl_sample), axis=0)
avg_len += d_seq_buf.shape[0]
        min_len = min(min_len, d_seq_buf.shape[0])
        max_len = max(max_len, d_seq_buf.shape[0])
seqs.append(d_seq_buf)
t_seqs.append(np.transpose(d_seq_buf))
avg_len = float(avg_len)/(j+1)
return seqs, t_seqs, avg_len, min_len, max_len
def scaleData(data, scaler):
'''
Method to scale the sensor data as a preprocessing step
    :param data: (list) List of all the training instance objects
    :param scaler: (scaler object) The scaler used to scale each instance
    :return: data: (list) List of all the scaled training data objects
'''
# data = np.array([ti.scaleData(scaler) for ti in data])
d = []
for i, ti in enumerate(data):
        if i % 50 == 0:
            print(
                str(i) + ' out of ' + str(len(data)) + ' training instances scaled')
d.append(ti.scaleData(scaler))
return np.array(d)
# Serialize objects to disk for future reuse to make things faster
def dumpObject(filePath, obj):
    try:
        with open(filePath, 'wb') as f:
            pickle.dump(obj, f)
            return True
    except IOError:
        return False
def loadObject(filePath):
    if os.path.isfile(filePath):
        with open(filePath, 'rb') as f:
            return pickle.load(f)
    else:
        return None
def read_json_file(filepath):
with open(filepath) as data_file:
data = json.load(data_file)
return data
# Get training data from the root directory where the ground truth exists
def getTrainingData(rootdir):
'''
This method gets all the training data from the root directory of the
ground truth
The following is the directory structure for the ground truth
Root_Dir
|_Labels
|_Participants
|_data_files
    :param rootdir (string): path to the root directory where the ground
truth exists
:return: labels (list), A list of class labels
data (list), A list of training instances
target (list), A list of class labels
    corresponding to the training instances in 'data'
labelsdict (dictionary), A dictionary for
converting class labels from string to integer and vice versa
avg_len (float), The average length of the
sensor data (emg, accelerometer, gyroscope and orientation)
which would later be used for normalization
user_map (dictionary), A dictionary of all
participants and their corresponding file list to be used for
leave one out test later
user_list (list), A list of all participants
data_dict (dictionary) A dictionary containing a
mapping of all the class labels, participants, and files of
the participants which can be used later for transforming the
data for leave one out test
max_len (integer) the maximum length of the
sensor data
data_path (list) A list that will hold the
path to every training instance in the 'data list'
'''
# List of all training labels
training_class_dirs = os.walk(rootdir)
labels = [] # Empty list to hold all the class labels
    labelsdict = {}  # Dictionary to store the labels and their corresponding
    # integer values
labeldirs = [] # Directory paths of all labels
target = [] # List that will hold class labels of the training instances
# in 'data list'
data = [] # List that will hold all the training/validation instances
    sample_len_vec_emg = []  # List that holds the length of all the EMG
    # data. It will be used later for calculating the average length
    sample_len_vec_others = []  # List that holds the length of all the other
    # sensor data. It will be used later for calculating the average length
    data_dict = {}  # The dictionary that will hold the mappings for all
    # labels, participants of each label and data files corresponding to
    # all the participants. This will be used later for the leave one out test
user_map = {} # A dictionary that will hold the mappings for all
# participants and their corresponding ids
user_list = [] # A list of all participants
user_ids = np.arange(
100).tolist() # A pre generated list of userids for assigning a
# unique id to every user
data_path = [] # A list that will hold the path to every training
# instance in the 'data list'
    # A codebook of all the labels and their corresponding data instances
codebook = {}
# Get the list of labels by walking the root directory
for trclass in training_class_dirs:
labels = trclass[1]
break
# extracting list of participants for each label
for i, label in enumerate(labels):
        usr_dict = {}  # dictionary to store participant information
lbl_users_lst = [] # list of participants per label
labelsdict[label] = i
labeldir = os.path.join(rootdir, label)
# list of users for the respective label
lbl_usrs = os.walk(labeldir)
# enumerating all the users of the respective label
for usr in lbl_usrs:
# print(usr)
lbl_users_lst = usr[1]
# assigning unique ids to all the users
for i, user in enumerate(lbl_users_lst):
if user not in user_map:
id = user_ids.pop()
user_map[user] = id
user_list.append(id)
break
# extracting data file list for every participant
for usr in lbl_users_lst:
usrdir = os.path.join(labeldir, usr)
filewalk = os.walk(usrdir)
file_list = []
for fls in filewalk:
file_list = fls[2]
break
            usr_dict[usr] = (usrdir, file_list)
        usr_dict['users'] = lbl_users_lst
        data_dict[label] = usr_dict  # add all meta information to data_dict
# Extracting data from the data files from all participants
for key, value in data_dict.items():
tar_val = int(key)
users = value['users']
for user in users:
user_dir = value[user]
dirPath = user_dir[0]
filelist = user_dir[1]
for file in filelist:
fp = os.path.join(dirPath, file)
data_path.append(fp)
fileData = read_json_file(fp)
# extract data from the dictionary
# emg
emg = np.array(fileData['emg']['data'])
emgts = np.array(fileData['emg']['timestamps'])
# accelerometer
acc = np.array(fileData['acc']['data'])
accts = np.array(fileData['acc']['timestamps'])
# gyroscope
gyr = np.array(fileData['gyr']['data'])
gyrts = np.array(fileData['gyr']['timestamps'])
# orientation
ori = np.array(fileData['ori']['data'])
orits = np.array(fileData['ori']['timestamps'])
# create training instance
ti = tri.TrainingInstance(key, emg, acc, gyr, ori, emgts, accts,
gyrts, orits)
# add length for resampling later to the sample length vector
sample_len_vec_emg.append(emg.shape[0])
sample_len_vec_others.append(acc.shape[0])
# split raw data
ti.separateRawData()
ti.consolidateData(None, False, True)
# append training instance to data list
data.append(ti)
# append class label to target list
target.append(tar_val)
                if key in codebook:
                    codebook[key].append(ti.getConsolidatedDataMatrix())
                else:
                    codebook[key] = [ti.getConsolidatedDataMatrix()]
avg_len_emg = int(np.mean(sample_len_vec_emg))
avg_len_acc = int(np.mean(sample_len_vec_others))
max_length_emg = np.amax(sample_len_vec_emg)
max_length_others = np.amax(sample_len_vec_others)
return labels, data, target, labelsdict, avg_len_emg, avg_len_acc, \
user_map, user_list, data_dict, max_length_emg, max_length_others,\
data_path, codebook
def normalizeTrainingData(data, max_length_emg, max_len_others):
'''
Method to normalize the training data to fixed length
:param data: (list) List of all the training instance objects
:param max_length_emg: (int) Normalized length for EMG signals
:param max_len_others: (int) Normalized length of IMU signals
:return: data (list) List of all the normalized training instance objects
'''
# data = np.array([ti.normalizeData(max_length_emg,max_len_others) for ti
# in data])
d = []
for i, ti in enumerate(data):
        if i % 50 == 0:
            print(str(i) + ' out of ' + str(
                len(data)) + ' training instances normalized')
        if i == 2 or i == 4:
            print('case')  # leftover debug marker
d.append(ti.normalizeData(max_length_emg, max_len_others))
return np.array(d)
def resampleTrainingData(data, sampling_rate, avg_len, emg=True, imu=True):
'''
Method to resample the training instances to a given sampling frequency
in HZ.
It calls consolidate data implicitly.
Can remove the consolidation to a different method.
:param data: (list) List of all the training instance objects
:param sampling_rate: (int) The new sampling rate in Hz
:param avg_len: (int) Average length of vectors in case both EMG and IMU
needs to be resampled and consolidated
:param emg: (boolean) Flag to indicate that we need to consider emg
signals for consolidating the data after resampling
:param imu: (boolean) Flag to indicate that we need to consider IMU
signals for consolidating the data after resampling
:return: data : resampled data
'''
# data = np.array([ti.resampleData(sampling_rate,avg_len,emg,imu) for ti
# in data])
d = []
for i, ti in enumerate(data):
        if i % 50 == 0:
            print(str(i) + ' out of ' + str(
                len(data)) + ' training instances resampled')
d.append(ti.resampleData(sampling_rate, avg_len, emg, imu))
return np.array(d)
def extractFeatures(data, scaler=None, window=True, rms=False, f_mfcc=False,
emg=True, imu=True):
'''
method to loop through all training instances and extract features from
the signals
@params: data (list) : list of training instances
             scaler (scaler object) : scaler object to scale features if
necessary
window (Boolean) : To get overlapping window features
rms (Boolean) : To get features from the rms value
of the signals in all directions
f_mfcc (Boolean) : to get the MFCC features as well
@return: data (list) : list of training instances with
extracted features
'''
# data = np.array([ti.extractFeatures(window,scaler,rms,f_mfcc,emg,
# imu) for ti in data])
d = []
for i, ti in enumerate(data):
        if i % 20 == 0:
            print('features extracted from ' + str(i) + ' out of ' + str(
                len(data)) + ' training instances')
d.append(ti.extractFeatures(window, scaler, rms, f_mfcc, emg, imu))
return np.array(d)
'''
def prepareDataPC(target, data):
consolidate = zip(target,data)
for lbl,d in consolidate:
con_mat = d.getConsolidatedFeatureMatrix()
if train_x is None:
train_x = con_mat
else:
train_x = np.append(train_x,con_mat,axis=0)
train_y.append(int(key))
return np.array(train_x),np.array(train_y),np.array(test_x),np.array(test_y)
'''
def prepareTrainingDataSvm(trainingIndexes, testingIndexes, target, data):
train_x = None # training data
train_y = [] # training labels
test_x = None # testing data
test_y = [] # testing labels
for tid in trainingIndexes:
key = target[tid]
ti = data[tid]
con_mat = ti.getConsolidatedFeatureMatrix()
if train_x is None:
train_x = con_mat
else:
train_x = np.append(train_x, con_mat, axis=0)
train_y.append(int(key))
for tid in testingIndexes:
key = target[tid]
ti = data[tid]
con_mat = ti.getConsolidatedFeatureMatrix()
if test_x is None:
test_x = con_mat
else:
test_x = np.append(test_x, con_mat, axis=0)
test_y.append(int(key))
return np.array(train_x), np.array(train_y), np.array(test_x), np.array(
test_y)
def prepareTrainingDataHmmFeatures(trainingIndexes, target, data):
trainingData = {}
for l, tid in enumerate(trainingIndexes):
        # printing current status so that the wait is not too boring :-P
        if l % 50 == 0:
            print(str(l) + ' out of ' + str(
                len(trainingIndexes)) + ' training instances prepared')
key = target[tid]
ti = data[tid]
# con_data = ti.getConsolidatedDataMatrix()
if key in trainingData:
# get data from existing dictionary
trld = trainingData.get(key)
lbl_data = trld.get('data')
n_data = trld.get('datal')
# extract data from the training instance
# get consolidated data matrix
con_mat = ti.getConsolidatedFeatureMatrix()
# append
lbl_data = np.append(lbl_data, con_mat, axis=0)
n_data.append(con_mat.shape[0])
# replace in the existing dict
trld['data'] = lbl_data
trld['datal'] = n_data
trainingData[key] = trld
else:
trld = {}
# extract others and get features for creating an svm model
con_mat = ti.getConsolidatedFeatureMatrix()
trld['data'] = con_mat
trld['datal'] = [con_mat.shape[0]]
trainingData[key] = trld
return trainingData
def discritizeLabels(target):
n_classes = np.unique(target)
d_labels = []
for t in target:
        d_l = np.zeros(n_classes.size, dtype=int)
d_l[t] = 1
d_labels.append(d_l)
return np.array(d_labels)
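# Illustrative sketch (not part of the original module): discritizeLabels turns
# integer class labels into one-hot rows, e.g. [0, 2, 1] (three classes) becomes
# [[1, 0, 0], [0, 0, 1], [0, 1, 0]].
def _example_discritize_labels():
    return discritizeLabels([0, 2, 1])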
def splitDataset(train, test, target, data):
train_x = np.take(data, train, axis=0)
train_y = np.take(target, train, axis=0)
val_x = np.take(data, test, axis=0)
val_y = np.take(target, test, axis=0)
return train_x, train_y, val_x, val_y
def prepareDataset(data):
d = []
for i, ti in enumerate(data):
        if i % 20 == 0:
            print(
                'prepared ' + str(i) + ' out of ' + str(len(data)) + ' instances')
d.append(ti.consolidatedDataMatrix)
return np.array(d)
def prepareTrainingDataHmmRaw(trainingIndexes, target, data):
trainingData = {}
for l, tid in enumerate(trainingIndexes):
        # printing current status so that the wait is not too boring :-P
        if l % 50 == 0:
            print(str(l) + ' out of ' + str(
                len(trainingIndexes)) + ' training instances prepared')
key = target[tid]
ti = data[tid]
# con_data = ti.getConsolidatedDataMatrix()
if key in trainingData:
# get data from existing dictionary
trld = trainingData.get(key)
lbl_data = trld.get('data')
n_data = trld.get('datal')
# extract data from the training instance
# get consolidated data matrix
con_mat = ti.getConsolidatedDataMatrix()
# append
lbl_data = np.append(lbl_data, con_mat, axis=0)
n_data.append(con_mat.shape[0])
# replace in the existing dict
trld['data'] = lbl_data
trld['datal'] = n_data
trainingData[key] = trld
else:
trld = {}
# extract others and get features for creating an svm model
con_mat = ti.getConsolidatedDataMatrix()
trld['data'] = con_mat
trld['datal'] = [con_mat.shape[0]]
trainingData[key] = trld
return trainingData
def prepareTrainingData(trainingIndexes, target, data):
# dictionary that holds all the consolidated training data
trainingDict = {}
for tid in trainingIndexes:
key = target[tid]
ti = data[tid]
# call separate raw data to create models for the others but for now
# use raw data
if key in trainingDict:
# get data from existing dictionary
trld = trainingDict.get(key)
emg = trld.get('emg')
emgl = trld.get('emgl')
acc = trld.get('acc')
accl = trld.get('accl')
gyr = trld.get('gyr')
gyrl = trld.get('gyrl')
ori = trld.get('ori')
oril = trld.get('oril')
# extract data from the training instance
emg_t, acc_t, gyr_t, ori_t = ti.getRawData()
# append
emg = np.append(emg, emg_t, axis=0)
emgl.append(len(emg_t))
acc = np.append(acc, acc_t, axis=0)
accl.append(len(acc_t))
gyr = np.append(gyr, gyr_t, axis=0)
gyrl.append(len(gyr_t))
ori = np.append(ori, ori_t, axis=0)
oril.append(len(ori_t))
# replace in the existing dict
trld['emg'] = emg
trld['emgl'] = emgl
trld['acc'] = acc
trld['accl'] = accl
trld['gyr'] = gyr
trld['gyrl'] = gyrl
trld['ori'] = ori
trld['oril'] = oril
trainingDict[key] = trld
else:
trld = {}
# extract others and get features for creating an svm model
emg_t, acc_t, gyr_t, ori_t = ti.getRawData()
trld['emg'] = emg_t
trld['emgl'] = [len(emg_t)]
trld['acc'] = acc_t
trld['accl'] = [len(acc_t)]
trld['gyr'] = gyr_t
trld['gyrl'] = [len(gyr_t)]
trld['ori'] = ori_t
trld['oril'] = [len(ori_t)]
trainingDict[key] = trld
return trainingDict
|
|
# Author: Jacob Schofield <[email protected]>
# Copyright (c) 2017 HelpSocial, Inc.
# See LICENSE for details
try:
import ujson as json
except ImportError:
import json
from requests import Request, Session
from sseclient import SSEClient
from threading import Thread
from time import sleep, time
from .auth import ApplicationAuth, UserAuth, SSEAuth
from .decorators import Authenticate
from .exceptions import ApiException, AuthenticationException, \
BadRequestException, ForbiddenException, \
NotFoundException
from .utils import data_get, is_timeout, join
API_HOST = 'api.helpsocial.com'
API_VERSION = '2.0'
class Api(object):
"""Base Api class wraps the http transport layer with decorators for
interaction with the HelpSocial Connect Api.
It is possible to use this class directly, however it is advised that the
RestConnectClient be used for the convenience methods it supplies.
:type auth_scope: string
:param auth_scope: the client auth scope to be used in authentication.
:type api_key: string
:param api_key: the api key to be used by the client in order to authenticate requests.
:type user_token: string
:param user_token: the user's auth token that should be used to authenticate a request.
:type host: string
:param host: the api host to connect to. default ``API_HOST``.
:type ssl: bool
:param ssl: should the client connect over ssl. default True.
:type version: string
:param version: the api version. default ``2.0``
:type request_hooks: list
:param request_hooks: a list of callable request hooks that should be called before the request executes.
:type response_hooks: list
    :param response_hooks: a list of callable response hooks that should be called after the request completes.
"""
def __init__(self,
auth_scope, api_key,
user_token=None,
host=None, ssl=True, version=None,
request_hooks=None, response_hooks=None):
# set defaults
host = API_HOST if host is None else host
version = API_VERSION if version is None else version
ssl = True if ssl is None else ssl
request_hooks = [] if request_hooks is None else request_hooks
response_hooks = [] if response_hooks is None else response_hooks
self.api_key = api_key
self.auth_scope = auth_scope
self.user_token = user_token
self._http = Session()
self._api_host = host
self._api_version = version
self._ssl = ssl
self._request_hooks = request_hooks
self._response_hooks = response_hooks
@staticmethod
def process_params(params, csv_keys=None):
"""Filter the params keyword argument passed to the function.
:type params dict
:param params:
:type csv_keys: list
:param csv_keys:
:rtype: dict
:return: the filtered parameters
"""
if params is None:
return None
csv_keys = [] if csv_keys is None else csv_keys
filtered = params.copy()
for (key, value) in params.items():
if value is None:
del filtered[key]
elif key in csv_keys:
filtered[key] = join(value, ',') if isinstance(value, list) else str(value)
elif isinstance(value, bool):
filtered[key] = int(value)
return filtered
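    # Illustrative sketch (not part of the original class): process_params
    # drops None values, joins csv_keys lists into comma separated strings,
    # and casts booleans to ints. Assuming utils.join stringifies list items:
    #
    #   Api.process_params({'managed': True, 'limit': None, 'ids': [1, 2]},
    #                      csv_keys=['ids'])
    #   # -> {'managed': 1, 'ids': '1,2'}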
def set_user_token(self, token):
"""Set the default user token for the client."""
self.user_token = token
def get_user_token(self):
"""
:rtype: string
:return:
"""
return self.user_token
def register_event_hook(self, event, hook):
"""Register a new event hook.
:type event: string
:param event: the event [request, or response] to register the hook for.
:type hook: callable
:param hook: the action to call on the specified event.
"""
        if not callable(hook):
            raise ValueError('callable required.')
        if event == 'request':
            self._request_hooks.append(hook)
        elif event == 'response':
            self._response_hooks.append(hook)
        else:
            raise ValueError('event must be request or response')
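    # Illustrative sketch (not part of the original class): a request hook
    # receives the prepared request, a response hook receives the prepared
    # request and the response. The logging hooks below are hypothetical.
    #
    #   client.register_event_hook('request', lambda req: print(req.url))
    #   client.register_event_hook('response',
    #                              lambda req, res: print(res.status_code))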
def get(self, path, params=None, auth=None, **requests_kwargs):
"""Perform a Http GET request on the api at the specified path.
:type path: string
:param path:
:type params: dict
:param params:
:type auth: requests.AuthBase
:param auth:
:rtype: requests.Response
:return: :class:`Response <Response>` object
:raises ApiException:
:raises requests.RequestException:
:raises ssl.SSLError:
"""
auth = auth if auth is not None else self.get_auth()
kwargs = Api._pull_request_kwargs(requests_kwargs)
return self.__execute(
Request('GET', self.get_request_uri(path), params=params, auth=auth, **kwargs),
**requests_kwargs
)
def put(self, path, params=None, json=None,
auth=None, **requests_kwargs):
"""Perform a Http PUT request on the api at the specified path.
:type path: string
:param path:
:type params: dict
:param params:
:type json: dict
:param json:
:type auth: requests.AuthBase
:param auth:
:rtype: requests.Response
:return: :class:`Response <Response>` object
:raises ApiException:
:raises requests.RequestException:
:raises ssl.SSLError:
"""
auth = auth if auth is not None else self.get_auth()
kwargs = Api._pull_request_kwargs(requests_kwargs)
return self.__execute(
Request('PUT', self.get_request_uri(path), params=params, json=json, auth=auth, **kwargs),
**requests_kwargs
)
def post(self, path, params=None, json=None,
auth=None, **requests_kwargs):
"""Perform a Http POST request on the api at the specified path.
:type path: string
:param path:
:type params: dict
:param params:
:type json: dict
:param json:
:type auth: requests.AuthBase
:param auth:
:rtype: requests.Response
:return: :class:`Response <Response>` object
:raises ApiException:
:raises requests.RequestException:
:raises ssl.SSLError:
"""
auth = auth if auth is not None else self.get_auth()
kwargs = Api._pull_request_kwargs(requests_kwargs)
return self.__execute(
Request('POST', self.get_request_uri(path), params=params, json=json, auth=auth, **kwargs),
**requests_kwargs
)
def delete(self, path, params=None, json=None,
auth=None, **requests_kwargs):
"""Perform a Http DELETE request on the api at the specified path.
:type path: string
:param path:
:type params: dict
:param params:
:type json: dict
:param json:
:type auth: requests.AuthBase
:param auth:
:rtype: requests.Response
:return: :class:`Response <Response>` object
:raises ApiException:
:raises requests.RequestException:
:raises ssl.SSLError:
"""
auth = auth if auth is not None else self.get_auth()
kwargs = Api._pull_request_kwargs(requests_kwargs)
return self.__execute(
Request('DELETE', self.get_request_uri(path), params=params, json=json, auth=auth, **kwargs),
**requests_kwargs
)
def get_request_uri(self, path):
"""Retrieve the full url for the api request using the ``path``.
:type path: string
:param path: resource path
:rtype: string
:return: the full url
:raises ApiException:
"""
scheme = 'https' if self._ssl else 'http'
return '{scheme}://{host}/{version}/{path}'.format(
scheme=scheme,
host=self._api_host,
version=self._api_version,
path=path.lstrip('/')
)
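    # Illustrative sketch (not part of the original class): with the defaults
    # (ssl=True, host=API_HOST, version=API_VERSION),
    #   self.get_request_uri('/network_profiles')
    # evaluates to
    #   'https://api.helpsocial.com/2.0/network_profiles'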
def get_auth(self):
"""Auth factory for the client.
:rtype: requests.AuthBase
:return: :class:`AuthBase <AuthBase>` object
"""
if self.user_token is None:
return ApplicationAuth(self.auth_scope, self.api_key)
        return UserAuth(self.auth_scope, self.api_key, self.user_token)
@staticmethod
def has_accept_header(headers):
"""Check if the headers contain an accept header.
:type headers: dict
:param headers:
:rtype: bool
:return:
"""
for key in headers.keys():
if key.lower() == 'accept':
return True
return False
@staticmethod
def _pull_request_kwargs(requests_kwargs):
"""Remove non server request keyword arguments from the arguments list.
:type requests_kwargs: dict
:param requests_kwargs:
:rtype: dict
:return: the keyword arguments for a request instance
"""
keys = ['headers', 'files', 'data', 'cookies', 'hooks']
kwargs = {}
for key in keys:
if key in requests_kwargs:
kwargs[key] = requests_kwargs[key]
del requests_kwargs[key]
return kwargs
def __execute(self, request, **transport_kwargs):
"""Wrap the requests module send method in order to call
any request (response) hooks defined.
:type request: requests.Request
:param request: :class:`requests.Request <requests.Request>` instance.
:type transport_kwargs: dict
:param transport_kwargs: keyword arguments for the transport layer
:rtype: requests.Response
:return: :class:`Response <Response>` object
:raises ApiException:
:raises requests.RequestException:
:raises ssl.SSLError:
"""
        if not request.headers:
            request.headers = {'Accept': 'application/json'}
        elif not Api.has_accept_header(request.headers):
            request.headers['Accept'] = 'application/json'
prepared = request.prepare()
for hook in self._request_hooks:
hook(prepared)
http_error_exception = not transport_kwargs.get('http_errors', False)
if 'http_errors' in transport_kwargs:
del transport_kwargs['http_errors']
response = self._http.send(prepared, **transport_kwargs)
for hook in self._response_hooks:
hook(prepared, response)
if response.status_code >= 400 and http_error_exception:
raise ApiException.make(response)
return response
class RestConnectClient(Api):
"""HelpSocial Connect Api rest client. Provides convenience methods for
available api actions on top of the default http transport methods
defined by :class:`Api <Api>`.
"""
def authenticate(self, username, password):
"""Authenticate the user.
:type username: string
:param username: the user's username
:type password: string
:param password: the user's password
:rtype: dict
:return: the token object
:raises ApiException:
:raises requests.RequestException:
:raises ssl.SSLError:
"""
auth = ApplicationAuth(self.auth_scope, self.api_key)
body = {
'username': username,
'password': password,
}
response = self.post('tokens', json=body, auth=auth)
return data_get(response.json(), 'data.token')
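    # Illustrative sketch (not part of the original class): a typical login
    # flow using hypothetical credentials.
    #
    #   client = RestConnectClient('my-scope', 'my-api-key')
    #   token = client.authenticate('username', 'password')  # token object (dict)
    #   # the token value inside the returned object can then be passed to
    #   # client.set_user_token(...) for subsequent user-scoped requests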
@Authenticate(Api.get_auth)
def list_profiles(self, auth=None, managed=None, limit=25):
query = {
'managed': managed,
'limit': limit
}
response = self.get('network_profiles',
params=self.process_params(query),
auth=auth)
return data_get(response.json(), 'data.accounts')
@Authenticate(Api.get_auth)
def get_sse_authorization(self, auth=None):
"""Retrieve an SSE authorization token for the authenticated user.
:type auth: auth.UserAuth
:param auth: :class:`auth.UserAuth <auth.UserAuth>` object
:rtype: string
:return: the authorization code
:raises ApiException:
:raises requests.RequestException:
:raises ssl.SSLError:
"""
return data_get(
self.get('streams/sse/authorization', auth=auth).json(),
'data.authorization'
)
class StreamingConnectClient(Api):
"""HelpSocial Connect Api streaming client. Provides convenience methods
for the available streams produced by the Connection Api.
:type auth_scope: string
:param auth_scope: the client auth scope to be used in authentication.
:type api_key: string
:param api_key: the api key to be used by the client in order to authenticate requests.
:type dispatcher: Dispatcher
:param dispatcher: the dispatcher is responsible for handling each stream event.
:type user_token: string
:param user_token: the user's auth token that should be used to authenticate a request.
:type host: string
:param host: the api host to connect to. default ``API_HOST``.
:type ssl: bool
:param ssl: should the client connect over ssl. default True.
:type version: string
:param version: the api version. default ``2.0``
:type request_hooks: list
:param request_hooks: a list of callable request hooks that should be called before the request executes.
:type response_hooks: list
:param response_hooks: a list of callable response hooks that should be called after the request completes.
"""
_sse_stream_headers = {'Accept': 'text/event-stream'}
_json_stream_headers = {'Accept': 'application/x-json-stream'}
def __init__(self,
auth_scope, api_key, dispatcher,
user_token=None,
host=None, ssl=True, version=None,
request_hooks=None, response_hooks=None):
# initialize api
super().__init__(auth_scope, api_key, user_token=user_token,
host=host, ssl=ssl, version=version,
request_hooks=request_hooks,
response_hooks=response_hooks)
# ensure a dispatcher has been defined
# for the client
self._dispatchers = [dispatcher]
self._running = False
@staticmethod
def stream_complete(data):
"""Check if a bounded stream is complete."""
        try:
            return 'complete' in json.loads(data)
        except ValueError:
            # ValueError covers json.JSONDecodeError and ujson's decode errors
            pass
        return False
@Authenticate(Api.get_auth)
    def conversations(self, params=None, auth=None, async_=False):
        """Stream conversation json.
        :type async_: bool
        :param async_: run request asynchronously
        :type params: dict
        :param params: request parameters
        :type auth: auth.TokenAuth
        :param auth: request authentication method
        :raises ApiException:
        :raises requests.RequestException:
        :raises ssl.SSLError:
        """
        if self._running:
            raise RuntimeError('stream already running')
        self._start('streams/conversation',
                    auth,
                    params=Api.process_params(params),
                    headers=self._json_stream_headers,
                    async_=async_,
                    sse=False)
@Authenticate(Api.get_auth)
    def activities(self, params=None, auth=None, async_=False):
        """Stream activity json.
        :type async_: bool
        :param async_: run request asynchronously
        :type params: dict
        :param params: request parameters
        :type auth: auth.TokenAuth
        :param auth: request authentication method
        :raises ApiException:
        :raises requests.RequestException:
        :raises ssl.SSLError:
        """
        if self._running:
            raise RuntimeError('stream already running')
        self._start('streams/activity',
                    auth,
                    params=Api.process_params(params),
                    headers=self._json_stream_headers,
                    async_=async_,
                    sse=False)
@Authenticate(Api.get_auth)
    def events(self, params=None, auth=None, async_=False):
        """Stream event json.
        :type async_: bool
        :param async_: run request asynchronously
        :type params: dict
        :param params: request parameters
        :type auth: auth.TokenAuth
        :param auth: request authentication method
        :raises ApiException:
        :raises requests.RequestException:
        :raises ssl.SSLError:
        """
        if self._running:
            raise RuntimeError('stream already running')
        self._start('streams/event',
                    auth,
                    params=Api.process_params(params, csv_keys=['event_types']),
                    headers=self._json_stream_headers,
                    async_=async_,
                    sse=False)
    def sse(self, authorization, params=None, async_=False):
        """Stream server sent events.
        :type async_: bool
        :param async_: run request asynchronously
        :type authorization: string
        :param authorization: sse stream authorization code
        :type params: dict
        :param params: request parameters
        :raises ApiException:
        :raises requests.RequestException:
        :raises ssl.SSLError:
        """
        if self._running:
            raise RuntimeError('stream already running')
        self._start('streams/sse',
                    SSEAuth(authorization),
                    params=Api.process_params(params, csv_keys=['event_types']),
                    headers=self._sse_stream_headers,
                    async_=async_,
                    sse=True)
def is_alive(self):
"""Check if the stream is alive."""
return self._running
def shutdown(self):
"""Shutdown the running stream."""
if not self._running:
return
self._running = False
if self._sse:
# We cannot manually trigger a shutdown of the underlying thread,
# and due to the implementation of the SSEClient event loop
# we cannot quickly discover the call to shutdown and exit
# the event loop. In order to shutdown the SSEClient in a timely
# manner we forcefully close the connection, in order to trigger
            # an exception within the package. We then catch the exception and
# continue the shutdown process.
self._sse.close()
if self._async:
# Allow the underlying thread time to shut down gracefully.
start = time()
while self._thread.is_alive():
if 30 < (time() - start):
break
sleep(1)
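    # Illustrative sketch (not part of the original class): a typical
    # asynchronous stream lifecycle, assuming a hypothetical `dispatcher`
    # object accepted by the constructor.
    #
    #   client = StreamingConnectClient('my-scope', 'my-api-key', dispatcher,
    #                                   user_token='user-token')
    #   client.activities(async_=True)  # returns immediately; stream runs on a thread
    #   ...                             # do other work while events are dispatched
    #   client.shutdown()               # stops the stream, waiting up to ~30s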
    def _start(self, path, auth, params=None, headers=None, async_=False, sse=False):
        """Start the stream on a new thread if asynchronous.
        :type path: string
        :param path: streaming resource path
        :type auth: requests.AuthBase
        :param auth: request authentication method
        :type params: dict
        :param params: request parameters
        :type async_: bool
        :param async_: run request asynchronously
        :type sse: bool
        :param sse: is this a stream of server sent events
        """
        self._running = True
        self._async = async_
        self._sse = None
        if async_:
            self._thread = Thread(target=self._run,
                                  args=(path, auth,),
                                  kwargs={'headers': headers, 'params': params, 'sse': sse})
            self._thread.start()
        else:
            self._run(path, auth, params=params, headers=headers, sse=sse)
def _run(self, path, auth, params=None, headers=None, sse=False):
"""Run the desired stream.
:type path: string
:param path: the path to the streaming resource.
:type auth: requests.AuthBase
:param auth: request authentication method
:type params: dict
:param params: request parameters
:type sse: bool
:param sse: is this a stream of server sent events
"""
connection = None
initial_connection = True
disconnect_counter = 0
backoff_limit_seconds = 300
try:
while self._running:
try:
connection = self._open_stream(path, auth, params=params, headers=headers)
initial_connection = False
disconnect_counter = 0
except (AuthenticationException,
ForbiddenException,
BadRequestException,
NotFoundException) as ex:
# If we encounter any of these exceptions there
# is no way we will be able to make the
# connection with the request as it stands.
raise ex
except KeyboardInterrupt:
# User terminated console app
break
except Exception as ex:
if initial_connection:
# The initial attempt to connect to stream
# has failed for some reason.
raise ex
# The stream has been interrupted
# and we should attempt to reconnect. We apply
# an exponential back off to not overload the
# server and allow it time to heal.
if not self._running:
break
disconnect_counter += 1
sleep(min(2 ** disconnect_counter, backoff_limit_seconds))
continue
if not self._running:
break
try:
if sse:
self._stream_sse(connection)
else:
self._stream_json(connection)
except KeyboardInterrupt:
# User terminated console app
break
except Exception as ex:
if not is_timeout(ex):
# a fatal exception has occurred
raise ex
disconnect_counter += 1
sleep(min(2 ** disconnect_counter, backoff_limit_seconds))
finally:
# clean up the allocated connection object
# and make sure we've flagged that we're no longer
# running
self._running = False
if connection:
connection.close()
def _open_stream(self, path, auth, params=None, headers=None):
"""Open the streaming connection.
:type path: string
:param path:
:type auth: requests.AuthBase
:param auth:
:type params: dict
:param params:
:rtype: requests.Response
:return: the connected request response
:raises ApiException:
"""
response = self.get(path, params=params, auth=auth, stream=True, headers=headers)
if response.encoding is None:
response.encoding = 'utf-8'
return response
def _stream_sse(self, connection):
"""Handle (parse) a stream of Server Sent Events
:type connection: requests.Response
:param connection:
"""
try:
self._sse = SSEClient(connection)
for event in self._sse.events():
if not self._running:
break
self._dispatch(json.loads(event.data))
except AttributeError as exc:
# If we are no longer running then we caused the exception by closing
# the underlying http connection. The SSEClient event loop
# does not allow for an arbitrary exit between reads, since
# an event is only yielded when data is received.
if self._running:
raise exc
finally:
if self._sse:
self._sse.close()
def _stream_json(self, connection):
"""Handle (parse) a stream of newline delimited json objects.
:type connection: requests.Response
:param connection:
"""
for line in connection.iter_lines(decode_unicode=True):
if not self._running:
break
if not line:
continue
decoded = json.loads(line)
self._dispatch(decoded)
if StreamingConnectClient.stream_complete(line):
self._running = False
break
def _dispatch(self, data):
"""Dispatch the stream data using each registered dispatcher.
:param data:
"""
for dispatcher in self._dispatchers:
dispatcher.dispatch(data)
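# --- Hedged usage sketch (added; not part of the original module) ---
# Illustrates how the streaming client above might be driven. It assumes the
# constructor accepts a `dispatcher` keyword (other arguments omitted) and that
# authentication is already configured; all names below are illustrative only.
# The `async` keyword mirrors this module's (pre-Python-3.7) parameter name.
if __name__ == "__main__":
    class PrintDispatcher(object):
        """Toy dispatcher: print every decoded stream payload."""
        def dispatch(self, data):
            print(data)

    client = StreamingConnectClient(dispatcher=PrintDispatcher())
    try:
        # Run the conversation stream on a background thread.
        client.conversations(async=True)
        while client.is_alive():
            sleep(5)
    except KeyboardInterrupt:
        pass
    finally:
        # shutdown() closes any SSE connection and waits up to ~30 seconds
        # for the worker thread to exit gracefully.
        client.shutdown()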
|
|
# Copyright 2015 Daniel Neve
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#System imports
import logging
#Library imports
import requests
#Google Imports
from google.appengine.ext.webapp import template
#Project imports
import common.base
import secrets.facebook
import defines
import storage
def NetGetRealTimeSubscriptions():
url = "https://graph.facebook.com/" + defines.PLATFORM_VERSION + "/" + str( secrets.facebook.CLIENT_ID ) + "/subscriptions"
payload = \
{
'access_token' : secrets.facebook.ACCESS_TOKEN
}
response = requests.get( url = url, params = payload )
return ( response.status_code, response.json() )
class Handler( common.base.Handler ):
def Main( self, params ):
logging.debug( "Facebook Admin Main()" )
# Get list of subscribed urls from facebook
subscriptions = NetGetRealTimeSubscriptions()[ 1 ]
# Construct the URL that will allow us to subscribe with our current URL
subscribeUrl = self.app.router.build \
(
self.request,
"admin_branch",
(),
{
'handler' : "facebook",
'branch' : "subscribe",
'_full' : False,
'_scheme' : "https"
}
)
# Construct the URL that will allow us to unsubscribe with our current URL
unsubscribeUrl = self.app.router.build \
(
self.request,
"admin_branch",
(),
{
'handler' : "facebook",
'branch' : "unsubscribe",
'_full' : False,
'_scheme' : "https"
}
)
for subscription in subscriptions[ 'data' ]:
subscription[ "unsubscribe" ] = { "url" : unsubscribeUrl }
path = "templates/admin/facebook.html"
values = \
{
"subscriptions" : subscriptions[ 'data' ],
"subscribe" :
{
"url" : subscribeUrl
}
}
if len( subscriptions[ 'data' ] ) == 1:
values[ "pageonly" ] = True
self.response.data = template.render( path, values )
def Subscribe( self, params ):
logging.debug( "Facebook Subscribe()" )
if 'object' not in self.request.POST:
self.response.status = requests.codes.bad_request
logging.error( "Missing object parameter" )
return
if 'field' not in self.request.POST:
self.response.status = requests.codes.bad_request
logging.error( "Missing field parameter" )
return
callbackUrl = self.app.router.build \
(
self.request,
"callback",
(),
{
'version' : 1,
'handler' : "facebook",
'_full' : True,
'_scheme' : "https"
}
)
logging.debug( "callback url: " + callbackUrl )
import random
import string
authToken = ''.join( random.SystemRandom().choice( string.ascii_uppercase + string.digits ) for _ in range( 10 ) )
tokenStorage = storage.CreateTemporaryAuthToken( defines.SUBSCRIBE_HANDSHAKE_TOKEN, authToken )
tokenStorage.put()
payload = \
{
'object' : self.request.POST[ 'object' ],
'fields' : self.request.POST[ 'field' ],
'callback_url' : callbackUrl,
'verify_token' : authToken,
'access_token' : secrets.facebook.ACCESS_TOKEN
}
url = "https://graph.facebook.com/" + defines.PLATFORM_VERSION + "/" + str( secrets.facebook.CLIENT_ID ) + "/subscriptions"
response = requests.post( url, params = payload )
self.ReturnToMain( response )
def Unsubscribe( self, params ):
logging.debug( "Facebook Unsubscribe()" )
if 'object' not in self.request.POST:
self.response.status = requests.codes.bad_request
logging.error( "Missing object parameter" )
return
payload = \
{
'object' : self.request.POST[ 'object' ],
'access_token' : secrets.facebook.ACCESS_TOKEN
}
if 'field' in self.request.POST:
payload[ 'fields' ] = self.request.POST[ 'field' ]
url = "https://graph.facebook.com/" + defines.PLATFORM_VERSION + "/" + str( secrets.facebook.CLIENT_ID ) + "/subscriptions"
response = requests.delete( url, params = payload )
logging.debug( "URL: " + response.url )
self.ReturnToMain( response )
def ReturnToMain( self, response ):
# Construct the URL to the facebook admin page
redirectUrl = self.app.router.build \
(
self.request,
"admin_handler",
(),
{
'handler' : "facebook",
'_full' : False,
'_scheme' : "https"
}
)
path = "templates/admin/facebook_feedback.html"
values = \
{
"redirect" : response.status_code == requests.codes.ok,
"redirectUrl" : redirectUrl,
"feedback" : response.text
}
self.response.data = template.render( path, values )
self.response.status = response.status_code
def List( self ):
logging.debug( "Facebook Admin List()" )
response = NetGetRealTimeSubscriptions()
self.response.status = response[ 0 ]
self.response.data = str( response[ 1 ] )
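# Hedged illustration (added; not part of the original handler). As Main() and
# List() above assume, NetGetRealTimeSubscriptions() returns a tuple of
# (HTTP status code, parsed JSON body) and the body lists the current
# subscriptions under its 'data' key.
def LogSubscriptions():
    status, body = NetGetRealTimeSubscriptions()
    logging.debug( "subscriptions status: %d" % status )
    for subscription in body.get( 'data', [] ):
        logging.debug( "subscribed object: %s" % subscription.get( 'object' ) )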
|
|
"""
Qt techs management dialog
"""
import logging
from PyQt5.QtWidgets import QDialog
from PyQt5.QtWidgets import QListWidgetItem
from PyQt5.QtGui import QPixmap
from PyQt5.QtGui import QBrush
from PyQt5.QtGui import QColor
from PIL.ImageQt import ImageQt
import qt_techs
import saves
def new_tech_slot(tech_asset):
module = {
"module": tech_asset,
"scriptData": {}
}
return module
def make_tech_list(tech_names):
techs = []
for i in tech_names:
techs.append(saves.new_item_data(i))
return techs
class Techs(object):
def __init__(self, main_window):
self.dialog = QDialog(main_window.window)
self.ui = qt_techs.Ui_Dialog()
self.ui.setupUi(self.dialog)
self.main_window = main_window
self.assets = main_window.assets
self.player = main_window.player
self.selected_tech = None
self.ui.tech_list.currentItemChanged.connect(lambda: self.set_select(self.ui.tech_list))
self.ui.tech_list.itemDoubleClicked.connect(self.add_tech)
self.ui.known_list.currentItemChanged.connect(lambda: self.set_select(self.ui.known_list))
self.ui.known_list.itemDoubleClicked.connect(self.remove_tech)
self.techs = [None, None, None, None]
self.equip = [None, None, None, None]
self.populate_equipped()
self.ui.toggle_button.clicked.connect(self.toggle_tech)
self.ui.add_button.clicked.connect(self.add_tech)
self.ui.remove_button.clicked.connect(self.remove_tech)
self.ui.unlock_button.clicked.connect(self.learn_all_techs)
self.ui.icon1_clear.clicked.connect(lambda: self.clear_tech(0))
self.ui.icon2_clear.clicked.connect(lambda: self.clear_tech(1))
self.ui.icon3_clear.clicked.connect(lambda: self.clear_tech(2))
self.ui.icon4_clear.clicked.connect(lambda: self.clear_tech(3))
self.ui.icon1_button.clicked.connect(lambda: self.set_tech(0))
self.ui.icon2_button.clicked.connect(lambda: self.set_tech(1))
self.ui.icon3_button.clicked.connect(lambda: self.set_tech(2))
self.ui.icon4_button.clicked.connect(lambda: self.set_tech(3))
self.update_lists()
self.ui.tech_list.setFocus()
if self.ui.tech_list.count() > 0:
self.ui.tech_list.setCurrentRow(0)
else:
self.ui.known_list.setCurrentRow(0)
def populate_equipped(self):
current = 1
for i in self.player.get_equipped_techs():
if i is None:
continue
try:
name = i["content"]["name"]
tech = self.assets.techs().get_tech(name)
icon = QPixmap.fromImage(ImageQt(tech[1]))
getattr(self.ui, "icon"+str(current)).setPixmap(icon.scaled(32, 32))
getattr(self.ui, "icon"+str(current)).setToolTip(tech[0]["shortdescription"])
self.techs[current-1] = new_tech_slot(tech[0]["techModule"])
self.equip[current-1] = tech[0]["itemName"]
except TypeError:
logging.exception("Couldn't load tech")
current += 1
def update_lists(self):
visible_techs = [x["name"] for x in self.player.get_visible_techs()]
self.ui.tech_list.clear()
for tech in sorted(self.assets.techs().all()):
if tech not in visible_techs:
item = QListWidgetItem(tech)
self.ui.tech_list.addItem(item)
enabled = [x["name"] for x in self.player.get_enabled_techs()]
self.ui.known_list.clear()
for tech in sorted(visible_techs):
item = QListWidgetItem(tech)
if tech in enabled:
item.setBackground(QBrush(QColor("lightBlue")))
self.ui.known_list.addItem(item)
def update_selection(self):
tech = self.assets.techs().get_tech(self.selected_tech)
# this is only used if player has corrupt/missing modded techs known
if tech is None:
self.ui.tech_info.setText("Unknown Tech")
self.ui.current_icon.setPixmap(QPixmap())
self.ui.add_button.setEnabled(False)
self.ui.remove_button.setEnabled(True)
return
visible = [x["name"] for x in self.player.get_visible_techs()]
tech_info = "<strong>%s (%s)</strong><br><br>" % (tech[0]["shortdescription"],
tech[0]["itemName"])
tech_info += "<strong>Type:</strong> %s<br>" % tech[3]["type"]
tech_info += "<strong>Rarity:</strong> %s<br>" % tech[0]["rarity"]
tech_info += "<strong>Module:</strong> %s<br><br>" % tech[0]["techModule"]
tech_info += tech[0]["description"]+"<br>"
self.ui.tech_info.setText(tech_info)
self.ui.current_icon.setPixmap(QPixmap.fromImage(ImageQt(tech[1])).scaled(32, 32))
slots = ["head", "body", "legs", "suit"]
index = 1
for slot in slots:
set_button = getattr(self.ui, "icon" + str(index) + "_button")
clear_button = getattr(self.ui, "icon" + str(index) + "_clear")
can_set = tech[3]["type"] == slot
is_set = self.equip[index-1] is not None
set_button.setEnabled(can_set)
clear_button.setEnabled(is_set)
index += 1
can_add = self.selected_tech not in visible
self.ui.add_button.setEnabled(can_add)
self.ui.remove_button.setEnabled(not can_add)
self.ui.toggle_button.setEnabled(not can_add)
def set_select(self, source):
selected = source.currentItem()
if selected is not None:
self.selected_tech = selected.text()
else:
return
self.update_selection()
def toggle_tech(self):
enabled = [x["name"] for x in self.player.get_enabled_techs()]
item = self.selected_tech
if item in enabled:
new_techs = [x for x in enabled if x != item]
self.player.set_enabled_techs(make_tech_list(new_techs))
else:
enabled.append(item)
self.player.set_enabled_techs(make_tech_list(enabled))
self.update_lists()
self.update_selection()
def add_tech(self):
item = self.selected_tech
visible = [x["name"] for x in self.player.get_visible_techs()]
visible.append(item)
self.player.set_visible_techs(make_tech_list(visible))
self.update_lists()
self.update_selection()
def remove_tech(self):
if self.selected_tech is None:
return
item = self.selected_tech
visible = [x["name"] for x in self.player.get_visible_techs()]
enabled = [x["name"] for x in self.player.get_enabled_techs()]
self.player.set_visible_techs(make_tech_list([x for x in visible if x != item]))
self.player.set_enabled_techs(make_tech_list([x for x in enabled if x != item]))
self.update_lists()
self.update_selection()
def learn_all_techs(self):
all_techs = self.assets.techs().all()
self.player.set_visible_techs(make_tech_list(all_techs))
self.player.set_enabled_techs(make_tech_list(all_techs))
self.update_lists()
self.update_selection()
def set_tech(self, index):
if self.selected_tech is None:
return
tech_name = self.selected_tech
tech = self.assets.techs().get_tech(tech_name)
if tech is None:
return
icon = QPixmap.fromImage(ImageQt(tech[1]))
getattr(self.ui, "icon"+str(index+1)).setPixmap(icon.scaled(32, 32))
getattr(self.ui, "icon"+str(index+1)).setToolTip(tech[0]["shortdescription"])
self.techs[index] = new_tech_slot(tech[0]["techModule"])
self.equip[index] = tech[0]["itemName"]
self.update_selection()
def clear_tech(self, index):
self.techs[index] = None
self.equip[index] = None
getattr(self.ui, "icon"+str(index+1)).setPixmap(QPixmap())
getattr(self.ui, "icon"+str(index+1)).setToolTip(None)
self.update_selection()
def write_techs(self):
techs = []
equip = [None, None, None, None]
# tech list can't have empty spaces in it
for i in self.techs:
if i is not None:
techs.append(i)
index = 0
for i in self.equip:
equip[index] = i
index += 1
self.player.set_tech_modules(techs, equip)
self.main_window.window.setWindowModified(True)
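# Hedged usage sketch (added; not part of the original module): one way the
# dialog above might be opened from the main window. `main_window` is assumed
# to expose `window`, `assets` and `player`, as the constructor expects.
def open_techs_dialog(main_window):
    """Open the techs dialog modally and write changes back if accepted."""
    techs = Techs(main_window)
    if techs.dialog.exec_():
        techs.write_techs()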
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
"""Sparse NDArray API of MXNet."""
from __future__ import absolute_import
from __future__ import division
try:
from __builtin__ import slice as py_slice
except ImportError:
from builtins import slice as py_slice
import ctypes
import warnings
import os as _os
import sys as _sys
__all__ = ["_ndarray_cls", "csr_matrix", "row_sparse_array",
"BaseSparseNDArray", "CSRNDArray", "RowSparseNDArray"]
# import operator
import numpy as np
from ..base import NotSupportedForSparseNDArray
from ..base import _LIB, numeric_types
from ..base import c_array, mx_real_t
from ..base import mx_uint, NDArrayHandle, check_call
from ..context import Context
from . import _internal
from .ndarray import _DTYPE_NP_TO_MX, _DTYPE_MX_TO_NP
from .ndarray import _STORAGE_TYPE_STR_TO_ID
from .ndarray import _STORAGE_TYPE_UNDEFINED, _STORAGE_TYPE_DEFAULT
from .ndarray import _STORAGE_TYPE_ROW_SPARSE, _STORAGE_TYPE_CSR
from .ndarray import NDArray, _storage_type
from .ndarray import zeros as _zeros_ndarray
from .ndarray import array as _array
from . import op
# When possible, use cython to speedup part of computation.
# pylint: disable=unused-import, too-many-lines
try:
if int(_os.environ.get("MXNET_ENABLE_CYTHON", True)) == 0:
from .._ctypes.ndarray import _set_ndarray_class
elif _sys.version_info >= (3, 0):
from .._cy3.ndarray import _set_ndarray_class
else:
from .._cy2.ndarray import _set_ndarray_class
except ImportError:
if int(_os.environ.get("MXNET_ENFORCE_CYTHON", False)) != 0:
raise ImportError("Cython Module cannot be loaded but MXNET_ENFORCE_CYTHON=1")
from .._ctypes.ndarray import _set_ndarray_class
# pylint: enable=unused-import
try:
import scipy.sparse as spsp
except ImportError:
spsp = None
_STORAGE_AUX_TYPES = {
'row_sparse': [np.int64],
'csr': [np.int64, np.int64]
}
def _new_alloc_handle(stype, shape, ctx, delay_alloc, dtype, aux_types, aux_shapes=None):
"""Return a new handle with specified storage type, shape, dtype and context.
Empty handle is only used to hold results
Returns
-------
handle
A new empty ndarray handle
"""
hdl = NDArrayHandle()
aux_type_ids = [int(_DTYPE_NP_TO_MX[np.dtype(aux_t).type]) for aux_t in aux_types]
aux_shapes = [(0,) for aux_t in aux_types] if aux_shapes is None else aux_shapes
aux_shape_lens = [len(aux_shape) for aux_shape in aux_shapes]
aux_shapes = sum(aux_shapes, ())
num_aux = mx_uint(len(aux_types))
check_call(_LIB.MXNDArrayCreateSparseEx(
ctypes.c_int(int(_STORAGE_TYPE_STR_TO_ID[stype])),
c_array(mx_uint, shape),
mx_uint(len(shape)),
ctypes.c_int(ctx.device_typeid),
ctypes.c_int(ctx.device_id),
ctypes.c_int(int(delay_alloc)),
ctypes.c_int(int(_DTYPE_NP_TO_MX[np.dtype(dtype).type])),
num_aux,
c_array(ctypes.c_int, aux_type_ids),
c_array(mx_uint, aux_shape_lens),
c_array(mx_uint, aux_shapes),
ctypes.byref(hdl)))
return hdl
class BaseSparseNDArray(NDArray):
"""The base class of an NDArray stored in a sparse storage format.
See CSRNDArray and RowSparseNDArray for more details.
"""
def __repr__(self):
"""Returns a string representation of the sparse array."""
shape_info = 'x'.join(['%d' % x for x in self.shape])
# The data content is not displayed since the array usually has big shape
return '\n<%s %s @%s>' % (self.__class__.__name__,
shape_info, self.context)
def __iadd__(self, other):
raise NotImplementedError()
def __isub__(self, other):
raise NotImplementedError()
def __imul__(self, other):
raise NotImplementedError()
def __idiv__(self, other):
raise NotImplementedError()
def __itruediv__(self, other):
raise NotImplementedError()
def _sync_copyfrom(self, source_array):
raise NotImplementedError()
def _at(self, idx):
raise NotSupportedForSparseNDArray(self._at, '[idx]', idx)
def _slice(self, start, stop):
raise NotSupportedForSparseNDArray(self._slice, None, start, stop)
def reshape(self, shape):
raise NotSupportedForSparseNDArray(self.reshape, None, shape)
def _aux_type(self, i):
"""Data-type of the array's ith aux data.
Returns
-------
numpy.dtype
This BaseSparseNDArray's aux data type.
"""
aux_type = ctypes.c_int()
check_call(_LIB.MXNDArrayGetAuxType(self.handle, i, ctypes.byref(aux_type)))
return _DTYPE_MX_TO_NP[aux_type.value]
@property
def _num_aux(self):
"""The number of aux data used to help store the sparse ndarray.
"""
return len(_STORAGE_AUX_TYPES[self.stype])
@property
def _aux_types(self):
"""The data types of the aux data for the BaseSparseNDArray.
"""
aux_types = []
num_aux = self._num_aux
for i in range(num_aux):
aux_types.append(self._aux_type(i))
return aux_types
def asnumpy(self):
"""Return a dense ``numpy.ndarray`` object with value copied from this array
"""
return self.tostype('default').asnumpy()
def astype(self, dtype):
"""Returns a copy of the array after casting to a specified type.
Parameters
----------
dtype : numpy.dtype or str
The type of the returned array.
Examples
--------
>>> x = mx.nd.sparse.zeros('row_sparse', (2,3), dtype='float32')
>>> y = x.astype('int32')
>>> y.dtype
<type 'numpy.int32'>
"""
res = zeros(shape=self.shape, ctx=self.context,
dtype=dtype, stype=self.stype)
self.copyto(res)
return res
def copyto(self, other):
"""Copies the value of this array to another array.
Parameters
----------
other : NDArray or CSRNDArray or RowSparseNDArray or Context
The destination array or context.
Returns
-------
NDArray or CSRNDArray or RowSparseNDArray
The copied array.
"""
if isinstance(other, NDArray):
if other.handle is self.handle:
warnings.warn('You are attempting to copy an array to itself', RuntimeWarning)
return
return _internal._copyto(self, out=other)
elif isinstance(other, Context):
hret = _ndarray_cls(_new_alloc_handle(self.stype, self.shape, other,
True, self.dtype, self._aux_types))
return _internal._copyto(self, out=hret)
else:
raise TypeError('copyto does not support type ' + str(type(other)))
def _data(self):
"""A deep copy NDArray of the data array associated with the BaseSparseNDArray.
This function blocks. Do not use it in performance critical code.
"""
self.wait_to_read()
hdl = NDArrayHandle()
check_call(_LIB.MXNDArrayGetDataNDArray(self.handle, ctypes.byref(hdl)))
return NDArray(hdl)
def _aux_data(self, i):
""" Get a deep copy NDArray of the i-th aux data array associated with the
BaseSparseNDArray.
This function blocks. Do not use it in performance critical code.
"""
self.wait_to_read()
hdl = NDArrayHandle()
check_call(_LIB.MXNDArrayGetAuxNDArray(self.handle, i, ctypes.byref(hdl)))
return NDArray(hdl)
# pylint: disable=abstract-method
class CSRNDArray(BaseSparseNDArray):
"""A sparse representation of 2D NDArray in the standard CSR format.
A CSRNDArray represents an NDArray as three separate arrays: `data`,
`indptr` and `indices`. It uses the standard CSR representation where the column indices for
row i are stored in indices[indptr[i]:indptr[i+1]] and their corresponding values are stored
in data[indptr[i]:indptr[i+1]].
The column indices for a given row are expected to be sorted in ascending order.
Duplicate column entries for the same row are not allowed.
Example
-------
>>> a = mx.nd.array([[0, 1, 0], [2, 0, 0], [0, 0, 0], [0, 0, 3]])
>>> a = a.tostype('csr')
>>> a.indices.asnumpy()
array([1, 0, 2])
>>> a.indptr.asnumpy()
array([0, 1, 2, 2, 3])
>>> a.data.asnumpy()
array([ 1., 2., 3.], dtype=float32)
"""
def __reduce__(self):
return CSRNDArray, (None,), super(CSRNDArray, self).__getstate__()
def __iadd__(self, other):
(self + other).copyto(self)
return self
def __isub__(self, other):
(self - other).copyto(self)
return self
def __imul__(self, other):
(self * other).copyto(self)
return self
def __idiv__(self, other):
(self / other).copyto(self)
return self
def __itruediv__(self, other):
(self / other).copyto(self)
return self
def __getitem__(self, key):
"""x.__getitem__(i) <=> x[i]
Returns a sliced view of this array.
Parameters
----------
key : slice
Indexing key.
Examples
--------
>>> indptr = np.array([0, 2, 3, 6])
>>> indices = np.array([0, 2, 2, 0, 1, 2])
>>> data = np.array([1, 2, 3, 4, 5, 6])
>>> a = mx.nd.sparse.csr_matrix(data, indptr, indices, (3, 3))
>>> a.asnumpy()
array([[1, 0, 2],
[0, 0, 3],
[4, 5, 6]])
>>> a[1:2].asnumpy()
array([[0, 0, 3]], dtype=float32)
"""
if isinstance(key, int):
raise ValueError("__getitem__ with int key is not implemented for CSRNDArray")
if isinstance(key, py_slice):
if key.step is not None:
raise ValueError('CSRNDArray only supports continuous slicing on axis 0')
if key.start is not None or key.stop is not None:
begin = key.start if key.start else 0
end = key.stop if key.stop else self.shape[0]
return op.slice(self, begin=begin, end=end)
else:
return self
if isinstance(key, tuple):
raise ValueError('Multi-dimension indexing is not supported')
def __setitem__(self, key, value):
"""x.__setitem__(i, y) <=> x[i]=y
Set self[key] to value. Only slice key [:] is supported.
Parameters
----------
key : slice
The indexing key.
value : NDArray or CSRNDArray or numpy.ndarray
The value to set.
Examples
--------
>>> src = mx.nd.sparse.zeros('csr', (3,3))
>>> src.asnumpy()
array([[ 0., 0., 0.],
[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
>>> # assign CSRNDArray with same storage type
>>> x = mx.nd.ones('row_sparse', (3,3)).tostype('csr')
>>> x[:] = src
>>> x.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> # assign NDArray to CSRNDArray
>>> x[:] = mx.nd.ones((3,3)) * 2
>>> x.asnumpy()
array([[ 2., 2., 2.],
[ 2., 2., 2.],
[ 2., 2., 2.]], dtype=float32)
"""
if not self.writable:
raise ValueError('Failed to assign to a readonly CSRNDArray')
if isinstance(key, py_slice):
if key.step is not None or key.start is not None or key.stop is not None:
raise ValueError('Assignment with slice for CSRNDArray is not ' \
'implemented yet.')
if isinstance(value, NDArray):
# avoid copying to itself
if value.handle is not self.handle:
value.copyto(self)
elif isinstance(value, numeric_types):
raise ValueError("Assigning numeric types to CSRNDArray is " \
"not implemented yet.")
elif isinstance(value, (np.ndarray, np.generic)):
# TODO(haibin/anisub) check scipy.sparse and use _sync_copy_from to
# avoid the temporary copy
warnings.warn('Assigning non-NDArray object to CSRNDArray is not efficient',
RuntimeWarning)
tmp = _array(value)
tmp.copyto(self)
else:
raise TypeError('type %s not supported' % str(type(value)))
else:
assert(isinstance(key, (int, tuple)))
raise Exception('CSRNDArray only supports [:] for assignment')
@property
def indices(self):
"""A deep copy NDArray of the indices array of the CSRNDArray.
This generates a deep copy of the column indices of the current `csr` matrix.
Returns
-------
NDArray
This CSRNDArray's indices array.
"""
return self._aux_data(1)
@property
def indptr(self):
"""A deep copy NDArray of the indptr array of the CSRNDArray.
This generates a deep copy of the `indptr` of the current `csr` matrix.
Returns
-------
NDArray
This CSRNDArray's indptr array.
"""
return self._aux_data(0)
@property
def data(self):
"""A deep copy NDArray of the data array of the CSRNDArray.
This generates a deep copy of the `data` of the current `csr` matrix.
Returns
-------
NDArray
This CSRNDArray's data array.
"""
return self._data()
@indices.setter
def indices(self, indices):
raise NotImplementedError()
@indptr.setter
def indptr(self, indptr):
raise NotImplementedError()
@data.setter
def data(self, data):
raise NotImplementedError()
def tostype(self, stype):
"""Return a copy of the array with chosen storage type.
Returns
-------
NDArray or CSRNDArray
A copy of the array with the chosen storage stype
"""
if stype == 'row_sparse':
raise ValueError("cast_storage from csr to row_sparse is not supported")
return op.cast_storage(self, stype=stype)
def copyto(self, other):
"""Copies the value of this array to another array.
If ``other`` is a ``NDArray`` or ``CSRNDArray`` object, then ``other.shape`` and
``self.shape`` should be the same. This function copies the value from
``self`` to ``other``.
If ``other`` is a context, a new ``CSRNDArray`` will be first created on
the target context, and the value of ``self`` is copied.
Parameters
----------
other : NDArray or CSRNDArray or Context
The destination array or context.
Returns
-------
NDArray or CSRNDArray
The copied array. If ``other`` is an ``NDArray`` or ``CSRNDArray``, then the return
value and ``other`` will point to the same ``NDArray`` or ``CSRNDArray``.
"""
if isinstance(other, Context):
return super(CSRNDArray, self).copyto(other)
elif isinstance(other, NDArray):
stype = other.stype
if stype == 'default' or stype == 'csr':
return super(CSRNDArray, self).copyto(other)
else:
raise TypeError('copyto does not support destination NDArray stype ' + str(stype))
else:
raise TypeError('copyto does not support type ' + str(type(other)))
# pylint: disable=abstract-method
class RowSparseNDArray(BaseSparseNDArray):
"""A sparse representation of a set of NDArray row slices at given indices.
A RowSparseNDArray represents a multidimensional NDArray using two separate arrays: `data` and
`indices`.
- data: an NDArray of any dtype with shape [D0, D1, ..., Dn].
- indices: a 1-D int64 NDArray with shape [D0].
The `indices` stores the indices of the row slices with non-zeros,
while the values are stored in `data`. The corresponding NDArray ``dense``
represented by RowSparseNDArray ``rsp`` has
``dense[rsp.indices[i], :, :, :, ...] = rsp.data[i, :, :, :, ...]``
>>> dense.asnumpy()
array([[ 1., 2., 3.],
[ 0., 0., 0.],
[ 4., 0., 5.],
[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
>>> rsp = dense.tostype('row_sparse')
>>> rsp.indices.asnumpy()
array([0, 2], dtype=int64)
>>> rsp.data.asnumpy()
array([[ 1., 2., 3.],
[ 4., 0., 5.]], dtype=float32)
A RowSparseNDArray is typically used to represent non-zero row-slices of a large NDArray
of shape [LARGE0, D1, .. , Dn] where LARGE0 >> D0 and most row slices are zeros.
The indices are expected to be sorted in ascending order.
RowSparseNDArray is used principally in the definition of gradients for operations
that have sparse gradients (e.g. sparse dot and sparse embedding).
"""
def __reduce__(self):
return RowSparseNDArray, (None,), super(RowSparseNDArray, self).__getstate__()
def __iadd__(self, other):
(self + other).copyto(self)
return self
def __isub__(self, other):
(self - other).copyto(self)
return self
def __imul__(self, other):
(self * other).copyto(self)
return self
def __idiv__(self, other):
(self / other).copyto(self)
return self
def __itruediv__(self, other):
(self / other).copyto(self)
return self
def __getitem__(self, key):
"""x.__getitem__(i) <=> x[i]
Returns a sliced view of this array.
Parameters
----------
key : slice
Indexing key.
Examples
--------
>>> x = mx.nd.sparse.zeros('row_sparse', (2, 3))
>>> x[:].asnumpy()
array([[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
"""
if isinstance(key, int):
raise Exception("__getitem__ with int key is not implemented for RowSparseNDArray yet")
if isinstance(key, py_slice):
if key.step is not None or key.start is not None or key.stop is not None:
raise Exception('RowSparseNDArray only supports [:] for __getitem__')
else:
return self
if isinstance(key, tuple):
raise ValueError('Multi-dimension indexing is not supported')
def __setitem__(self, key, value):
"""x.__setitem__(i, y) <=> x[i]=y
Set self[key] to value. Only slice key [:] is supported.
Parameters
----------
key : slice
The indexing key.
value : NDArray or numpy.ndarray
The value to set.
Examples
--------
>>> src = mx.nd.row_sparse([[1, 0, 2], [4, 5, 6]], [0, 2], (3,3))
>>> src.asnumpy()
array([[ 1., 0., 2.],
[ 0., 0., 0.],
[ 4., 5., 6.]], dtype=float32)
>>> # assign RowSparseNDArray with same storage type
>>> x = mx.nd.sparse.zeros('row_sparse', (3,3))
>>> x[:] = src
>>> x.asnumpy()
array([[ 1., 0., 2.],
[ 0., 0., 0.],
[ 4., 5., 6.]], dtype=float32)
>>> # assign NDArray to RowSparseNDArray
>>> x[:] = mx.nd.ones((3,3))
>>> x.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
"""
if not self.writable:
raise ValueError('Failed to assign to a readonly RowSparseNDArray')
if isinstance(key, py_slice):
if key.step is not None or key.start is not None or key.stop is not None:
raise ValueError('Assignment with slice for RowSparseNDArray ' \
'is not implemented yet.')
if isinstance(value, NDArray):
# avoid copying to itself
if value.handle is not self.handle:
value.copyto(self)
elif isinstance(value, numeric_types):
raise ValueError("Assigning numeric types to RowSparseNDArray " \
"is not implemented yet.")
elif isinstance(value, (np.ndarray, np.generic)):
warnings.warn('Assigning non-NDArray object to RowSparseNDArray is not efficient',
RuntimeWarning)
tmp = _array(value)
tmp.copyto(self)
else:
raise TypeError('type %s not supported' % str(type(value)))
else:
assert(isinstance(key, (int, tuple)))
raise TypeError('RowSparseNDArray only supports [:] for assignment')
@property
def indices(self):
"""A deep copy NDArray of the indices array of the RowSparseNDArray.
This generates a deep copy of the row indices of the current `row_sparse` matrix.
Returns
-------
NDArray
This RowSparseNDArray's indices array.
"""
return self._aux_data(0)
@property
def data(self):
"""A deep copy NDArray of the data array of the RowSparseNDArray.
This generates a deep copy of the `data` of the current `row_sparse` matrix.
Returns
-------
NDArray
This RowSparseNDArray's data array.
"""
return self._data()
@indices.setter
def indices(self, indices):
raise NotImplementedError()
@data.setter
def data(self, data):
raise NotImplementedError()
def tostype(self, stype):
"""Return a copy of the array with chosen storage type.
Returns
-------
NDArray or RowSparseNDArray
A copy of the array with the chosen storage stype
"""
if stype == 'csr':
raise ValueError("cast_storage from row_sparse to csr is not supported")
return op.cast_storage(self, stype=stype)
def copyto(self, other):
"""Copies the value of this array to another array.
If ``other`` is a ``NDArray`` or ``RowSparseNDArray`` object, then ``other.shape``
and ``self.shape`` should be the same. This function copies the value from
``self`` to ``other``.
If ``other`` is a context, a new ``RowSparseNDArray`` will be first created on
the target context, and the value of ``self`` is copied.
Parameters
----------
other : NDArray or RowSparseNDArray or Context
The destination array or context.
Returns
-------
NDArray or RowSparseNDArray
The copied array. If ``other`` is an ``NDArray`` or ``RowSparseNDArray``, then the
return value and ``other`` will point to the same ``NDArray`` or ``RowSparseNDArray``.
"""
if isinstance(other, Context):
return super(RowSparseNDArray, self).copyto(other)
elif isinstance(other, NDArray):
stype = other.stype
if stype == 'default' or stype == 'row_sparse':
return super(RowSparseNDArray, self).copyto(other)
else:
raise TypeError('copyto does not support destination NDArray stype ' + str(stype))
else:
raise TypeError('copyto does not support type ' + str(type(other)))
def _prepare_src_array(src, dtype, default_dtype):
"""Prepare `src` and its dtype so that they can be used to construct NDArray.
`src` is converted to a `np.ndarray` if it's neither an `NDArray` nor an `np.ndarray`.
"""
if isinstance(src, NDArray):
dtype = src.dtype if dtype is None else dtype
else:
dtype = default_dtype if dtype is None else dtype
if not isinstance(src, np.ndarray):
try:
src = np.array(src, dtype=dtype)
except:
raise TypeError('values must be array like object')
return src, dtype
def csr_matrix(data, indptr, indices, shape, ctx=None, dtype=None, indptr_type=None,
indices_type=None):
"""Creates a 2D array with compressed sparse row (CSR) format.
Parameters
----------
data: array_like
An object exposing the array interface, with shape [nnz], where nnz is the number of
non-zero entries.
indptr: array_like
An object exposing the array interface, with shape [num_rows + 1]. The first element in indptr
should always be zero.
indices: array_like
An object exposing the array interface, with shape [nnz].
ctx: Context, optional
Device context (default is the current default context).
dtype: str or numpy.dtype, optional
The data type of the output array. The default dtype is ``data.dtype``
if `data` is an `NDArray`, `float32` otherwise.
indptr_type: str or numpy.dtype, optional
The data type of the indices array. The default dtype is ``indptr.dtype``
if `indptr` is an `NDArray`, `int64` otherwise.
indices_type: str or numpy.dtype, optional
The data type of the indices array. The default dtype is ``indices.dtype``
if `indices` is an `NDArray`, `int64` otherwise.
Returns
-------
CSRNDArray
A `CSRNDArray` with the `csr` storage representation.
Example
-------
>>> import mxnet as mx
>>> a = mx.nd.sparse.csr_matrix([1, 2, 3], [0, 1, 2, 2, 3], [1, 0, 2], (4, 3))
>>> a.asnumpy()
array([[ 0., 1., 0.],
[ 2., 0., 0.],
[ 0., 0., 0.],
[ 0., 0., 3.]], dtype=float32)
"""
storage_type = 'csr'
# context
if ctx is None:
ctx = Context.default_ctx
# prepare src array and types
data, dtype = _prepare_src_array(data, dtype, mx_real_t)
indptr, indptr_type = _prepare_src_array(indptr, indptr_type,
_STORAGE_AUX_TYPES[storage_type][0])
indices, indices_type = _prepare_src_array(indices, indices_type,
_STORAGE_AUX_TYPES[storage_type][1])
# verify types
assert('int64' in str(indptr_type)), "expected int64 for indptr"
assert('int64' in str(indices_type)), "expected int64 for indices"
# verify shapes
aux_shapes = [indptr.shape, indices.shape]
assert(data.ndim == 1)
assert(indptr.ndim == 1)
assert(indices.ndim == 1)
assert(len(shape) == 2)
result = CSRNDArray(_new_alloc_handle(storage_type, shape, ctx, False, dtype,
[indptr_type, indices_type], aux_shapes))
# TODO(junwu): Convert data, indptr, and indices to mxnet NDArrays
# if they are not for now. In the future, we should provide a c-api
# to accept np.ndarray types and copy them into result.data and aux_data
if not isinstance(data, NDArray):
data = _array(data, ctx, dtype)
if not isinstance(indptr, NDArray):
indptr = _array(indptr, ctx, indptr_type)
if not isinstance(indices, NDArray):
indices = _array(indices, ctx, indices_type)
check_call(_LIB.MXNDArraySyncCopyFromNDArray(result.handle, data.handle, ctypes.c_int(-1)))
check_call(_LIB.MXNDArraySyncCopyFromNDArray(result.handle, indptr.handle, ctypes.c_int(0)))
check_call(_LIB.MXNDArraySyncCopyFromNDArray(result.handle, indices.handle, ctypes.c_int(1)))
return result
def row_sparse_array(data, indices, shape, ctx=None, dtype=None, indices_type=None):
"""Creates a multidimensional row sparse array with a set of tensor slices at given indices.
Parameters
----------
data: array_like
An object exposing the array interface, with shape [D0, D1, .. DK], where D0 is
the number of rows with non-zeros entries.
indices: array_like
An object exposing the array interface, with shape [D0].
ctx : Context, optional
Device context (default is the current default context).
dtype : str or numpy.dtype, optional
The data type of the output array. The default dtype is ``data.dtype``
if `data` is an `NDArray`, `float32` otherwise.
indices_type: str or numpy.dtype, optional
The data type of the indices array. The default dtype is ``indices.dtype``
if `indices` is an `NDArray`, `int64` otherwise.
Returns
-------
RowSparseNDArray
A `RowSparseNDArray` with the `row_sparse` storage representation.
Example
-------
>>> a = mx.nd.sparse.row_sparse_array([[1, 2], [3, 4]], [1, 4], (6, 2))
>>> a.asnumpy()
array([[ 0., 0.],
[ 1., 2.],
[ 0., 0.],
[ 0., 0.],
[ 3., 4.],
[ 0., 0.]], dtype=float32)
"""
storage_type = 'row_sparse'
# context
if ctx is None:
ctx = Context.default_ctx
# prepare src array and types
data, dtype = _prepare_src_array(data, dtype, mx_real_t)
indices, indices_type = _prepare_src_array(indices, indices_type,
_STORAGE_AUX_TYPES[storage_type][0])
# verify types
assert('int64' in str(indices_type)), "expected int64 for indices"
# verify shapes
assert(data.ndim == len(shape))
assert(indices.ndim == 1)
result = RowSparseNDArray(_new_alloc_handle(storage_type, shape, ctx, False, dtype,
[indices_type], [indices.shape]))
# TODO(junwu): Convert data, indptr, and indices to mxnet NDArrays
# if they are not for now. In the future, we should provide a c-api
# to accept np.ndarray types and copy them into result.data and aux_data
if not isinstance(data, NDArray):
data = _array(data, ctx, dtype)
if not isinstance(indices, NDArray):
indices = _array(indices, ctx, indices_type)
check_call(_LIB.MXNDArraySyncCopyFromNDArray(result.handle, data.handle, ctypes.c_int(-1)))
check_call(_LIB.MXNDArraySyncCopyFromNDArray(result.handle, indices.handle, ctypes.c_int(0)))
return result
def _ndarray_cls(handle, writable=True, stype=_STORAGE_TYPE_UNDEFINED):
if stype == _STORAGE_TYPE_UNDEFINED:
stype = _storage_type(handle)
if stype == _STORAGE_TYPE_DEFAULT:
return NDArray(handle, writable=writable)
elif stype == _STORAGE_TYPE_CSR:
return CSRNDArray(handle, writable=writable)
elif stype == _STORAGE_TYPE_ROW_SPARSE:
return RowSparseNDArray(handle, writable=writable)
else:
raise Exception("unknown storage type")
_set_ndarray_class(_ndarray_cls)
def zeros(stype, shape, ctx=None, dtype=None, aux_types=None, **kwargs):
"""Return a new array of given shape and type, filled with zeros.
Parameters
----------
stype: string
The storage type of the empty array, such as 'row_sparse', 'csr', etc
shape : int or tuple of int
The shape of the empty array
ctx : Context, optional
An optional device context (default is the current default context)
dtype : str or numpy.dtype, optional
An optional value type (default is `float32`)
aux_types: list of numpy.dtype, optional
An optional list of types of the aux data for RowSparseNDArray or CSRNDArray
(default values depends on the storage type)
Returns
-------
RowSparseNDArray or CSRNDArray
A created array
Examples
--------
>>> mx.nd.sparse.zeros('csr', (1,2))
<CSRNDArray 1x2 @cpu(0)>
>>> mx.nd.sparse.zeros('row_sparse', (1,2), ctx=mx.cpu(), dtype='float16').asnumpy()
array([[ 0., 0.]], dtype=float16)
"""
if stype == 'default':
return _zeros_ndarray(shape, ctx=ctx, dtype=dtype, **kwargs)
if ctx is None:
ctx = Context.default_ctx
dtype = mx_real_t if dtype is None else dtype
if aux_types is None:
if stype == 'row_sparse' or stype == 'csr':
aux_types = _STORAGE_AUX_TYPES[stype]
else:
raise Exception("unknown storage type")
assert(len(aux_types) == len(_STORAGE_AUX_TYPES[stype]))
out = _ndarray_cls(_new_alloc_handle(stype, shape, ctx, True, dtype, aux_types))
return _internal._zeros(shape=shape, ctx=ctx, dtype=dtype, out=out, **kwargs)
def empty(stype, shape, ctx=None, dtype=None, aux_types=None):
"""Returns a new array of given shape and type, without initializing entries.
Parameters
----------
stype: string
The storage type of the empty array, such as 'row_sparse', 'csr', etc
shape : int or tuple of int
The shape of the empty array.
ctx : Context, optional
An optional device context (default is the current default context).
dtype : str or numpy.dtype, optional
An optional value type (default is `float32`).
Returns
-------
CSRNDArray or RowSparseNDArray
A created array.
"""
if isinstance(shape, int):
shape = (shape, )
if ctx is None:
ctx = Context.default_ctx
if dtype is None:
dtype = mx_real_t
assert(stype is not None)
if stype == 'csr' or stype == 'row_sparse':
return zeros(stype, shape, ctx=ctx, dtype=dtype, aux_types=aux_types)
else:
raise Exception("unknown stype : " + str(stype))
def array(source_array, ctx=None, dtype=None, aux_types=None):
"""Creates a sparse array from any object exposing the array interface.
Parameters
----------
source_array : RowSparseNDArray, CSRNDArray or scipy.sparse.csr.csr_matrix
The source sparse array
ctx : Context, optional
Device context (default is the current default context).
dtype : str or numpy.dtype, optional
The data type of the output array. The default dtype is ``source_array.dtype``
if `source_array` is an `NDArray`, `float32` otherwise.
aux_types: list of numpy.dtype, optional
An optional list of types of the aux data for RowSparseNDArray or CSRNDArray.
The default value for CSRNDArray is [`int64`, `int64`] for `indptr` and `indices`.
The default value for RowSparseNDArray is [`int64`] for `indices`.
Returns
-------
RowSparseNDArray or CSRNDArray
An array with the same contents as the `source_array`.
Examples
--------
>>> import scipy.sparse as sp
>>> csr = sp.csr_matrix((2, 100))
>>> mx.nd.sparse.array(csr)
<CSRNDArray 2x100 @cpu(0)>
>>> mx.nd.sparse.array(mx.nd.sparse.zeros('csr', (3, 2)))
<CSRNDArray 3x2 @cpu(0)>
>>> mx.nd.sparse.array(mx.nd.sparse.zeros('row_sparse', (3, 2)))
<RowSparseNDArray 3x2 @cpu(0)>
"""
if isinstance(source_array, NDArray):
assert(source_array.stype != 'default'), \
"Please use `cast_storage` to create RowSparseNDArray or CSRNDArray from an NDArray"
dtype = source_array.dtype if dtype is None else dtype
aux_types = source_array._aux_types if aux_types is None else aux_types
arr = empty(source_array.stype, source_array.shape, ctx, dtype, aux_types)
arr[:] = source_array
return arr
if spsp is not None and isinstance(source_array, spsp.csr.csr_matrix):
# TODO(haibin) implement `_sync_copy_from` with scipy csr object to reduce a copy
indptr_type = None
indices_type = None
if aux_types is not None:
assert(len(aux_types) == 2), "Expected types for both indices and indptr"
indptr_type = aux_types[0]
indices_type = aux_types[1]
# preprocess scipy csr to canonical form
csr = source_array.sorted_indices()
csr.sum_duplicates()
arr = csr_matrix(csr.data, csr.indptr, csr.indices, csr.shape, dtype=dtype,
indptr_type=indptr_type, indices_type=indices_type)
return arr
elif isinstance(source_array, (np.ndarray, np.generic)):
raise ValueError("Please use mx.nd.array to create an NDArray with source_array of type ",
type(source_array))
else:
raise ValueError("Unexpected source_array type: ", type(source_array))
|
|
# -*- coding: utf-8 -*-
from PyQt4 import QtGui, QtCore
from acq4.analysis.AnalysisModule import AnalysisModule
from collections import OrderedDict
import acq4.pyqtgraph as pg
from acq4.util.metaarray import MetaArray
import numpy as np
class ImageAnalysis(AnalysisModule):
def __init__(self, host):
AnalysisModule.__init__(self, host)
self.background = None
#self.view = pg.GraphicsView()
self.ctrl = QtGui.QWidget()
l = QtGui.QGridLayout()
self.ctrl.setLayout(l)
self.ctrl.layout = l
#self.loadBgBtn = QtGui.QPushButton('load reference')
#l.addWidget(self.loadBgBtn, 0, 0)
self.addRoiBtn = QtGui.QPushButton('add ROI')
l.addWidget(self.addRoiBtn, 0, 0)
s = QtGui.QSpinBox()
s.setMaximum(10)
s.setMinimum(1)
self.nsegSpin = s
l.addWidget(s, 1, 0)
self.rois = []
self.data = []
## Setup basic GUI
self._elements_ = OrderedDict([
('File Loader', {'type': 'fileInput', 'size': (200, 300), 'host': self, 'showFileTree': False}),
('Image', {'type': 'imageView', 'pos': ('right', 'File Loader'), 'size': (800, 300)}),
('Time Plot', {'type': 'plot', 'pos': ('bottom',), 'size': (800, 300)}),
('Trial Plot', {'type': 'plot', 'pos': ('bottom', 'Time Plot'), 'size': (800, 300)}),
('Line Scan', {'type': 'imageView', 'pos': ('right', 'Time Plot'), 'size': (800, 300)}),
#('Data Table', {'type': 'table', 'pos': ('below', 'Time Plot')}),
('Ctrl', {'type': 'ctrl', 'pos': ('bottom', 'File Loader'), 'size': (200,30), 'object': self.ctrl}),
])
self.initializeElements()
#self.traces = None
self.plot = self.getElement('Time Plot', create=True)
self.plot2 = self.getElement('Trial Plot', create=True)
self.lr = pg.LinearRegionItem([0, 1])
self.plot.addItem(self.lr)
self.view = self.getElement('Image', create=True)
## Add a color scale
## removed for now--seems to be causing crashes :(
#self.colorScale = pg.GradientLegend(self.plot1, (20, 150), (-10, -10))
#self.plot1.scene().addItem(self.colorScale)
## Plots are updated when the selected region changes
self.lr.sigRegionChanged.connect(self.updateAnalysis)
self.addRoiBtn.clicked.connect(self.addRoi)
self.view.sigProcessingChanged.connect(self.processData)
#self.loadBgBtn.clicked.connect(self.loadBg)
def addRoi(self):
if self.nsegSpin.value() == 1:
roi = pg.widgets.LineROI((0,0), (20, 20), 5)
else:
pts = [(i*10,i*10) for i in range(self.nsegSpin.value()+1)]
roi = pg.widgets.MultiLineROI(pts, 5)
self.rois.append(roi)
self.view.addItem(roi)
roi.sigRegionChanged.connect(self.roiChanged)
def roiChanged(self, roi):
if isinstance(roi, int):
roi = self.currentRoi
self.plot.clearPlots()
c = 0
lineScans = []
for imgSet in self.data:
data = roi.getArrayRegion(imgSet['procMean'], self.view.imageItem, axes=(1,2))
m = data.mean(axis=1).mean(axis=1)
lineScans.append(data.mean(axis=2))
spacer = np.empty((lineScans[-1].shape[0], 1), dtype = lineScans[-1].dtype)
spacer[:] = lineScans[-1].min()
lineScans.append(spacer)
data = roi.getArrayRegion(imgSet['procStd'], self.view.imageItem, axes=(1,2))
s = data.mean(axis=1).mean(axis=1)
self.plot.plot(m, pen=pg.hsvColor(c*0.2, 1.0, 1.0))
self.plot.plot(m-s, pen=pg.hsvColor(c*0.2, 1.0, 0.4))
self.plot.plot(m+s, pen=pg.hsvColor(c*0.2, 1.0, 0.4))
c += 1
lineScan = np.hstack(lineScans)
self.getElement('Line Scan').setImage(lineScan)
self.currentRoi = roi
def processData(self):
self.normData = []
self.data = []
for img in self.rawData:
n = np.empty(img.shape, dtype=img.dtype)
for i in range(img.shape[0]):
n[i] = self.view.normalize(img[i])
self.normData.append(n)
imgSet = {'procMean': n.mean(axis=0), 'procStd': n.std(axis=0)}
self.data.append(imgSet)
def updateAnalysis(self):
roi = self.currentRoi
plot = self.getElement('Trial Plot')
plot.clearPlots()
c = 0
for img in self.normData:
#img = img.mean(axis=1)
rgn = self.lr.getRegion()
img = img[:, rgn[0]:rgn[1]].mean(axis=1)
data = roi.getArrayRegion(img, self.view.imageItem, axes=(1,2))
m = data.mean(axis=1).mean(axis=1)
#data = roi.getArrayRegion(img, self.view.imageItem, axes=(1,2))
#s = data.mean(axis=1).mean(axis=1)
plot.plot(m, pen=pg.hsvColor(c*0.2, 1.0, 1.0))
#self.plot.plot(m-s, pen=pg.hsvColor(c*0.2, 1.0, 0.4))
#self.plot.plot(m+s, pen=pg.hsvColor(c*0.2, 1.0, 0.4))
c += 1
#if c == 1:
#self.getElement('Line Scan').setImage(data.mean(axis=2))
#if self.traces is None:
#return
#rgn = self.lr.getRegion()
#data = self.traces['Time': rgn[0]:rgn[1]]
#self.plot2.plot(data.mean(axis=1), clear=True)
#self.plot2.plot(data.max(axis=1))
#self.plot2.plot(data.min(axis=1))
def loadFileRequested(self, dh):
"""Called by file loader when a file load is requested."""
if len(dh) != 1:
raise Exception("Can only load one file at a time.")
dh = dh[0]
if dh.isFile():
self.background = dh.read()[np.newaxis,...].astype(float)
self.background /= self.background.max()
return
self.plot.clearPlots()
dirs = dh.subDirs()
images = [[],[],[],[]]
## Iterate over sequence
minFrames = None
for d in dirs:
d = dh[d]
try:
ind = d.info()[('Clamp1', 'amp')]
except:
print(d)
print(d.info())
raise
img = d['Camera/frames.ma'].read()
images[ind].append(img)
if minFrames is None or img.shape[0] < minFrames:
minFrames = img.shape[0]
self.rawData = []
self.data = []
#print "len images: %d " % (len(images))
while len(images) > 0:
imgs = images.pop(0)
img = np.concatenate([i[np.newaxis,:minFrames,...] for i in imgs], axis=0)
self.rawData.append(img.astype(np.float32))
#img /= self.background
## remove bleaching curve from first two axes
ctrlMean = self.rawData[0].mean(axis=2).mean(axis=2)
trialCurve = ctrlMean.mean(axis=1)[:,np.newaxis,np.newaxis,np.newaxis]
timeCurve = ctrlMean.mean(axis=0)[np.newaxis,:,np.newaxis,np.newaxis]
del ctrlMean
for img in self.rawData:
img /= trialCurve
img /= timeCurve
#for img in self.rawData:
#m = img.mean(axis=0)
#s = img.std(axis=0)
#if self.background is not None:
#m = m.astype(np.float32)
#m /= self.background
#s = s.astype(np.float32)
#s /= self.background
#imgSet = {'mean': m, 'std': s}
#self.data.append(imgSet)
#self.imgMeans.append(m)
#self.imgStds.append(s)
self.view.setImage(self.rawData[1].mean(axis=0))
self.processData()
## set up the selection region correctly and prepare IV curves
#if len(dirs) > 0:
#end = cmd.xvals('Time')[-1]
#self.lr.setRegion([end *0.5, end * 0.6])
#self.updateAnalysis()
#info = [
#{'name': 'Command', 'units': cmd.axisUnits(-1), 'values': np.array(values)},
#data.infoCopy('Time'),
#data.infoCopy(-1)]
#self.traces = MetaArray(np.vstack(traces), info=info)
return True
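# Hedged standalone sketch (added; not part of the acq4 module): the bleaching
# correction applied in loadFileRequested() above, shown on synthetic data.
# rawData[0] acts as the control set with shape (trial, time, x, y); its mean
# trial and time curves are divided out of every image set.
if __name__ == "__main__":
    data = np.random.rand(4, 10, 8, 8).astype(np.float32) + 1.0
    ctrlMean = data.mean(axis=2).mean(axis=2)  # shape (trial, time)
    trialCurve = ctrlMean.mean(axis=1)[:, np.newaxis, np.newaxis, np.newaxis]
    timeCurve = ctrlMean.mean(axis=0)[np.newaxis, :, np.newaxis, np.newaxis]
    corrected = data / trialCurve / timeCurve
    print(corrected.shape)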
|
|
# Zulip's OpenAPI-based API documentation system is documented at
# https://zulip.readthedocs.io/en/latest/documentation/api.html
#
# This file defines the special Markdown extension that is used to
# render the code examples, example responses, etc. that appear in
# Zulip's public API documentation.
import inspect
import json
import re
import shlex
from typing import Any, Dict, List, Mapping, Optional, Pattern, Tuple
import markdown
from django.conf import settings
from markdown.extensions import Extension
from markdown.preprocessors import Preprocessor
import zerver.openapi.python_examples
from zerver.openapi.openapi import get_openapi_description, get_openapi_fixture, openapi_spec
MACRO_REGEXP = re.compile(
r'\{generate_code_example(\(\s*(.+?)\s*\))*\|\s*(.+?)\s*\|\s*(.+?)\s*(\(\s*(.+)\s*\))?\}')
PYTHON_EXAMPLE_REGEX = re.compile(r'\# \{code_example\|\s*(.+?)\s*\}')
JS_EXAMPLE_REGEX = re.compile(r'\/\/ \{code_example\|\s*(.+?)\s*\}')
MACRO_REGEXP_DESC = re.compile(r'\{generate_api_description(\(\s*(.+?)\s*\))}')
PYTHON_CLIENT_CONFIG = """
#!/usr/bin/env python3
import zulip
# Pass the path to your zuliprc file here.
client = zulip.Client(config_file="~/zuliprc")
"""
PYTHON_CLIENT_ADMIN_CONFIG = """
#!/usr/bin/env python
import zulip
# The user for this zuliprc file must be an organization administrator
client = zulip.Client(config_file="~/zuliprc-admin")
"""
JS_CLIENT_CONFIG = """
const Zulip = require('zulip-js');
// Pass the path to your zuliprc file here.
const config = { zuliprc: 'zuliprc' };
"""
JS_CLIENT_ADMIN_CONFIG = """
const Zulip = require('zulip-js');
// The user for this zuliprc file must be an organization administrator.
const config = { zuliprc: 'zuliprc-admin' };
"""
DEFAULT_AUTH_EMAIL = "BOT_EMAIL_ADDRESS"
DEFAULT_AUTH_API_KEY = "BOT_API_KEY"
DEFAULT_EXAMPLE = {
"integer": 1,
"string": "demo",
"boolean": False,
}
def parse_language_and_options(input_str: Optional[str]) -> Tuple[str, Dict[str, Any]]:
if not input_str:
return ("", {})
language_and_options = re.match(r"(?P<language>\w+)(,\s*(?P<options>[\"\'\w\d\[\],= ]+))?", input_str)
assert(language_and_options is not None)
kwargs_pattern = re.compile(r"(?P<key>\w+)\s*=\s*(?P<value>[\'\"\w\d]+|\[[\'\",\w\d ]+\])")
language = language_and_options.group("language")
assert(language is not None)
if language_and_options.group("options"):
_options = kwargs_pattern.finditer(language_and_options.group("options"))
options = {}
for m in _options:
options[m.group("key")] = json.loads(m.group("value").replace("'", '"'))
return (language, options)
return (language, {})
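# Hedged illustration (added): for an input like "curl, exclude=['client_gravatar']"
# the parser above returns ("curl", {"exclude": ["client_gravatar"]}); a bare
# language such as "python" returns ("python", {}).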
def extract_code_example(source: List[str], snippet: List[Any],
example_regex: Pattern[str]) -> List[Any]:
start = -1
end = -1
for line in source:
match = example_regex.search(line)
if match:
if match.group(1) == 'start':
start = source.index(line)
elif match.group(1) == 'end':
end = source.index(line)
break
if (start == -1 and end == -1):
return snippet
snippet.append(source[start + 1: end])
source = source[end + 1:]
return extract_code_example(source, snippet, example_regex)
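# Hedged illustration (added; not part of the upstream file): how the
# marker-based extraction above behaves on a toy snippet. PYTHON_EXAMPLE_REGEX
# matches lines of the form "# {code_example|start}" / "# {code_example|end}".
if __name__ == "__main__":  # pragma: no cover
    _demo_source = [
        "def get_profile(client):\n",
        "    # {code_example|start}\n",
        "    result = client.get_profile()\n",
        "    # {code_example|end}\n",
    ]
    # Expect one snippet containing the single line between the markers.
    print(extract_code_example(_demo_source, [], PYTHON_EXAMPLE_REGEX))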
def render_python_code_example(function: str, admin_config: bool=False,
**kwargs: Any) -> List[str]:
method = zerver.openapi.python_examples.TEST_FUNCTIONS[function]
function_source_lines = inspect.getsourcelines(method)[0]
if admin_config:
config = PYTHON_CLIENT_ADMIN_CONFIG.splitlines()
else:
config = PYTHON_CLIENT_CONFIG.splitlines()
snippets = extract_code_example(function_source_lines, [], PYTHON_EXAMPLE_REGEX)
code_example = []
code_example.append('```python')
code_example.extend(config)
for snippet in snippets:
for line in snippet:
# Remove one level of indentation and strip newlines
code_example.append(line[4:].rstrip())
code_example.append('print(result)')
code_example.append('\n')
code_example.append('```')
return code_example
def render_javascript_code_example(function: str, admin_config: bool=False,
**kwargs: Any) -> List[str]:
function_source_lines = []
with open('zerver/openapi/javascript_examples.js') as f:
parsing = False
for line in f:
if line.startswith("}"):
parsing = False
if parsing:
function_source_lines.append(line.rstrip())
if line.startswith("add_example(") and function in line:
parsing = True
snippets = extract_code_example(function_source_lines, [], JS_EXAMPLE_REGEX)
if admin_config:
config = JS_CLIENT_ADMIN_CONFIG.splitlines()
else:
config = JS_CLIENT_CONFIG.splitlines()
code_example = []
code_example.append('```js')
code_example.extend(config)
for snippet in snippets:
code_example.append("Zulip(config).then(async (client) => {")
for line in snippet:
result = re.search('const result.*=(.*);', line)
if result:
line = f" return{result.group(1)};"
# Strip newlines
code_example.append(line.rstrip())
code_example.append("}).then(console.log).catch(console.err);")
code_example.append(" ")
code_example.append('```')
return code_example
def curl_method_arguments(endpoint: str, method: str,
api_url: str) -> List[str]:
# We also include the -sS verbosity arguments here.
method = method.upper()
url = f"{api_url}/v1{endpoint}"
valid_methods = ["GET", "POST", "DELETE", "PUT", "PATCH", "OPTIONS"]
if method == "GET":
        # Then we need to make sure that each -d option translates into
        # a GET parameter (in the URL) and not a POST parameter (in the body).
# TODO: remove the -X part by updating the linting rule. It's redundant.
return ["-sSX", "GET", "-G", url]
elif method in valid_methods:
return ["-sSX", method, url]
else:
msg = f"The request method {method} is not one of {valid_methods}"
raise ValueError(msg)
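def _example_curl_method_arguments() -> None:
    # Illustrative sketch (never called anywhere); the API URL is a placeholder.
    assert curl_method_arguments("/users", "get", "https://example.com/api") == [
        "-sSX", "GET", "-G", "https://example.com/api/v1/users",
    ]
    assert curl_method_arguments("/users", "post", "https://example.com/api") == [
        "-sSX", "POST", "https://example.com/api/v1/users",
    ]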
def get_openapi_param_example_value_as_string(endpoint: str, method: str, param: Dict[str, Any],
curl_argument: bool=False) -> str:
jsonify = False
param_name = param["name"]
if "content" in param:
param = param["content"]["application/json"]
jsonify = True
if "type" in param["schema"]:
param_type = param["schema"]["type"]
else:
# Hack: Ideally, we'd extract a common function for handling
# oneOf values in types and do something with the resulting
# union type. But for this logic's purpose, it's good enough
# to just check the first parameter.
param_type = param["schema"]["oneOf"][0]["type"]
if param_type in ["object", "array"]:
example_value = param.get("example", None)
if not example_value:
msg = f"""All array and object type request parameters must have
concrete examples. The openAPI documentation for {endpoint}/{method} is missing an example
value for the {param_name} parameter. Without this we cannot automatically generate a
cURL example."""
raise ValueError(msg)
ordered_ex_val_str = json.dumps(example_value, sort_keys=True)
# We currently don't have any non-JSON encoded arrays.
assert(jsonify)
if curl_argument:
return " --data-urlencode " + shlex.quote(f"{param_name}={ordered_ex_val_str}")
return ordered_ex_val_str # nocoverage
else:
example_value = param.get("example", DEFAULT_EXAMPLE[param_type])
if isinstance(example_value, bool):
example_value = str(example_value).lower()
if jsonify:
example_value = json.dumps(example_value)
if curl_argument:
return " --data-urlencode " + shlex.quote(f"{param_name}={example_value}")
return example_value
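def _example_param_example_values() -> None:
    # Illustrative sketch (never called anywhere) using hypothetical parameter
    # entries; real ones come from the OpenAPI spec. JSON-encoded container
    # types need an explicit example; simple types fall back to DEFAULT_EXAMPLE.
    array_param = {
        "name": "streams",
        "content": {"application/json": {"schema": {"type": "array"}, "example": [1, 2]}},
    }
    assert get_openapi_param_example_value_as_string(
        "/messages", "get", array_param, curl_argument=True,
    ) == " --data-urlencode 'streams=[1, 2]'"
    integer_param = {"name": "limit", "schema": {"type": "integer"}}
    assert get_openapi_param_example_value_as_string(
        "/messages", "get", integer_param, curl_argument=True,
    ) == " --data-urlencode limit=1"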
def generate_curl_example(endpoint: str, method: str,
api_url: str,
auth_email: str=DEFAULT_AUTH_EMAIL,
auth_api_key: str=DEFAULT_AUTH_API_KEY,
exclude: Optional[List[str]]=None,
include: Optional[List[str]]=None) -> List[str]:
if exclude is not None and include is not None:
raise AssertionError("exclude and include cannot be set at the same time.")
lines = ["```curl"]
operation = endpoint + ":" + method.lower()
operation_entry = openapi_spec.openapi()['paths'][endpoint][method.lower()]
global_security = openapi_spec.openapi()['security']
operation_params = operation_entry.get("parameters", [])
operation_request_body = operation_entry.get("requestBody", None)
operation_security = operation_entry.get("security", None)
if settings.RUNNING_OPENAPI_CURL_TEST: # nocoverage
from zerver.openapi.curl_param_value_generators import patch_openapi_example_values
operation_params, operation_request_body = patch_openapi_example_values(operation, operation_params,
operation_request_body)
format_dict = {}
for param in operation_params:
if param["in"] != "path":
continue
example_value = get_openapi_param_example_value_as_string(endpoint, method, param)
format_dict[param["name"]] = example_value
example_endpoint = endpoint.format_map(format_dict)
curl_first_line_parts = ["curl", *curl_method_arguments(example_endpoint, method,
api_url)]
lines.append(" ".join(map(shlex.quote, curl_first_line_parts)))
insecure_operations = ['/dev_fetch_api_key:post', '/fetch_api_key:post']
if operation_security is None:
if global_security == [{'basicAuth': []}]:
authentication_required = True
else:
raise AssertionError("Unhandled global securityScheme."
+ " Please update the code to handle this scheme.")
elif operation_security == []:
if operation in insecure_operations:
authentication_required = False
else:
raise AssertionError("Unknown operation without a securityScheme. "
+ "Please update insecure_operations.")
else:
raise AssertionError("Unhandled securityScheme. Please update the code to handle this scheme.")
if authentication_required:
lines.append(" -u " + shlex.quote(f"{auth_email}:{auth_api_key}"))
for param in operation_params:
if param["in"] == "path":
continue
param_name = param["name"]
if include is not None and param_name not in include:
continue
if exclude is not None and param_name in exclude:
continue
example_value = get_openapi_param_example_value_as_string(endpoint, method, param,
curl_argument=True)
lines.append(example_value)
if "requestBody" in operation_entry:
properties = operation_entry["requestBody"]["content"]["multipart/form-data"]["schema"]["properties"]
for key, property in properties.items():
lines.append(' -F ' + shlex.quote('{}=@{}'.format(key, property["example"])))
for i in range(1, len(lines)-1):
lines[i] = lines[i] + " \\"
lines.append("```")
return lines
def render_curl_example(function: str, api_url: str,
exclude: Optional[List[str]]=None,
include: Optional[List[str]]=None) -> List[str]:
""" A simple wrapper around generate_curl_example. """
parts = function.split(":")
endpoint = parts[0]
method = parts[1]
kwargs: Dict[str, Any] = {}
if len(parts) > 2:
kwargs["auth_email"] = parts[2]
if len(parts) > 3:
kwargs["auth_api_key"] = parts[3]
kwargs["api_url"] = api_url
kwargs["exclude"] = exclude
kwargs["include"] = include
return generate_curl_example(endpoint, method, **kwargs)
SUPPORTED_LANGUAGES: Dict[str, Any] = {
'python': {
'client_config': PYTHON_CLIENT_CONFIG,
'admin_config': PYTHON_CLIENT_ADMIN_CONFIG,
'render': render_python_code_example,
},
'curl': {
'render': render_curl_example,
},
'javascript': {
'client_config': JS_CLIENT_CONFIG,
'admin_config': JS_CLIENT_ADMIN_CONFIG,
'render': render_javascript_code_example,
},
}
class APIMarkdownExtension(Extension):
def __init__(self, api_url: Optional[str]) -> None:
self.config = {
'api_url': [
api_url,
'API URL to use when rendering curl examples',
],
}
def extendMarkdown(self, md: markdown.Markdown) -> None:
md.preprocessors.register(
APICodeExamplesPreprocessor(md, self.getConfigs()), 'generate_code_example', 525
)
md.preprocessors.register(
APIDescriptionPreprocessor(md, self.getConfigs()), 'generate_api_description', 530
)
class APICodeExamplesPreprocessor(Preprocessor):
def __init__(self, md: markdown.Markdown, config: Mapping[str, Any]) -> None:
super().__init__(md)
self.api_url = config['api_url']
def run(self, lines: List[str]) -> List[str]:
done = False
while not done:
for line in lines:
loc = lines.index(line)
match = MACRO_REGEXP.search(line)
if match:
language, options = parse_language_and_options(match.group(2))
function = match.group(3)
key = match.group(4)
argument = match.group(6)
if self.api_url is None:
raise AssertionError("Cannot render curl API examples without API URL set.")
options['api_url'] = self.api_url
                    if key == 'fixture':
                        if argument:
                            text = self.render_fixture(function, name=argument)
                        else:
                            text = self.render_fixture(function)
elif key == 'example':
if argument == 'admin_config=True':
text = SUPPORTED_LANGUAGES[language]['render'](function, admin_config=True)
else:
text = SUPPORTED_LANGUAGES[language]['render'](function, **options)
                    # The line that contains the directive to include the macro
                    # may be preceded or followed by text or tags; in that case
                    # we need to make sure that any preceding or following text
                    # stays the same.
line_split = MACRO_REGEXP.split(line, maxsplit=0)
preceding = line_split[0]
following = line_split[-1]
text = [preceding, *text, following]
lines = lines[:loc] + text + lines[loc+1:]
break
else:
done = True
return lines
def render_fixture(self, function: str, name: Optional[str]=None) -> List[str]:
fixture = []
path, method = function.rsplit(':', 1)
fixture_dict = get_openapi_fixture(path, method, name)
fixture_json = json.dumps(fixture_dict, indent=4, sort_keys=True,
separators=(',', ': '))
fixture.append('``` json')
fixture.extend(fixture_json.splitlines())
fixture.append('```')
return fixture
class APIDescriptionPreprocessor(Preprocessor):
def __init__(self, md: markdown.Markdown, config: Mapping[str, Any]) -> None:
super().__init__(md)
self.api_url = config['api_url']
def run(self, lines: List[str]) -> List[str]:
done = False
while not done:
for line in lines:
loc = lines.index(line)
match = MACRO_REGEXP_DESC.search(line)
if match:
function = match.group(2)
text = self.render_description(function)
                    # The line that contains the directive to include the macro
                    # may be preceded or followed by text or tags; in that case
                    # we need to make sure that any preceding or following text
                    # stays the same.
line_split = MACRO_REGEXP_DESC.split(line, maxsplit=0)
preceding = line_split[0]
following = line_split[-1]
text = [preceding, *text, following]
lines = lines[:loc] + text + lines[loc+1:]
break
else:
done = True
return lines
def render_description(self, function: str) -> List[str]:
description: List[str] = []
path, method = function.rsplit(':', 1)
description_dict = get_openapi_description(path, method)
description_dict = description_dict.replace('{{api_url}}', self.api_url)
description.extend(description_dict.splitlines())
return description
def makeExtension(*args: Any, **kwargs: str) -> APIMarkdownExtension:
return APIMarkdownExtension(*args, **kwargs)
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/globocom/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com [email protected]
import functools
from pyvows import Vows, expect
from thumbor.filters import BaseFilter, FiltersFactory, filter_method
import thumbor.filters
FILTER_PARAMS_DATA = [
{
'type': BaseFilter.Number,
'values': [
('1', 1), ('10', 10), ('99', 99), ('-1', -1), ('-10', -10), ('010', 10), (' 1 ', 1), ('0', 0)
],
'invalid_values': ['x', 'x10', '10x', '- 1', '']
},
{
'type': BaseFilter.PositiveNumber,
'values': [
('1', 1), ('10', 10), ('99', 99), (' 1 ', 1), ('010', 10), ('0', 0)
],
'invalid_values': ['-1', 'x', 'x10', '10x', '']
},
{
'type': BaseFilter.NegativeNumber,
'values': [
('-1', -1), ('-10', -10), (' -9 ', -9), ('-0', 0)
],
'invalid_values': ['x', 'x10', '10x', '- 1', '']
},
{
'type': BaseFilter.DecimalNumber,
'values': [
('1', 1.0), ('10', 10.0), ('99', 99.0), ('-1', -1.0), ('-10', -10.0), ('010', 10.0), (' 1 ', 1.0),
('1.0', 1.0), ('10.12', 10.12), ('9.9', 9.9), ('-1.1', -1.1), (' -10.2 ', -10.2), (' 1 ', 1.0),
('.11', 0.11), ('0.111', 0.111), ('0', 0.0)
],
'invalid_values': ['x', 'x10', '10x', '- 1.1', '', '.']
},
{
'type': BaseFilter.String,
'values': [
('a', 'a'), ('bbbb', 'bbbb'), (' cccc ', 'cccc'), (' cc:cc ', 'cc:cc'), ('\'a,b\'', 'a,b')
],
'invalid_values': ['', ',', ',,,,']
},
{
'type': BaseFilter.Boolean,
'values': [
('1', True), ('True', True), ('true', True), ('0', False), ('False', False), ('false', False), (' True ', True)
],
'invalid_values': ['', 'x', 'TRUE', '111']
},
{
'type': r'\dx\d',
'values': [
('1x1', '1x1'), (' 9x9 ', '9x9')
],
'invalid_values': ['a', ',', '9 x 9']
}
]
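def _example_number_param_parsing():
    # Minimal sketch (not a vow, never called): how a row of the table above is
    # exercised by the WithValidValues context below, using the Number type and
    # its ('010', 10) entry.
    BaseFilter.compile_regex({'name': 'x', 'params': [BaseFilter.Number]})
    f = BaseFilter('x(010)')
    return f.params[0]  # 10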
@Vows.batch
class FilterParamsVows(Vows.Context):
def topic(self):
for test_data in FILTER_PARAMS_DATA:
yield(test_data)
class WithValidValues(Vows.Context):
def topic(self, test_data):
for value in test_data['values']:
yield(test_data['type'], value[0], value[1])
def should_correctly_parse_value(self, data):
type, test_data, expected_data = data
BaseFilter.compile_regex({'name': 'x', 'params': [type]})
f = BaseFilter('x(%s)' % test_data)
expect(f.params[0]).to_equal(expected_data)
class WithInvalidValues(Vows.Context):
def topic(self, test_data):
for value in test_data['invalid_values']:
yield(test_data['type'], value)
def should_not_parse_invalid_value(self, data):
type, test_data = data
BaseFilter.compile_regex({'name': 'x', 'params': [type]})
f = BaseFilter('x(%s)' % test_data)
expect(f.params).to_be_null()
class MyFilter(BaseFilter):
@filter_method(BaseFilter.Number, BaseFilter.DecimalNumber)
def my_filter(self, value1, value2):
return (value1, value2)
class StringFilter(BaseFilter):
@filter_method(BaseFilter.String)
def my_string_filter(self, value):
return value
class EmptyFilter(BaseFilter):
@filter_method()
def my_empty_filter(self):
return 'ok'
class AsyncFilter(BaseFilter):
@filter_method(BaseFilter.String, async=True)
def my_async_filter(self, callback, value):
callback(value)
class InvalidFilter(BaseFilter):
def my_invalid_filter(self, value):
return value
class DoubleStringFilter(BaseFilter):
@filter_method(BaseFilter.String, BaseFilter.String)
def my_string_filter(self, value1, value2):
return (value1, value2)
class OptionalParamFilter(BaseFilter):
@filter_method(BaseFilter.String, BaseFilter.String)
def my_optional_filter(self, value1, value2="not provided"):
return (value1, value2)
class PreLoadFilter(BaseFilter):
phase = thumbor.filters.PHASE_PRE_LOAD
@filter_method(BaseFilter.String)
def my_pre_load_filter(self, value):
return value
@Vows.batch
class FilterVows(Vows.Context):
class CreatingFilterInstances(Vows.Context):
def topic(self):
class Any:
pass
ctx = Any()
ctx.modules = Any()
engine = Any()
is_multiple = lambda: False
engine.is_multiple = is_multiple
ctx.modules.engine = engine
fact = FiltersFactory([MyFilter, StringFilter, OptionalParamFilter, PreLoadFilter])
return (fact, ctx)
class RunnerWithParameters(Vows.Context):
def topic(self, parent_topic):
factory, context = parent_topic
return factory.create_instances(context, 'my_string_filter(aaaa):my_string_filter(bbb):my_pre_load_filter(ccc)')
def should_create_two_instances(self, runner):
post_instances = runner.filter_instances[thumbor.filters.PHASE_POST_TRANSFORM]
pre_instances = runner.filter_instances[thumbor.filters.PHASE_PRE_LOAD]
expect(len(post_instances)).to_equal(2)
expect(post_instances[0].__class__).to_equal(StringFilter)
expect(post_instances[1].__class__).to_equal(StringFilter)
expect(len(pre_instances)).to_equal(1)
expect(pre_instances[0].__class__).to_equal(PreLoadFilter)
class RunningPostFilters(Vows.Context):
@Vows.async_topic
def topic(self, callback, runner):
runner.apply_filters(thumbor.filters.PHASE_POST_TRANSFORM, functools.partial(callback, runner))
def should_run_only_post_filters(self, args):
runner = args.args[0]
post_instances = runner.filter_instances[thumbor.filters.PHASE_POST_TRANSFORM]
pre_instances = runner.filter_instances[thumbor.filters.PHASE_PRE_LOAD]
expect(len(post_instances)).to_equal(0)
expect(len(pre_instances)).to_equal(1)
class RunningPreFilters(Vows.Context):
@Vows.async_topic
def topic(self, callback, args):
runner = args.args[0]
runner.apply_filters(thumbor.filters.PHASE_PRE_LOAD, functools.partial(callback, runner))
def should_run_only_pre_filters(self, args):
runner = args.args[0]
post_instances = runner.filter_instances[thumbor.filters.PHASE_POST_TRANSFORM]
pre_instances = runner.filter_instances[thumbor.filters.PHASE_PRE_LOAD]
expect(len(post_instances)).to_equal(0)
expect(len(pre_instances)).to_equal(0)
class WithOneValidParam(Vows.Context):
def topic(self, parent_topic):
factory, context = parent_topic
runner = factory.create_instances(context, 'my_filter(1, 0a):my_string_filter(aaaa)')
return runner.filter_instances[thumbor.filters.PHASE_POST_TRANSFORM]
def should_create_one_instance(self, instances):
expect(len(instances)).to_equal(1)
expect(instances[0].__class__).to_equal(StringFilter)
class WithParameterContainingColons(Vows.Context):
def topic(self, parent_topic):
factory, context = parent_topic
runner = factory.create_instances(context, 'my_string_filter(aaaa):my_string_filter(aa:aa)')
return runner.filter_instances[thumbor.filters.PHASE_POST_TRANSFORM]
def should_create_two_instances(self, instances):
expect(len(instances)).to_equal(2)
expect(instances[0].__class__).to_equal(StringFilter)
expect(instances[1].__class__).to_equal(StringFilter)
            def should_understand_parameters(self, instances):
expect(instances[0].params).to_equal(["aaaa"])
expect(instances[1].params).to_equal(["aa:aa"])
class WithValidParams(Vows.Context):
def topic(self, parent_topic):
factory, context = parent_topic
runner = factory.create_instances(context, 'my_filter(1, 0):my_string_filter(aaaa)')
return runner.filter_instances[thumbor.filters.PHASE_POST_TRANSFORM]
def should_create_two_instances(self, instances):
expect(len(instances)).to_equal(2)
expect(instances[0].__class__).to_equal(MyFilter)
expect(instances[1].__class__).to_equal(StringFilter)
class WhenRunning(Vows.Context):
def topic(self, instances):
result = []
for instance in instances:
result.append(instance.run())
return result
                def should_return_correct_results(self, result):
expect(result[0]).to_equal([(1, 0.0)])
expect(result[1]).to_equal(['aaaa'])
class WithOptionalParamFilter(Vows.Context):
def topic(self, parent_topic):
factory, context = parent_topic
runner = factory.create_instances(context, 'my_optional_filter(aa, bb)')
return runner.filter_instances[thumbor.filters.PHASE_POST_TRANSFORM]
            def should_create_one_instance(self, instances):
expect(len(instances)).to_equal(1)
expect(instances[0].__class__).to_equal(OptionalParamFilter)
def should_understand_parameters(self, instances):
expect(instances[0].run()).to_equal([("aa", "bb")])
class WithOptionalParamsInOptionalFilter(Vows.Context):
def topic(self, parent_topic):
factory, context = parent_topic
runner = factory.create_instances(context, 'my_optional_filter(aa)')
return runner.filter_instances[thumbor.filters.PHASE_POST_TRANSFORM]
            def should_create_one_instance(self, instances):
expect(len(instances)).to_equal(1)
expect(instances[0].__class__).to_equal(OptionalParamFilter)
def should_understand_parameters(self, instances):
expect(instances[0].run()).to_equal([("aa", "not provided")])
class WithInvalidOptionalFilter(Vows.Context):
def topic(self, parent_topic):
factory, context = parent_topic
runner = factory.create_instances(context, 'my_optional_filter()')
return runner.filter_instances[thumbor.filters.PHASE_POST_TRANSFORM]
            def should_not_create_instances(self, instances):
expect(len(instances)).to_equal(0)
class WithPreLoadFilter(Vows.Context):
def topic(self, parent_topic):
factory, context = parent_topic
runner = factory.create_instances(context, 'my_pre_load_filter(aaaa)')
return runner.filter_instances[thumbor.filters.PHASE_PRE_LOAD]
            def should_create_one_instance(self, instances):
expect(len(instances)).to_equal(1)
expect(instances[0].__class__).to_equal(PreLoadFilter)
            def should_understand_parameters(self, instances):
expect(instances[0].params).to_equal(["aaaa"])
class WithInvalidFilter(Vows.Context):
def topic(self):
InvalidFilter.pre_compile()
return InvalidFilter
def doesnt_create_a_runnable_method(self, cls):
expect(hasattr(cls, 'runnable_method')).to_be_false()
class WithValidFilter(Vows.Context):
def topic(self):
MyFilter.pre_compile()
return MyFilter
def creates_a_runnable_method(self, cls):
expect(cls.runnable_method).to_equal(MyFilter.my_filter)
class WithValidNumber:
def topic(self, cls):
f = cls("my_filter(1, -1.1)")
return f.run()
def sets_correct_result_value(self, topic):
expect(topic).to_equal([(1, -1.1)])
class WithInvalidNumber:
def topic(self, cls):
f = cls("my_invalid_filter(x, 1)")
return f.run()
def throws_an_error(self, topic):
expect(hasattr(topic, 'result')).to_be_false()
class WhenPassedCallback:
@Vows.async_topic
def topic(self, callback, cls):
f = cls("my_filter(1, -1.1)")
f.run(callback)
def calls_callback(self, topic):
expect(topic.args).to_equal(())
class DoubleStringFilter(Vows.Context):
def topic(self):
DoubleStringFilter.pre_compile()
return DoubleStringFilter
class WithTwoNormalStrings:
def topic(self, cls):
f = cls("my_string_filter(a, b)")
return f.run()
def sets_correct_values(self, topic):
expect(topic).to_equal([('a', 'b')])
class WithStringsWithCommas:
def topic(self, cls):
tests = [
("my_string_filter(a,'b, c')", [('a', 'b, c')]),
("my_string_filter('a,b', c)", [('a,b', 'c')]),
("my_string_filter('ab', c)", [('ab', 'c')]),
("my_string_filter('ab,', c)", [('ab,', 'c')]),
("my_string_filter('ab,', ',c')", [('ab,', ',c')]),
("my_string_filter('ab, c)", [('\'ab', 'c')]),
("my_string_filter('ab, c',d)", [('ab, c', 'd')]),
("my_string_filter('a,b, c)", None),
("my_string_filter('a,b, c')", None),
]
for (test, expected) in tests:
f = cls(test)
yield f.run(), expected
def sets_correct_values(self, test_data):
result, expected = test_data
expect(result).to_equal(expected)
class WithEmptyFilter(Vows.Context):
def topic(self):
EmptyFilter.pre_compile()
f = EmptyFilter('my_empty_filter()')
return f.run()
def should_call_filter(self, value):
expect(value).to_equal(['ok'])
class WithAsyncFilter(Vows.Context):
@Vows.async_topic
def topic(self, callback):
AsyncFilter.pre_compile()
f = AsyncFilter("my_async_filter(yyy)")
f.run(callback)
def should_call_callback(self, topic):
expect(topic.args[0]).to_equal('yyy')
|
|
import json
import os
import shutil
import boto3
import pandas as pd
import pytest
from featuretools.demo import load_mock_customer
from featuretools.entityset import EntitySet, deserialize, serialize
from featuretools.tests import integration_data
from featuretools.variable_types.variable import (
Categorical,
Index,
TimeIndex,
find_variable_types
)
CACHE = os.path.join(os.path.dirname(integration_data.__file__), '.cache')
BUCKET_NAME = "test-bucket"
WRITE_KEY_NAME = "test-key"
TEST_S3_URL = "s3://{}/{}".format(BUCKET_NAME, WRITE_KEY_NAME)
S3_URL = "s3://featuretools-static/test_serialization_data_2.0.0.tar"
URL = 'https://featuretools-static.s3.amazonaws.com/test_serialization_data_2.0.0.tar'
TEST_KEY = "test_access_key_es"
def test_all_variable_descriptions():
variable_types = find_variable_types()
es = EntitySet()
dataframe = pd.DataFrame(columns=list(variable_types))
es.entity_from_dataframe(
'variable_types',
dataframe,
index='index',
time_index='datetime_time_index',
variable_types=variable_types,
)
entity = es['variable_types']
for variable in entity.variables:
description = variable.to_data_description()
_variable = deserialize.description_to_variable(description, entity=entity)
assert variable.__eq__(_variable)
def test_custom_variable_descriptions():
class ItemList(Categorical):
type_string = "item_list"
_default_pandas_dtype = list
es = EntitySet()
variables = {'item_list': ItemList, 'time_index': TimeIndex, 'index': Index}
dataframe = pd.DataFrame(columns=list(variables))
es.entity_from_dataframe(
'custom_variable', dataframe, index='index',
time_index='time_index', variable_types=variables)
entity = es['custom_variable']
for variable in entity.variables:
description = variable.to_data_description()
_variable = deserialize.description_to_variable(description, entity=entity)
assert variable.__eq__(_variable)
def test_variable_descriptions(es):
for entity in es.entities:
for variable in entity.variables:
description = variable.to_data_description()
_variable = deserialize.description_to_variable(description, entity=entity)
assert variable.__eq__(_variable)
def test_entity_descriptions(es):
_es = EntitySet(es.id)
for entity in es.metadata.entities:
description = serialize.entity_to_description(entity)
deserialize.description_to_entity(description, _es)
_entity = _es[description['id']]
_entity.last_time_index = entity.last_time_index
assert entity.__eq__(_entity, deep=True)
def test_entityset_description(es):
description = serialize.entityset_to_description(es)
_es = deserialize.description_to_entityset(description)
assert es.metadata.__eq__(_es, deep=True)
@pytest.fixture
def path_management():
path = os.path.join(CACHE, 'es')
os.makedirs(path, exist_ok=True)
yield path
shutil.rmtree(path)
def test_invalid_formats(es, path_management):
error_text = 'must be one of the following formats: {}'
error_text = error_text.format(', '.join(serialize.FORMATS))
with pytest.raises(ValueError, match=error_text):
serialize.write_entity_data(es.entities[0], path=path_management, format='')
with pytest.raises(ValueError, match=error_text):
entity = {'loading_info': {'location': 'data', 'type': ''}}
deserialize.read_entity_data(entity, path='.')
def test_empty_dataframe(es):
for entity in es.entities:
description = serialize.entity_to_description(entity)
dataframe = deserialize.empty_dataframe(description)
assert dataframe.empty
def test_to_csv(es, path_management):
es.to_csv(path_management, encoding='utf-8', engine='python')
new_es = deserialize.read_entityset(path_management)
assert es.__eq__(new_es, deep=True)
assert type(es['log'].df['latlong'][0]) == tuple
assert type(new_es['log'].df['latlong'][0]) == tuple
def test_to_pickle(es, path_management):
es.to_pickle(path_management)
new_es = deserialize.read_entityset(path_management)
assert es.__eq__(new_es, deep=True)
assert type(es['log'].df['latlong'][0]) == tuple
assert type(new_es['log'].df['latlong'][0]) == tuple
def test_to_parquet(es, path_management):
es.to_parquet(path_management)
new_es = deserialize.read_entityset(path_management)
assert es.__eq__(new_es, deep=True)
assert type(es['log'].df['latlong'][0]) == tuple
assert type(new_es['log'].df['latlong'][0]) == tuple
def test_to_parquet_with_lti(path_management):
es = load_mock_customer(return_entityset=True, random_seed=0)
es.to_parquet(path_management)
new_es = deserialize.read_entityset(path_management)
assert es.__eq__(new_es, deep=True)
def test_to_pickle_id_none(path_management):
es = EntitySet()
es.to_pickle(path_management)
new_es = deserialize.read_entityset(path_management)
assert es.__eq__(new_es, deep=True)
# TODO: Fix Moto tests needing to explicitly set permissions for objects
@pytest.fixture
def s3_client():
_environ = os.environ.copy()
from moto import mock_s3
with mock_s3():
s3 = boto3.resource('s3')
yield s3
os.environ.clear()
os.environ.update(_environ)
@pytest.fixture
def s3_bucket(s3_client):
s3_client.create_bucket(Bucket=BUCKET_NAME, ACL='public-read-write')
s3_bucket = s3_client.Bucket(BUCKET_NAME)
yield s3_bucket
def make_public(s3_client, s3_bucket):
obj = list(s3_bucket.objects.all())[0].key
s3_client.ObjectAcl(BUCKET_NAME, obj).put(ACL='public-read-write')
def test_serialize_s3_csv(es, s3_client, s3_bucket):
es.to_csv(TEST_S3_URL, encoding='utf-8', engine='python')
make_public(s3_client, s3_bucket)
new_es = deserialize.read_entityset(TEST_S3_URL)
assert es.__eq__(new_es, deep=True)
def test_serialize_s3_pickle(es, s3_client, s3_bucket):
es.to_pickle(TEST_S3_URL)
make_public(s3_client, s3_bucket)
new_es = deserialize.read_entityset(TEST_S3_URL)
assert es.__eq__(new_es, deep=True)
def test_serialize_s3_parquet(es, s3_client, s3_bucket):
es.to_parquet(TEST_S3_URL)
make_public(s3_client, s3_bucket)
new_es = deserialize.read_entityset(TEST_S3_URL)
assert es.__eq__(new_es, deep=True)
def test_serialize_s3_anon_csv(es, s3_client, s3_bucket):
es.to_csv(TEST_S3_URL, encoding='utf-8', engine='python', profile_name=False)
make_public(s3_client, s3_bucket)
new_es = deserialize.read_entityset(TEST_S3_URL, profile_name=False)
assert es.__eq__(new_es, deep=True)
def test_serialize_s3_anon_pickle(es, s3_client, s3_bucket):
es.to_pickle(TEST_S3_URL, profile_name=False)
make_public(s3_client, s3_bucket)
new_es = deserialize.read_entityset(TEST_S3_URL, profile_name=False)
assert es.__eq__(new_es, deep=True)
def test_serialize_s3_anon_parquet(es, s3_client, s3_bucket):
es.to_parquet(TEST_S3_URL, profile_name=False)
make_public(s3_client, s3_bucket)
new_es = deserialize.read_entityset(TEST_S3_URL, profile_name=False)
assert es.__eq__(new_es, deep=True)
def create_test_credentials(test_path):
with open(test_path, "w+") as f:
f.write("[test]\n")
f.write("aws_access_key_id=AKIAIOSFODNN7EXAMPLE\n")
f.write("aws_secret_access_key=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY\n")
def create_test_config(test_path_config):
with open(test_path_config, "w+") as f:
f.write("[profile test]\n")
f.write("region=us-east-2\n")
f.write("output=text\n")
@pytest.fixture
def setup_test_profile(monkeypatch):
test_path = os.path.join(CACHE, 'test_credentials')
test_path_config = os.path.join(CACHE, 'test_config')
monkeypatch.setenv("AWS_SHARED_CREDENTIALS_FILE", test_path)
monkeypatch.setenv("AWS_CONFIG_FILE", test_path_config)
monkeypatch.delenv("AWS_ACCESS_KEY_ID", raising=False)
monkeypatch.delenv("AWS_SECRET_ACCESS_KEY", raising=False)
monkeypatch.setenv("AWS_PROFILE", "test")
try:
os.remove(test_path)
os.remove(test_path_config)
except OSError:
pass
create_test_credentials(test_path)
create_test_config(test_path_config)
yield
os.remove(test_path)
os.remove(test_path_config)
def test_s3_test_profile(es, s3_client, s3_bucket, setup_test_profile):
es.to_csv(TEST_S3_URL, encoding='utf-8', engine='python', profile_name='test')
make_public(s3_client, s3_bucket)
new_es = deserialize.read_entityset(TEST_S3_URL, profile_name='test')
assert es.__eq__(new_es, deep=True)
def test_serialize_url_csv(es):
error_text = "Writing to URLs is not supported"
with pytest.raises(ValueError, match=error_text):
es.to_csv(URL, encoding='utf-8', engine='python')
def test_serialize_subdirs_not_removed(es, tmpdir):
write_path = tmpdir.mkdir("test")
test_dir = write_path.mkdir("test_dir")
with open(str(write_path.join('data_description.json')), 'w') as f:
json.dump('__SAMPLE_TEXT__', f)
serialize.write_data_description(es, path=str(write_path), index='1', sep='\t', encoding='utf-8', compression=None)
assert os.path.exists(str(test_dir))
with open(str(write_path.join('data_description.json')), 'r') as f:
assert '__SAMPLE_TEXT__' not in json.load(f)
def test_deserialize_url_csv(es):
new_es = deserialize.read_entityset(URL)
assert es.__eq__(new_es, deep=True)
def test_default_s3_csv(es):
new_es = deserialize.read_entityset(S3_URL)
assert es.__eq__(new_es, deep=True)
def test_anon_s3_csv(es):
new_es = deserialize.read_entityset(S3_URL, profile_name=False)
assert es.__eq__(new_es, deep=True)
|
|
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A fake VMware VI API implementation.
"""
import collections
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import units
from oslo_utils import uuidutils
from oslo_vmware import exceptions as vexc
from oslo_vmware.objects import datastore as ds_obj
import six
from nova import exception
from nova.virt.vmwareapi import constants
_CLASSES = ['Datacenter', 'Datastore', 'ResourcePool', 'VirtualMachine',
'Network', 'HostSystem', 'HostNetworkSystem', 'Task', 'session',
'files', 'ClusterComputeResource', 'HostStorageSystem']
_FAKE_FILE_SIZE = 1024
_FAKE_VCENTER_UUID = '497c514c-ef5e-4e7f-8d93-ec921993b93a'
_db_content = {}
_array_types = {}
_vim_map = {}
LOG = logging.getLogger(__name__)
def reset():
"""Resets the db contents."""
cleanup()
create_network()
create_host_network_system()
create_host_storage_system()
ds_ref1 = create_datastore('ds1', 1024, 500)
create_host(ds_ref=ds_ref1)
ds_ref2 = create_datastore('ds2', 1024, 500)
create_host(ds_ref=ds_ref2)
create_datacenter('dc1', ds_ref1)
create_datacenter('dc2', ds_ref2)
create_res_pool()
create_cluster('test_cluster', ds_ref1)
create_cluster('test_cluster2', ds_ref2)
def cleanup():
"""Clear the db contents."""
for c in _CLASSES:
# We fake the datastore by keeping the file references as a list of
# names in the db
if c == 'files':
_db_content[c] = []
else:
_db_content[c] = {}
def _create_object(table, table_obj):
"""Create an object in the db."""
_db_content[table][table_obj.obj] = table_obj
def _get_object(obj_ref):
"""Get object for the give reference."""
return _db_content[obj_ref.type][obj_ref]
def _get_objects(obj_type):
"""Get objects of the type."""
lst_objs = FakeRetrieveResult()
for key in _db_content[obj_type]:
lst_objs.add_object(_db_content[obj_type][key])
return lst_objs
def _convert_to_array_of_mor(mors):
"""Wraps the given array into a DataObject."""
array_of_mors = DataObject()
array_of_mors.ManagedObjectReference = mors
return array_of_mors
def _convert_to_array_of_opt_val(optvals):
"""Wraps the given array into a DataObject."""
array_of_optv = DataObject()
array_of_optv.OptionValue = optvals
return array_of_optv
def _create_array_of_type(t):
"""Returns an array to contain objects of type t."""
if t in _array_types:
return _array_types[t]()
array_type_name = 'ArrayOf%s' % t
array_type = type(array_type_name, (DataObject,), {})
def __init__(self):
super(array_type, self).__init__(array_type_name)
setattr(self, t, [])
setattr(array_type, '__init__', __init__)
_array_types[t] = array_type
return array_type()
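def _example_array_of_type():
    """Illustrative sketch (unused): the first call for a type builds and
    caches an ArrayOf<type> subclass of DataObject; later calls reuse it.
    """
    devices = _create_array_of_type('VirtualDevice')
    devices.VirtualDevice.append('fake-device')
    return type(devices).__name__  # 'ArrayOfVirtualDevice'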
class FakeRetrieveResult(object):
"""Object to retrieve a ObjectContent list."""
def __init__(self, token=None):
self.objects = []
if token is not None:
self.token = token
def add_object(self, object):
self.objects.append(object)
class MissingProperty(object):
"""Missing object in ObjectContent's missing set."""
def __init__(self, path='fake-path', message='fake_message',
method_fault=None):
self.path = path
self.fault = DataObject()
self.fault.localizedMessage = message
self.fault.fault = method_fault
def _get_object_refs(obj_type):
"""Get object References of the type."""
lst_objs = []
for key in _db_content[obj_type]:
lst_objs.append(key)
return lst_objs
def _update_object(table, table_obj):
"""Update objects of the type."""
_db_content[table][table_obj.obj] = table_obj
class Prop(object):
"""Property Object base class."""
def __init__(self, name=None, val=None):
self.name = name
self.val = val
class ManagedObjectReference(object):
"""A managed object reference is a remote identifier."""
def __init__(self, name="ManagedObject", value=None):
        super(ManagedObjectReference, self).__init__()
# Managed Object Reference value attributes
# typically have values like vm-123 or
# host-232 and not UUID.
self.value = value
# Managed Object Reference type
# attributes hold the name of the type
# of the vCenter object the value
# attribute is the identifier for
self.type = name
self._type = name
class ObjectContent(object):
"""ObjectContent array holds dynamic properties."""
# This class is a *fake* of a class sent back to us by
# SOAP. It has its own names. These names are decided
# for us by the API we are *faking* here.
def __init__(self, obj_ref, prop_list=None, missing_list=None):
self.obj = obj_ref
if not isinstance(prop_list, collections.Iterable):
prop_list = []
if not isinstance(missing_list, collections.Iterable):
missing_list = []
# propSet is the name your Python code will need to
# use since this is the name that the API will use
if prop_list:
self.propSet = prop_list
# missingSet is the name your python code will
# need to use since this is the name that the
# API we are talking to will use.
if missing_list:
self.missingSet = missing_list
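def _example_object_content():
    """Illustrative sketch (unused): build an ObjectContent the way the fake
    retrieval results do, with a single property in its propSet.
    """
    ref = ManagedObjectReference(name='VirtualMachine', value='vm-123')
    prop = Prop(name='runtime.powerState', val='poweredOn')
    return ObjectContent(ref, prop_list=[prop]).propSet[0].val  # 'poweredOn'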
class ManagedObject(object):
"""Managed Object base class."""
_counter = 0
def __init__(self, mo_id_prefix="obj"):
"""Sets the obj property which acts as a reference to the object."""
object.__setattr__(self, 'mo_id', self._generate_moid(mo_id_prefix))
object.__setattr__(self, 'propSet', [])
object.__setattr__(self, 'obj',
ManagedObjectReference(self.__class__.__name__,
self.mo_id))
def set(self, attr, val):
"""Sets an attribute value. Not using the __setattr__ directly for we
want to set attributes of the type 'a.b.c' and using this function
class we set the same.
"""
self.__setattr__(attr, val)
def get(self, attr):
"""Gets an attribute. Used as an intermediary to get nested
property like 'a.b.c' value.
"""
return self.__getattr__(attr)
def delete(self, attr):
"""Deletes an attribute."""
self.propSet = filter(lambda elem: elem.name != attr, self.propSet)
def __setattr__(self, attr, val):
        # TODO(hartsocks): this adds unnecessary complexity to the class
for prop in self.propSet:
if prop.name == attr:
prop.val = val
return
elem = Prop()
elem.name = attr
elem.val = val
self.propSet.append(elem)
def __getattr__(self, attr):
# TODO(hartsocks): remove this
# in a real ManagedObject you have to iterate the propSet
# in a real ManagedObject, the propSet is a *set* not a list
for elem in self.propSet:
if elem.name == attr:
return elem.val
msg = "Property %(attr)s not set for the managed object %(name)s"
raise exception.NovaException(msg % {'attr': attr,
'name': self.__class__.__name__})
def _generate_moid(self, prefix):
"""Generates a new Managed Object ID."""
self.__class__._counter += 1
return prefix + "-" + str(self.__class__._counter)
def __repr__(self):
return jsonutils.dumps({elem.name: elem.val
for elem in self.propSet})
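def _example_managed_object_props():
    """Illustrative sketch (unused): attributes are stored as Prop entries in
    propSet, so dotted names such as 'runtime.powerState' work as plain keys.
    """
    mo = ManagedObject('vm')
    mo.set('runtime.powerState', 'poweredOff')
    return mo.get('runtime.powerState')  # 'poweredOff'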
class DataObject(object):
"""Data object base class."""
def __init__(self, obj_name=None):
self.obj_name = obj_name
def __repr__(self):
return str(self.__dict__)
def __eq__(self, other):
return self.__dict__ == other.__dict__
class HostInternetScsiHba(DataObject):
"""iSCSI Host Bus Adapter."""
def __init__(self):
super(HostInternetScsiHba, self).__init__()
self.device = 'vmhba33'
self.key = 'key-vmhba33'
class FileAlreadyExists(DataObject):
"""File already exists class."""
def __init__(self):
super(FileAlreadyExists, self).__init__()
self.__name__ = vexc.FILE_ALREADY_EXISTS
class FileNotFound(DataObject):
"""File not found class."""
def __init__(self):
super(FileNotFound, self).__init__()
self.__name__ = vexc.FILE_NOT_FOUND
class FileFault(DataObject):
"""File fault."""
def __init__(self):
super(FileFault, self).__init__()
self.__name__ = vexc.FILE_FAULT
class CannotDeleteFile(DataObject):
"""Cannot delete file."""
def __init__(self):
super(CannotDeleteFile, self).__init__()
self.__name__ = vexc.CANNOT_DELETE_FILE
class FileLocked(DataObject):
"""File locked."""
def __init__(self):
super(FileLocked, self).__init__()
self.__name__ = vexc.FILE_LOCKED
class VirtualDisk(DataObject):
"""Virtual Disk class."""
def __init__(self, controllerKey=0, unitNumber=0):
super(VirtualDisk, self).__init__()
self.key = 0
self.controllerKey = controllerKey
self.unitNumber = unitNumber
class VirtualDiskFlatVer2BackingInfo(DataObject):
"""VirtualDiskFlatVer2BackingInfo class."""
def __init__(self):
super(VirtualDiskFlatVer2BackingInfo, self).__init__()
self.thinProvisioned = False
self.eagerlyScrub = False
class VirtualDiskRawDiskMappingVer1BackingInfo(DataObject):
"""VirtualDiskRawDiskMappingVer1BackingInfo class."""
def __init__(self):
super(VirtualDiskRawDiskMappingVer1BackingInfo, self).__init__()
self.lunUuid = ""
class VirtualIDEController(DataObject):
def __init__(self, key=0):
self.key = key
class VirtualLsiLogicController(DataObject):
"""VirtualLsiLogicController class."""
def __init__(self, key=0, scsiCtlrUnitNumber=0):
self.key = key
self.scsiCtlrUnitNumber = scsiCtlrUnitNumber
self.device = []
class VirtualLsiLogicSASController(DataObject):
"""VirtualLsiLogicSASController class."""
pass
class VirtualPCNet32(DataObject):
"""VirtualPCNet32 class."""
def __init__(self):
super(VirtualPCNet32, self).__init__()
self.key = 4000
class OptionValue(DataObject):
"""OptionValue class."""
def __init__(self, key=None, value=None):
super(OptionValue, self).__init__()
self.key = key
self.value = value
class VirtualMachine(ManagedObject):
"""Virtual Machine class."""
def __init__(self, **kwargs):
super(VirtualMachine, self).__init__("vm")
self.set("name", kwargs.get("name", 'test-vm'))
self.set("runtime.connectionState",
kwargs.get("conn_state", "connected"))
self.set("summary.config.guestId",
kwargs.get("guest", constants.DEFAULT_OS_TYPE))
ds_do = kwargs.get("ds", None)
self.set("datastore", _convert_to_array_of_mor(ds_do))
self.set("summary.guest.toolsStatus", kwargs.get("toolsstatus",
"toolsOk"))
self.set("summary.guest.toolsRunningStatus", kwargs.get(
"toolsrunningstate", "guestToolsRunning"))
self.set("runtime.powerState", kwargs.get("powerstate", "poweredOn"))
self.set("config.files.vmPathName", kwargs.get("vmPathName"))
self.set("summary.config.numCpu", kwargs.get("numCpu", 1))
self.set("summary.config.memorySizeMB", kwargs.get("mem", 1))
self.set("summary.config.instanceUuid", kwargs.get("instanceUuid"))
self.set("version", kwargs.get("version"))
devices = _create_array_of_type('VirtualDevice')
devices.VirtualDevice = kwargs.get("virtual_device", [])
self.set("config.hardware.device", devices)
exconfig_do = kwargs.get("extra_config", None)
self.set("config.extraConfig",
_convert_to_array_of_opt_val(exconfig_do))
if exconfig_do:
for optval in exconfig_do:
self.set('config.extraConfig["%s"]' % optval.key, optval)
self.set('runtime.host', kwargs.get("runtime_host", None))
self.device = kwargs.get("virtual_device", [])
# Sample of diagnostics data is below.
config = [
('template', False),
('vmPathName', 'fake_path'),
('memorySizeMB', 512),
('cpuReservation', 0),
('memoryReservation', 0),
('numCpu', 1),
('numEthernetCards', 1),
('numVirtualDisks', 1)]
self.set("summary.config", config)
quickStats = [
('overallCpuUsage', 0),
('overallCpuDemand', 0),
('guestMemoryUsage', 0),
('hostMemoryUsage', 141),
('balloonedMemory', 0),
('consumedOverheadMemory', 20)]
self.set("summary.quickStats", quickStats)
key1 = {'key': 'cpuid.AES'}
key2 = {'key': 'cpuid.AVX'}
runtime = [
('connectionState', 'connected'),
('powerState', 'poweredOn'),
('toolsInstallerMounted', False),
('suspendInterval', 0),
('memoryOverhead', 21417984),
('maxCpuUsage', 2000),
('featureRequirement', [key1, key2])]
self.set("summary.runtime", runtime)
def _update_extra_config(self, extra):
extra_config = self.get("config.extraConfig")
values = extra_config.OptionValue
for value in values:
if value.key == extra.key:
value.value = extra.value
return
kv = DataObject()
kv.key = extra.key
kv.value = extra.value
extra_config.OptionValue.append(kv)
self.set("config.extraConfig", extra_config)
extra_config = self.get("config.extraConfig")
def reconfig(self, factory, val):
"""Called to reconfigure the VM. Actually customizes the property
setting of the Virtual Machine object.
"""
if hasattr(val, 'name') and val.name:
self.set("name", val.name)
if hasattr(val, 'extraConfig'):
extraConfigs = _merge_extraconfig(
self.get("config.extraConfig").OptionValue,
val.extraConfig)
self.get("config.extraConfig").OptionValue = extraConfigs
if hasattr(val, 'instanceUuid') and val.instanceUuid is not None:
if val.instanceUuid == "":
val.instanceUuid = uuidutils.generate_uuid()
self.set("summary.config.instanceUuid", val.instanceUuid)
try:
if not hasattr(val, 'deviceChange'):
return
if hasattr(val, 'extraConfig'):
# there are 2 cases - new entry or update an existing one
for extra in val.extraConfig:
self._update_extra_config(extra)
if len(val.deviceChange) < 2:
return
# Case of Reconfig of VM to attach disk
controller_key = val.deviceChange[0].device.controllerKey
filename = val.deviceChange[0].device.backing.fileName
disk = VirtualDisk()
disk.controllerKey = controller_key
disk_backing = VirtualDiskFlatVer2BackingInfo()
disk_backing.fileName = filename
disk_backing.key = -101
disk.backing = disk_backing
disk.capacityInBytes = 1024
disk.capacityInKB = 1
controller = VirtualLsiLogicController()
controller.key = controller_key
devices = _create_array_of_type('VirtualDevice')
devices.VirtualDevice = [disk, controller, self.device[0]]
self.set("config.hardware.device", devices)
except AttributeError:
pass
class Network(ManagedObject):
"""Network class."""
def __init__(self):
super(Network, self).__init__("network")
self.set("summary.name", "vmnet0")
class ResourcePool(ManagedObject):
"""Resource Pool class."""
def __init__(self, name="test_ResPool", value="resgroup-test"):
super(ResourcePool, self).__init__("rp")
self.set("name", name)
summary = DataObject()
runtime = DataObject()
config = DataObject()
memory = DataObject()
cpu = DataObject()
memoryAllocation = DataObject()
cpuAllocation = DataObject()
vm_list = DataObject()
memory.maxUsage = 1000 * units.Mi
memory.overallUsage = 500 * units.Mi
cpu.maxUsage = 10000
cpu.overallUsage = 1000
runtime.cpu = cpu
runtime.memory = memory
summary.runtime = runtime
cpuAllocation.limit = 10000
memoryAllocation.limit = 1024
memoryAllocation.reservation = 1024
config.memoryAllocation = memoryAllocation
config.cpuAllocation = cpuAllocation
vm_list.ManagedObjectReference = []
self.set("summary", summary)
self.set("summary.runtime.memory", memory)
self.set("config", config)
self.set("vm", vm_list)
parent = ManagedObjectReference(value=value,
name=name)
owner = ManagedObjectReference(value=value,
name=name)
self.set("parent", parent)
self.set("owner", owner)
class DatastoreHostMount(DataObject):
def __init__(self, value='host-100'):
super(DatastoreHostMount, self).__init__()
host_ref = (_db_content["HostSystem"]
[_db_content["HostSystem"].keys()[0]].obj)
host_system = DataObject()
host_system.ManagedObjectReference = [host_ref]
host_system.value = value
self.key = host_system
class ClusterComputeResource(ManagedObject):
"""Cluster class."""
def __init__(self, name="test_cluster"):
super(ClusterComputeResource, self).__init__("domain")
self.set("name", name)
self.set("host", None)
self.set("datastore", None)
self.set("resourcePool", None)
summary = DataObject()
summary.numHosts = 0
summary.numCpuCores = 0
summary.numCpuThreads = 0
summary.numEffectiveHosts = 0
summary.totalMemory = 0
summary.effectiveMemory = 0
summary.effectiveCpu = 10000
self.set("summary", summary)
def _add_root_resource_pool(self, r_pool):
if r_pool:
self.set("resourcePool", r_pool)
def _add_host(self, host_sys):
if host_sys:
hosts = self.get("host")
if hosts is None:
hosts = DataObject()
hosts.ManagedObjectReference = []
self.set("host", hosts)
hosts.ManagedObjectReference.append(host_sys)
# Update summary every time a new host is added
self._update_summary()
def _add_datastore(self, datastore):
if datastore:
datastores = self.get("datastore")
if datastores is None:
datastores = DataObject()
datastores.ManagedObjectReference = []
self.set("datastore", datastores)
datastores.ManagedObjectReference.append(datastore)
# Method to update summary of a cluster upon host addition
def _update_summary(self):
summary = self.get("summary")
summary.numHosts = 0
summary.numCpuCores = 0
summary.numCpuThreads = 0
summary.numEffectiveHosts = 0
summary.totalMemory = 0
summary.effectiveMemory = 0
hosts = self.get("host")
# Compute the aggregate stats
summary.numHosts = len(hosts.ManagedObjectReference)
for host_ref in hosts.ManagedObjectReference:
host_sys = _get_object(host_ref)
connected = host_sys.get("connected")
host_summary = host_sys.get("summary")
summary.numCpuCores += host_summary.hardware.numCpuCores
summary.numCpuThreads += host_summary.hardware.numCpuThreads
summary.totalMemory += host_summary.hardware.memorySize
free_memory = (host_summary.hardware.memorySize / units.Mi
- host_summary.quickStats.overallMemoryUsage)
summary.effectiveMemory += free_memory if connected else 0
summary.numEffectiveHosts += 1 if connected else 0
self.set("summary", summary)
class Datastore(ManagedObject):
"""Datastore class."""
def __init__(self, name="fake-ds", capacity=1024, free=500,
accessible=True, maintenance_mode="normal"):
super(Datastore, self).__init__("ds")
self.set("summary.type", "VMFS")
self.set("summary.name", name)
self.set("summary.capacity", capacity * units.Gi)
self.set("summary.freeSpace", free * units.Gi)
self.set("summary.accessible", accessible)
self.set("summary.maintenanceMode", maintenance_mode)
self.set("browser", "")
class HostNetworkSystem(ManagedObject):
"""HostNetworkSystem class."""
def __init__(self, name="networkSystem"):
super(HostNetworkSystem, self).__init__("ns")
self.set("name", name)
pnic_do = DataObject()
pnic_do.device = "vmnic0"
net_info_pnic = DataObject()
net_info_pnic.PhysicalNic = [pnic_do]
self.set("networkInfo.pnic", net_info_pnic)
class HostStorageSystem(ManagedObject):
"""HostStorageSystem class."""
def __init__(self):
super(HostStorageSystem, self).__init__("storageSystem")
class HostSystem(ManagedObject):
"""Host System class."""
def __init__(self, name="ha-host", connected=True, ds_ref=None,
maintenance_mode=False):
super(HostSystem, self).__init__("host")
self.set("name", name)
if _db_content.get("HostNetworkSystem", None) is None:
create_host_network_system()
if not _get_object_refs('HostStorageSystem'):
create_host_storage_system()
host_net_key = _db_content["HostNetworkSystem"].keys()[0]
host_net_sys = _db_content["HostNetworkSystem"][host_net_key].obj
self.set("configManager.networkSystem", host_net_sys)
host_storage_sys_key = _get_object_refs('HostStorageSystem')[0]
self.set("configManager.storageSystem", host_storage_sys_key)
if not ds_ref:
ds_ref = create_datastore('local-host-%s' % name, 500, 500)
datastores = DataObject()
datastores.ManagedObjectReference = [ds_ref]
self.set("datastore", datastores)
summary = DataObject()
hardware = DataObject()
hardware.numCpuCores = 8
hardware.numCpuPkgs = 2
hardware.numCpuThreads = 16
hardware.vendor = "Intel"
hardware.cpuModel = "Intel(R) Xeon(R)"
hardware.uuid = "host-uuid"
hardware.memorySize = units.Gi
summary.hardware = hardware
runtime = DataObject()
if connected:
runtime.connectionState = "connected"
else:
runtime.connectionState = "disconnected"
runtime.inMaintenanceMode = maintenance_mode
summary.runtime = runtime
quickstats = DataObject()
quickstats.overallMemoryUsage = 500
summary.quickStats = quickstats
product = DataObject()
product.name = "VMware ESXi"
product.version = constants.MIN_VC_VERSION
config = DataObject()
config.product = product
summary.config = config
pnic_do = DataObject()
pnic_do.device = "vmnic0"
net_info_pnic = DataObject()
net_info_pnic.PhysicalNic = [pnic_do]
self.set("summary", summary)
self.set("capability.maxHostSupportedVcpus", 600)
self.set("summary.hardware", hardware)
self.set("summary.runtime", runtime)
self.set("config.network.pnic", net_info_pnic)
self.set("connected", connected)
if _db_content.get("Network", None) is None:
create_network()
net_ref = _db_content["Network"][_db_content["Network"].keys()[0]].obj
network_do = DataObject()
network_do.ManagedObjectReference = [net_ref]
self.set("network", network_do)
vswitch_do = DataObject()
vswitch_do.pnic = ["vmnic0"]
vswitch_do.name = "vSwitch0"
vswitch_do.portgroup = ["PortGroup-vmnet0"]
        net_switch = DataObject()
        net_switch.HostVirtualSwitch = [vswitch_do]
        self.set("config.network.vswitch", net_switch)
host_pg_do = DataObject()
host_pg_do.key = "PortGroup-vmnet0"
pg_spec = DataObject()
pg_spec.vlanId = 0
pg_spec.name = "vmnet0"
host_pg_do.spec = pg_spec
host_pg = DataObject()
host_pg.HostPortGroup = [host_pg_do]
self.set("config.network.portgroup", host_pg)
config = DataObject()
storageDevice = DataObject()
iscsi_hba = HostInternetScsiHba()
iscsi_hba.iScsiName = "iscsi-name"
host_bus_adapter_array = DataObject()
host_bus_adapter_array.HostHostBusAdapter = [iscsi_hba]
storageDevice.hostBusAdapter = host_bus_adapter_array
config.storageDevice = storageDevice
self.set("config.storageDevice.hostBusAdapter", host_bus_adapter_array)
# Set the same on the storage system managed object
host_storage_sys = _get_object(host_storage_sys_key)
host_storage_sys.set('storageDeviceInfo.hostBusAdapter',
host_bus_adapter_array)
def _add_iscsi_target(self, data):
default_lun = DataObject()
default_lun.scsiLun = 'key-vim.host.ScsiDisk-010'
default_lun.key = 'key-vim.host.ScsiDisk-010'
default_lun.deviceName = 'fake-device'
default_lun.uuid = 'fake-uuid'
scsi_lun_array = DataObject()
scsi_lun_array.ScsiLun = [default_lun]
self.set("config.storageDevice.scsiLun", scsi_lun_array)
transport = DataObject()
transport.address = [data['target_portal']]
transport.iScsiName = data['target_iqn']
default_target = DataObject()
default_target.lun = [default_lun]
default_target.transport = transport
iscsi_adapter = DataObject()
iscsi_adapter.adapter = 'key-vmhba33'
iscsi_adapter.transport = transport
iscsi_adapter.target = [default_target]
iscsi_topology = DataObject()
iscsi_topology.adapter = [iscsi_adapter]
self.set("config.storageDevice.scsiTopology", iscsi_topology)
def _add_port_group(self, spec):
"""Adds a port group to the host system object in the db."""
pg_name = spec.name
vswitch_name = spec.vswitchName
vlanid = spec.vlanId
vswitch_do = DataObject()
vswitch_do.pnic = ["vmnic0"]
vswitch_do.name = vswitch_name
vswitch_do.portgroup = ["PortGroup-%s" % pg_name]
vswitches = self.get("config.network.vswitch").HostVirtualSwitch
vswitches.append(vswitch_do)
host_pg_do = DataObject()
host_pg_do.key = "PortGroup-%s" % pg_name
pg_spec = DataObject()
pg_spec.vlanId = vlanid
pg_spec.name = pg_name
host_pg_do.spec = pg_spec
host_pgrps = self.get("config.network.portgroup").HostPortGroup
host_pgrps.append(host_pg_do)
class Datacenter(ManagedObject):
"""Datacenter class."""
def __init__(self, name="ha-datacenter", ds_ref=None):
super(Datacenter, self).__init__("dc")
self.set("name", name)
self.set("vmFolder", "vm_folder_ref")
if _db_content.get("Network", None) is None:
create_network()
net_ref = _db_content["Network"][_db_content["Network"].keys()[0]].obj
network_do = DataObject()
network_do.ManagedObjectReference = [net_ref]
self.set("network", network_do)
if ds_ref:
datastore = DataObject()
datastore.ManagedObjectReference = [ds_ref]
else:
datastore = None
self.set("datastore", datastore)
class Task(ManagedObject):
"""Task class."""
def __init__(self, task_name, state="running", result=None,
error_fault=None):
super(Task, self).__init__("Task")
info = DataObject()
info.name = task_name
info.state = state
if state == 'error':
error = DataObject()
error.localizedMessage = "Error message"
if not error_fault:
error.fault = DataObject()
else:
error.fault = error_fault
info.error = error
info.result = result
self.set("info", info)
def create_host_network_system():
host_net_system = HostNetworkSystem()
_create_object("HostNetworkSystem", host_net_system)
def create_host_storage_system():
host_storage_system = HostStorageSystem()
_create_object("HostStorageSystem", host_storage_system)
def create_host(ds_ref=None):
host_system = HostSystem(ds_ref=ds_ref)
_create_object('HostSystem', host_system)
def create_datacenter(name, ds_ref=None):
data_center = Datacenter(name, ds_ref)
_create_object('Datacenter', data_center)
def create_datastore(name, capacity, free):
data_store = Datastore(name, capacity, free)
_create_object('Datastore', data_store)
return data_store.obj
def create_res_pool():
res_pool = ResourcePool()
_create_object('ResourcePool', res_pool)
return res_pool.obj
def create_network():
network = Network()
_create_object('Network', network)
def create_cluster(name, ds_ref):
cluster = ClusterComputeResource(name=name)
cluster._add_host(_get_object_refs("HostSystem")[0])
cluster._add_host(_get_object_refs("HostSystem")[1])
cluster._add_datastore(ds_ref)
cluster._add_root_resource_pool(create_res_pool())
_create_object('ClusterComputeResource', cluster)
return cluster
def create_vm(uuid=None, name=None,
cpus=1, memory=128, devices=None,
vmPathName=None, extraConfig=None,
res_pool_ref=None, host_ref=None,
version=None):
if uuid is None:
uuid = uuidutils.generate_uuid()
if name is None:
name = uuid
if devices is None:
devices = []
if vmPathName is None:
vm_path = ds_obj.DatastorePath(_db_content['Datastore'].values()[0])
else:
vm_path = ds_obj.DatastorePath.parse(vmPathName)
if res_pool_ref is None:
res_pool_ref = _db_content['ResourcePool'].keys()[0]
if host_ref is None:
host_ref = _db_content["HostSystem"].keys()[0]
# Fill in the default path to the vmx file if we were only given a
# datastore. Note that if you create a VM with vmPathName '[foo]', when you
# retrieve vmPathName it will be '[foo] uuid/uuid.vmx'. Hence we use
# vm_path below for the stored value of vmPathName.
if vm_path.rel_path == '':
vm_path = vm_path.join(name, name + '.vmx')
for key, value in six.iteritems(_db_content["Datastore"]):
if value.get('summary.name') == vm_path.datastore:
ds = key
break
else:
ds = create_datastore(vm_path.datastore, 1024, 500)
vm_dict = {"name": name,
"ds": [ds],
"runtime_host": host_ref,
"powerstate": "poweredOff",
"vmPathName": str(vm_path),
"numCpu": cpus,
"mem": memory,
"extra_config": extraConfig,
"virtual_device": devices,
"instanceUuid": uuid,
"version": version}
vm = VirtualMachine(**vm_dict)
_create_object("VirtualMachine", vm)
res_pool = _get_object(res_pool_ref)
res_pool.vm.ManagedObjectReference.append(vm.obj)
return vm.obj
def create_task(task_name, state="running", result=None, error_fault=None):
task = Task(task_name, state, result, error_fault)
_create_object("Task", task)
return task
def _add_file(file_path):
"""Adds a file reference to the db."""
_db_content["files"].append(file_path)
def _remove_file(file_path):
"""Removes a file reference from the db."""
# Check if the remove is for a single file object or for a folder
if file_path.find(".vmdk") != -1:
if file_path not in _db_content.get("files"):
raise vexc.FileNotFoundException(file_path)
_db_content.get("files").remove(file_path)
else:
# Removes the files in the folder and the folder too from the db
to_delete = set()
for file in _db_content.get("files"):
if file.find(file_path) != -1:
to_delete.add(file)
for file in to_delete:
_db_content.get("files").remove(file)
def fake_plug_vifs(*args, **kwargs):
"""Fakes plugging vifs."""
pass
def fake_get_network(*args, **kwargs):
"""Fake get network."""
return {'type': 'fake'}
def assertPathExists(test, path):
test.assertIn(path, _db_content.get('files'))
def assertPathNotExists(test, path):
test.assertNotIn(path, _db_content.get('files'))
def get_file(file_path):
"""Check if file exists in the db."""
return file_path in _db_content.get("files")
def fake_upload_image(context, image, instance, **kwargs):
"""Fakes the upload of an image."""
pass
def fake_fetch_image(context, instance, host, port, dc_name, ds_name,
file_path, cookies=None):
"""Fakes the fetch of an image."""
ds_file_path = "[" + ds_name + "] " + file_path
_add_file(ds_file_path)
def _get_vm_mdo(vm_ref):
"""Gets the Virtual Machine with the ref from the db."""
if _db_content.get("VirtualMachine", None) is None:
raise exception.NotFound("There is no VM registered")
if vm_ref not in _db_content.get("VirtualMachine"):
raise exception.NotFound("Virtual Machine with ref %s is not "
"there" % vm_ref)
return _db_content.get("VirtualMachine")[vm_ref]
def _merge_extraconfig(existing, changes):
"""Imposes the changes in extraConfig over the existing extraConfig."""
existing = existing or []
if (changes):
for c in changes:
if len([x for x in existing if x.key == c.key]) > 0:
extraConf = [x for x in existing if x.key == c.key][0]
extraConf.value = c.value
else:
existing.append(c)
return existing
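# A hedged illustration of the merge above (the option objects stand in for
# suds OptionValue data objects and are hypothetical):
#   existing = [opt(key='foo', value='1'), opt(key='bar', value='2')]
#   changes  = [opt(key='foo', value='9'), opt(key='baz', value='3')]
#   _merge_extraconfig(existing, changes)
#   -> 'foo' is overwritten with '9', 'baz' is appended, 'bar' is untouched.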
class FakeFactory(object):
"""Fake factory class for the suds client."""
def create(self, obj_name):
"""Creates a namespace object."""
return DataObject(obj_name)
class FakeService(DataObject):
"""Fake service class."""
def Logout(self, session_manager):
pass
def FindExtension(self, extension_manager, key):
return []
class FakeClient(DataObject):
"""Fake client class."""
def __init__(self):
"""Creates a namespace object."""
self.service = FakeService()
class FakeSession(object):
"""Fake Session Class."""
def __init__(self):
self.vim = FakeVim()
def _call_method(self, module, method, *args, **kwargs):
raise NotImplementedError()
def _wait_for_task(self, task_ref):
raise NotImplementedError()
class FakeObjectRetrievalSession(FakeSession):
"""A session for faking object retrieval tasks.
_call_method() returns a given set of objects
sequentially, regardless of the method called.
"""
def __init__(self, *ret):
super(FakeObjectRetrievalSession, self).__init__()
self.ret = ret
self.ind = 0
def _call_method(self, module, method, *args, **kwargs):
if (method == 'continue_retrieval' or
method == 'cancel_retrieval'):
return
# return fake objects in a circular manner
self.ind = (self.ind + 1) % len(self.ret)
return self.ret[self.ind - 1]
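# Sketch of how a test might drive FakeObjectRetrievalSession (the canned
# objects are hypothetical): every _call_method() returns the canned objects
# in a cycle, while retrieval continuation/cancellation calls return None.
#   session = FakeObjectRetrievalSession(fake_obj_1, fake_obj_2)
#   session._call_method(None, 'get_objects')           # -> fake_obj_1
#   session._call_method(None, 'get_objects')           # -> fake_obj_2
#   session._call_method(None, 'continue_retrieval')    # -> None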
def get_fake_vim_object(vmware_api_session):
key = vmware_api_session.__repr__()
if key not in _vim_map:
_vim_map[key] = FakeVim()
return _vim_map[key]
class FakeVim(object):
"""Fake VIM Class."""
def __init__(self, protocol="https", host="localhost", trace=None):
"""Initializes the suds client object, sets the service content
contents and the cookies for the session.
"""
self._session = None
self.client = FakeClient()
self.client.factory = FakeFactory()
transport = DataObject()
transport.cookiejar = "Fake-CookieJar"
options = DataObject()
options.transport = transport
self.client.options = options
service_content = self.client.factory.create('ns0:ServiceContent')
service_content.propertyCollector = "PropCollector"
service_content.virtualDiskManager = "VirtualDiskManager"
service_content.fileManager = "FileManager"
service_content.rootFolder = "RootFolder"
service_content.sessionManager = "SessionManager"
service_content.extensionManager = "ExtensionManager"
service_content.searchIndex = "SearchIndex"
about_info = DataObject()
about_info.name = "VMware vCenter Server"
about_info.version = "5.1.0"
about_info.instanceUuid = _FAKE_VCENTER_UUID
service_content.about = about_info
self._service_content = service_content
@property
def service_content(self):
return self._service_content
def __repr__(self):
return "Fake VIM Object"
def __str__(self):
return "Fake VIM Object"
def _login(self):
"""Logs in and sets the session object in the db."""
self._session = uuidutils.generate_uuid()
session = DataObject()
session.key = self._session
session.userName = 'sessionUserName'
_db_content['session'][self._session] = session
return session
def _terminate_session(self, *args, **kwargs):
"""Terminates a session."""
s = kwargs.get("sessionId")[0]
if s not in _db_content['session']:
return
del _db_content['session'][s]
def _check_session(self):
"""Checks if the session is active."""
if (self._session is None or self._session not in
_db_content['session']):
LOG.debug("Session is faulty")
raise vexc.VimFaultException([vexc.NOT_AUTHENTICATED],
"Session Invalid")
def _session_is_active(self, *args, **kwargs):
try:
self._check_session()
return True
except Exception:
return False
def _create_vm(self, method, *args, **kwargs):
"""Creates and registers a VM object with the Host System."""
config_spec = kwargs.get("config")
if config_spec.guestId not in constants.VALID_OS_TYPES:
ex = vexc.VMwareDriverException('A specified parameter was '
'not correct.')
return create_task(method, "error", error_fault=ex).obj
pool = kwargs.get('pool')
version = getattr(config_spec, 'version', None)
devices = []
for device_change in config_spec.deviceChange:
if device_change.operation == 'add':
devices.append(device_change.device)
vm_ref = create_vm(config_spec.instanceUuid, config_spec.name,
config_spec.numCPUs, config_spec.memoryMB,
devices, config_spec.files.vmPathName,
config_spec.extraConfig, pool,
version=version)
task_mdo = create_task(method, "success", result=vm_ref)
return task_mdo.obj
def _reconfig_vm(self, method, *args, **kwargs):
"""Reconfigures a VM and sets the properties supplied."""
vm_ref = args[0]
vm_mdo = _get_vm_mdo(vm_ref)
vm_mdo.reconfig(self.client.factory, kwargs.get("spec"))
task_mdo = create_task(method, "success")
return task_mdo.obj
def _create_copy_disk(self, method, vmdk_file_path):
"""Creates/copies a vmdk file object in the datastore."""
# We need to add/create both .vmdk and .-flat.vmdk files
flat_vmdk_file_path = vmdk_file_path.replace(".vmdk", "-flat.vmdk")
_add_file(vmdk_file_path)
_add_file(flat_vmdk_file_path)
task_mdo = create_task(method, "success")
return task_mdo.obj
def _extend_disk(self, method, size):
"""Extend disk size when create a instance."""
task_mdo = create_task(method, "success")
return task_mdo.obj
def _snapshot_vm(self, method):
"""Snapshots a VM. Here we do nothing for faking sake."""
task_mdo = create_task(method, "success")
return task_mdo.obj
def _find_all_by_uuid(self, *args, **kwargs):
uuid = kwargs.get('uuid')
vm_refs = []
for vm_ref in _db_content.get("VirtualMachine"):
vm = _get_object(vm_ref)
vm_uuid = vm.get("summary.config.instanceUuid")
if vm_uuid == uuid:
vm_refs.append(vm_ref)
return vm_refs
def _delete_snapshot(self, method, *args, **kwargs):
"""Deletes a VM snapshot. Here we do nothing for faking sake."""
task_mdo = create_task(method, "success")
return task_mdo.obj
def _delete_file(self, method, *args, **kwargs):
"""Deletes a file from the datastore."""
_remove_file(kwargs.get("name"))
task_mdo = create_task(method, "success")
return task_mdo.obj
def _just_return(self):
"""Fakes a return."""
return
def _just_return_task(self, method):
"""Fakes a task return."""
task_mdo = create_task(method, "success")
return task_mdo.obj
def _clone_vm(self, method, *args, **kwargs):
"""Fakes a VM clone."""
"""Creates and registers a VM object with the Host System."""
source_vmref = args[0]
source_vm_mdo = _get_vm_mdo(source_vmref)
clone_spec = kwargs.get("spec")
vm_dict = {
"name": kwargs.get("name"),
"ds": source_vm_mdo.get("datastore"),
"runtime_host": source_vm_mdo.get("runtime.host"),
"powerstate": source_vm_mdo.get("runtime.powerState"),
"vmPathName": source_vm_mdo.get("config.files.vmPathName"),
"numCpu": source_vm_mdo.get("summary.config.numCpu"),
"mem": source_vm_mdo.get("summary.config.memorySizeMB"),
"extra_config": source_vm_mdo.get("config.extraConfig").OptionValue,
"virtual_device":
source_vm_mdo.get("config.hardware.device").VirtualDevice,
"instanceUuid": source_vm_mdo.get("summary.config.instanceUuid")}
if hasattr(clone_spec, 'config'):
# Impose the config changes specified in the config property
if (hasattr(clone_spec.config, 'instanceUuid') and
clone_spec.config.instanceUuid is not None):
vm_dict["instanceUuid"] = clone_spec.config.instanceUuid
if hasattr(clone_spec.config, 'extraConfig'):
extraConfigs = _merge_extraconfig(vm_dict["extra_config"],
clone_spec.config.extraConfig)
vm_dict["extra_config"] = extraConfigs
virtual_machine = VirtualMachine(**vm_dict)
_create_object("VirtualMachine", virtual_machine)
task_mdo = create_task(method, "success")
return task_mdo.obj
def _unregister_vm(self, method, *args, **kwargs):
"""Unregisters a VM from the Host System."""
vm_ref = args[0]
_get_vm_mdo(vm_ref)
del _db_content["VirtualMachine"][vm_ref]
task_mdo = create_task(method, "success")
return task_mdo.obj
def _search_ds(self, method, *args, **kwargs):
"""Searches the datastore for a file."""
# TODO(garyk): add support for spec parameter
ds_path = kwargs.get("datastorePath")
matched_files = set()
# Check if we are searching for a file or a directory
directory = False
dname = '%s/' % ds_path
for file in _db_content.get("files"):
if file == dname:
directory = True
break
# A directory search implies that we must return all
# subdirectories
if directory:
for file in _db_content.get("files"):
if file.find(ds_path) != -1:
if not file.endswith(ds_path):
path = file.replace(dname, '', 1).split('/')
if path:
matched_files.add(path[0])
if not matched_files:
matched_files.add('/')
else:
for file in _db_content.get("files"):
if file.find(ds_path) != -1:
matched_files.add(ds_path)
if matched_files:
result = DataObject()
result.path = ds_path
result.file = []
for file in matched_files:
matched = DataObject()
matched.path = file
matched.fileSize = 1024
result.file.append(matched)
task_mdo = create_task(method, "success", result=result)
else:
task_mdo = create_task(method, "error", error_fault=FileNotFound())
return task_mdo.obj
def _move_file(self, method, *args, **kwargs):
source = kwargs.get('sourceName')
destination = kwargs.get('destinationName')
new_files = []
if source != destination:
for file in _db_content.get("files"):
if source in file:
new_file = file.replace(source, destination)
new_files.append(new_file)
# if source is not a file then the children will also
# be deleted
_remove_file(source)
for file in new_files:
_add_file(file)
task_mdo = create_task(method, "success")
return task_mdo.obj
def _make_dir(self, method, *args, **kwargs):
"""Creates a directory in the datastore."""
ds_path = kwargs.get("name")
if get_file(ds_path):
raise vexc.FileAlreadyExistsException()
_db_content["files"].append('%s/' % ds_path)
def _set_power_state(self, method, vm_ref, pwr_state="poweredOn"):
"""Sets power state for the VM."""
if _db_content.get("VirtualMachine", None) is None:
raise exception.NotFound("No Virtual Machine has been "
"registered yet")
if vm_ref not in _db_content.get("VirtualMachine"):
raise exception.NotFound("Virtual Machine with ref %s is not "
"there" % vm_ref)
vm_mdo = _db_content.get("VirtualMachine").get(vm_ref)
vm_mdo.set("runtime.powerState", pwr_state)
task_mdo = create_task(method, "success")
return task_mdo.obj
def _retrieve_properties_continue(self, method, *args, **kwargs):
"""Continues the retrieve."""
return FakeRetrieveResult()
def _retrieve_properties_cancel(self, method, *args, **kwargs):
"""Cancels the retrieve."""
return None
def _retrieve_properties(self, method, *args, **kwargs):
"""Retrieves properties based on the type."""
spec_set = kwargs.get("specSet")[0]
spec_type = spec_set.propSet[0].type
properties = spec_set.propSet[0].pathSet
if not isinstance(properties, list):
properties = properties.split()
objs = spec_set.objectSet
lst_ret_objs = FakeRetrieveResult()
for obj in objs:
try:
obj_ref = obj.obj
if obj_ref == "RootFolder":
# This means that we are retrieving props for all managed
# data objects of the specified 'type' in the entire
# inventory. This gets invoked by vim_util.get_objects.
mdo_refs = _db_content[spec_type]
elif obj_ref.type != spec_type:
# This means that we are retrieving props for the managed
# data objects in the parent object's 'path' property.
# This gets invoked by vim_util.get_inner_objects
# eg. obj_ref = <ManagedObjectReference of a cluster>
# type = 'DataStore'
# path = 'datastore'
# the above will retrieve all datastores in the given
# cluster.
parent_mdo = _db_content[obj_ref.type][obj_ref]
path = obj.selectSet[0].path
mdo_refs = parent_mdo.get(path).ManagedObjectReference
else:
# This means that we are retrieving props of the given
# managed data object. This gets invoked by
# vim_util.get_properties_for_a_collection_of_objects.
mdo_refs = [obj_ref]
for mdo_ref in mdo_refs:
mdo = _db_content[spec_type][mdo_ref]
prop_list = []
for prop_name in properties:
prop = Prop(prop_name, mdo.get(prop_name))
prop_list.append(prop)
obj_content = ObjectContent(mdo.obj, prop_list)
lst_ret_objs.add_object(obj_content)
except Exception:
LOG.exception("_retrieve_properties error")
continue
return lst_ret_objs
def _add_port_group(self, method, *args, **kwargs):
"""Adds a port group to the host system."""
        _host_sk = list(_db_content["HostSystem"].keys())[0]
host_mdo = _db_content["HostSystem"][_host_sk]
host_mdo._add_port_group(kwargs.get("portgrp"))
def _add_iscsi_send_tgt(self, method, *args, **kwargs):
"""Adds a iscsi send target to the hba."""
send_targets = kwargs.get('targets')
host_storage_sys = _get_objects('HostStorageSystem').objects[0]
iscsi_hba_array = host_storage_sys.get('storageDeviceInfo'
'.hostBusAdapter')
iscsi_hba = iscsi_hba_array.HostHostBusAdapter[0]
if hasattr(iscsi_hba, 'configuredSendTarget'):
iscsi_hba.configuredSendTarget.extend(send_targets)
else:
iscsi_hba.configuredSendTarget = send_targets
def __getattr__(self, attr_name):
if attr_name != "Login":
self._check_session()
if attr_name == "Login":
return lambda *args, **kwargs: self._login()
elif attr_name == "SessionIsActive":
return lambda *args, **kwargs: self._session_is_active(
*args, **kwargs)
elif attr_name == "TerminateSession":
return lambda *args, **kwargs: self._terminate_session(
*args, **kwargs)
elif attr_name == "CreateVM_Task":
return lambda *args, **kwargs: self._create_vm(attr_name,
*args, **kwargs)
elif attr_name == "ReconfigVM_Task":
return lambda *args, **kwargs: self._reconfig_vm(attr_name,
*args, **kwargs)
elif attr_name == "CreateVirtualDisk_Task":
return lambda *args, **kwargs: self._create_copy_disk(attr_name,
kwargs.get("name"))
elif attr_name == "DeleteDatastoreFile_Task":
return lambda *args, **kwargs: self._delete_file(attr_name,
*args, **kwargs)
elif attr_name == "PowerOnVM_Task":
return lambda *args, **kwargs: self._set_power_state(attr_name,
args[0], "poweredOn")
elif attr_name == "PowerOffVM_Task":
return lambda *args, **kwargs: self._set_power_state(attr_name,
args[0], "poweredOff")
elif attr_name == "RebootGuest":
return lambda *args, **kwargs: self._just_return()
elif attr_name == "ResetVM_Task":
return lambda *args, **kwargs: self._set_power_state(attr_name,
args[0], "poweredOn")
elif attr_name == "SuspendVM_Task":
return lambda *args, **kwargs: self._set_power_state(attr_name,
args[0], "suspended")
elif attr_name == "CreateSnapshot_Task":
return lambda *args, **kwargs: self._snapshot_vm(attr_name)
elif attr_name == "RemoveSnapshot_Task":
return lambda *args, **kwargs: self._delete_snapshot(attr_name,
*args, **kwargs)
elif attr_name == "CopyVirtualDisk_Task":
return lambda *args, **kwargs: self._create_copy_disk(attr_name,
kwargs.get("destName"))
elif attr_name == "ExtendVirtualDisk_Task":
return lambda *args, **kwargs: self._extend_disk(attr_name,
kwargs.get("size"))
elif attr_name == "Destroy_Task":
return lambda *args, **kwargs: self._unregister_vm(attr_name,
*args, **kwargs)
elif attr_name == "UnregisterVM":
return lambda *args, **kwargs: self._unregister_vm(attr_name,
*args, **kwargs)
elif attr_name == "CloneVM_Task":
return lambda *args, **kwargs: self._clone_vm(attr_name,
*args, **kwargs)
elif attr_name == "FindAllByUuid":
return lambda *args, **kwargs: self._find_all_by_uuid(attr_name,
*args, **kwargs)
elif attr_name == "SearchDatastore_Task":
return lambda *args, **kwargs: self._search_ds(attr_name,
*args, **kwargs)
elif attr_name == "MoveDatastoreFile_Task":
return lambda *args, **kwargs: self._move_file(attr_name,
*args, **kwargs)
elif attr_name == "MakeDirectory":
return lambda *args, **kwargs: self._make_dir(attr_name,
*args, **kwargs)
elif attr_name == "RetrievePropertiesEx":
return lambda *args, **kwargs: self._retrieve_properties(
attr_name, *args, **kwargs)
elif attr_name == "ContinueRetrievePropertiesEx":
return lambda *args, **kwargs: self._retrieve_properties_continue(
attr_name, *args, **kwargs)
elif attr_name == "CancelRetrievePropertiesEx":
return lambda *args, **kwargs: self._retrieve_properties_cancel(
attr_name, *args, **kwargs)
elif attr_name == "AddPortGroup":
return lambda *args, **kwargs: self._add_port_group(attr_name,
*args, **kwargs)
elif attr_name in ("RebootHost_Task",
"ShutdownHost_Task",
"PowerUpHostFromStandBy_Task",
"EnterMaintenanceMode_Task",
"ExitMaintenanceMode_Task",
"RescanHba"):
return lambda *args, **kwargs: self._just_return_task(attr_name)
elif attr_name == "AddInternetScsiSendTargets":
return lambda *args, **kwargs: self._add_iscsi_send_tgt(attr_name,
*args, **kwargs)
'''
Created on June 28, 2016
This file is subject to the terms and conditions defined in the
file 'LICENSE.txt', which is part of this source code package.
@author: David Moss
'''
import utilities
import intelligence.index
import importlib
# This is the maximum number of elements we'll average over for RSSI and LQI readings
MAXIMUM_AVERAGING_ELEMENTS = 25
# Maximum number of attempts for any one command
MAX_ATTEMPTS = 20
# Time between attempts, in seconds
TIME_BETWEEN_ATTEMPTS_SEC = 30
# Reliability variable name so we prevent typos
RELIABILITY_VARIABLE_NAME = "reliability"
# Total duration of time in which we should cache measurements here locally. Larger = slower to download, faster to execute.
TOTAL_DURATION_TO_CACHE_MEASUREMENTS_MS = utilities.ONE_HOUR_MS
# Take a battery reading every 6 hours
BATTERY_MEASUREMENT_PERIODICITY_MS = utilities.ONE_HOUR_MS * 6
# Minimum number of battery readings required to make a decision on the battery life - roughly 2.5 days' worth at the periodicity above
MINIMUM_BATTERY_READINGS = 10
# Total number of battery readings to maintain
MAXIMUM_BATTERY_READINGS = MINIMUM_BATTERY_READINGS * 2
# Space type language-neutral constants
# Internal docs: https://presence.atlassian.net/wiki/spaces/BOTS/pages/656638178/Space+Constants+and+Definitions
SPACE_TYPE = {
"kitchen": 1,
"bedroom": 2,
"bathroom": 3,
"hallway": 4,
"livingroom": 5,
"diningroom": 6,
"familyroom": 7,
"laundryroom": 8,
"office": 9,
"stairs": 10,
"garage": 11,
"basement": 12
}
class Device:
"""This is a base class for each of our devices"""
# Low battery tag - Override in sub-classes to make it more specific
LOW_BATTERY_TAG = "lowbattery"
# Low signal strength tag
LOW_SIGNAL_STRENGTH_TAG = "weaksignal"
# Low battery threshold - Override in sub-classes
LOW_BATTERY_THRESHOLD = 10
# Low signal strength threshold - Override in sub-classes
LOW_RSSI_THRESHOLD = -80
# List of Device Types this class is compatible with - Specify in sub-classes
DEVICE_TYPES = []
def __init__(self, botengine, device_id, device_type, device_description, precache_measurements=True):
"""
Constructor
:param botengine: BotEngine environment
:param device_id: Device ID
:param device_type: Device Type
:param device_description: Device description (nickname)
:param precache_measurements: True (default) to download historical measurements to cache them locally, the length of time of which is defined by device.TOTAL_DURATION_TO_CACHE_MEASUREMENTS_MS
"""
# Device ID
self.device_id = device_id.encode('utf-8')
# Device type
self.device_type = int(device_type)
# Device description
self.description = device_description.encode('utf-8').strip()
# This is set by the controller object after init during synchronization with the location
self.location_object = None
# Measurements for each parameter, newest measurements at index 0
self.measurements = {}
# Spaces this device is associated with. For example:
# "spaces": [
# {
# "name": "Kitchen",
# "spaceId": 152,
# "spaceType": 1
# },
# {
# "name": "Hallway",
# "spaceId": 154,
# "spaceType": 4
# },
# {
# "name": "Living Room",
# "spaceId": 157,
# "spaceType": 5
# }
# ]
self.spaces = []
# Last parameters that we updated
self.last_updated_params = []
# Battery level
self.battery_level = 100
# List of battery measured battery levels over time
self.battery_levels = []
# Last battery update time in ms
self.last_battery_update_ms = 0
# True if we have a low battery
self.low_battery = False
# RSSI averaging elements
self._rssi_elements = []
# List of arbitrary tags this device has
self.tags = []
# True if this device is currently connected
self.is_connected = False
# True if we can control this device
self.can_control = False
# True if we can read from this device
self.can_read = False
# Remote IP address hash. Devices connected to the same external IP address will have the same hash.
self.remote_addr_hash = None
# The proxy ID is the device ID of the gateway this device connects through, if any.
self.proxy_id = None
# The goal (scenario) ID for this device
self.goal_id = None
# Approximate latitude (available on devices that directly connect to the cloud, like gateways)
self.latitude = None
# Approximate longitude (available on devices that directly connect to the cloud, like gateways)
self.longitude = None
# Born on timestamp
self.born_on = None
# True to enforce the default cache size. This can be reconfigured externally, followed by a call to garbage collect when needed to get rid of excess cache.
self.enforce_cache_size = precache_measurements
# Total communications odometer (includes measurements and RSSI updates / check-ins)
self.total_communications_odometer = 0
# Trip communications odometer - see how many communications we received in a shorter period of time, including RSSI check-ins
self.communications_odometer = 0
# Measurement odometer - how many actual new measurements did we receive
self.measurement_odometer = 0
# Timestamp of the last time we received a communication from this device
self.last_communications_timestamp = None
# Every device gets a dictionary of intelligence modules, and can populate these intelligence modules in each device model
self.intelligence_modules = {}
if precache_measurements:
# Download and start this object out with a history of measurements
self.cache_measurements(botengine, botengine.get_timestamp() - TOTAL_DURATION_TO_CACHE_MEASUREMENTS_MS, botengine.get_timestamp())
# initialize(...) gets called in controller.py, after this device is finished syncing with the system.
def initialize(self, botengine):
"""
Initialize this object
The correct behavior is to create the object, then initialize() it every time you want to use it in a new bot execution environment
"""
# Added January 27, 2019
if not hasattr(self, 'spaces'):
self.spaces = []
if str(self.device_type) in intelligence.index.MICROSERVICES['DEVICE_MICROSERVICES']:
# Synchronize microservice capabilities
if len(self.intelligence_modules) != len(intelligence.index.MICROSERVICES['DEVICE_MICROSERVICES'][str(self.device_type)]):
# Add more microservices
# Example of an element in the DEVICE_MICROSERVICES dictionary:
# 10014: [{"module": "intelligence.rules.device_entry_microservice", "class": "EntryRulesMicroservice"}],
for intelligence_info in intelligence.index.MICROSERVICES['DEVICE_MICROSERVICES'][str(self.device_type)]:
if intelligence_info['module'] not in self.intelligence_modules:
try:
intelligence_module = importlib.import_module(intelligence_info['module'])
class_ = getattr(intelligence_module, intelligence_info['class'])
botengine.get_logger().info("\tAdding device microservice: " + str(intelligence_info['module']))
intelligence_object = class_(botengine, self)
self.intelligence_modules[intelligence_info['module']] = intelligence_object
except Exception as e:
botengine.get_logger().error("\tCould not add device microservice: " + str(intelligence_info) + ": " + str(e))
import traceback
traceback.print_exc()
# Remove microservices that no longer exist
                # Copy the keys so entries can be deleted safely while iterating
                for module_name in list(self.intelligence_modules.keys()):
found = False
for intelligence_info in intelligence.index.MICROSERVICES['DEVICE_MICROSERVICES'][str(self.device_type)]:
if intelligence_info['module'] == module_name:
found = True
break
if not found:
botengine.get_logger().info("\tDeleting device microservice: " + str(module_name))
del self.intelligence_modules[module_name]
for i in self.intelligence_modules:
self.intelligence_modules[i].parent = self
self.intelligence_modules[i].initialize(botengine)
elif len(self.intelligence_modules) > 0:
# There are no intelligence modules for this device type, and yet we have some intelligence modules locally. Delete everything.
botengine.get_logger().info("\tDeleting all device microservices")
self.intelligence_modules = {}
def get_device_type_name(self, language):
"""
:return: the name of this device type in the given language, for example, "Entry Sensor"
"""
# NOTE: Super abstract device type name
return _("Device")
def get_image_name(self, botengine=None):
"""
:return: the font icon name of this device type
"""
raise NotImplementedError
def is_goal_id(self, target_goal_id):
"""
This is the proper way to check for whether or not this device matches the given target goal ID,
because goal IDs can change by an order of 1000 for each different brand.
:param botengine: BotEngine environment
:return: True if the goal ID matches for this device
"""
if self.goal_id is not None:
return self.goal_id % 1000 == target_goal_id
return False
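    # Sketch of the matching above (values are hypothetical): brands may offset
    # goal IDs by multiples of 1000, so a device whose goal_id is 31005 matches
    # is_goal_id(5) because 31005 % 1000 == 5.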
#===========================================================================
# Microservice notification distribution methods
#===========================================================================
def device_measurements_updated(self, botengine):
"""
Distribute notifications to all microservices that your measurements have been updated
:param botengine:
:return:
"""
for intelligence_id in self.intelligence_modules:
self.intelligence_modules[intelligence_id].device_measurements_updated(botengine, self)
def device_metadata_updated(self, botengine):
"""
Distribute notifications to all microservices that your metadata has been updated
:param botengine:
:return:
"""
for intelligence_id in self.intelligence_modules:
self.intelligence_modules[intelligence_id].device_metadata_updated(botengine, self)
def device_alert(self, botengine, alert_type, alert_params):
"""
Distribute notifications to all microservices that an alert has been generated from this device
:param botengine: BotEngine environment
:param alert_type: Type of alert
:param alert_params: Dictionary of alert parameters
"""
for intelligence_id in self.intelligence_modules:
self.intelligence_modules[intelligence_id].device_alert(botengine, self, alert_type, alert_params)
#===========================================================================
# Measurement synchronization and updates
#===========================================================================
def synchronize(self, botengine):
"""
Synchronize with the server
:param botengine: BotEngine environment
"""
self.cache_measurements(botengine, botengine.get_timestamp() - TOTAL_DURATION_TO_CACHE_MEASUREMENTS_MS, botengine.get_timestamp())
def cache_measurements(self, botengine, oldest_timestamp_ms, newest_timestamp_ms):
"""
Download and cache historical measurements locally
:param botengine: BotEngine environment
:param oldest_timestamp_ms: Oldest timestamp to download history from
:param newest_timestamp_ms: Newest timestamp to download history to
"""
try:
measurements = botengine.get_measurements(self.device_id, oldest_timestamp_ms=oldest_timestamp_ms, newest_timestamp_ms=newest_timestamp_ms)
except:
# This can happen because this bot may not have read permissions for this device.
botengine.get_logger().warning("Cannot synchronize measurements for device: " + str(self.description))
return
botengine.get_logger().info("Synchronizing measurements for device: " + str(self.description))
if 'measures' in measurements:
for measure in measurements['measures']:
if 'value' not in measure:
#botengine.get_logger().error("device.py: Measurement has no value: " + str(measure) + ";\n Measurement was: " + str(measure))
continue
value = utilities.normalize_measurement(measure['value'])
param_name = measure['name']
time = measure['time']
# If there's an index number, we just augment the parameter name with the index number to make it a unique parameter name. param_name.index
if 'index' in measure:
if measure['index'] is not None:
if measure['index'].lower() != "none":
param_name = "{}.{}".format(param_name, measure['index'])
if param_name in self.measurements:
if self.measurements[param_name][0][0] == value and self.measurements[param_name][0][1] == time:
# Already captured this measurement
continue
self.add_measurement(botengine, param_name, value, time)
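    # Sketch of the index augmentation described above (values are hypothetical):
    # a measurement {'name': 'doorStatus', 'index': '2', 'value': '1', ...} is
    # cached under the unique parameter name 'doorStatus.2'.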
def update(self, botengine):
"""
Attempt to parse the inputs to update this object
"""
self.last_updated_params = []
self.communicated(botengine.get_timestamp())
botengine.get_logger().info("Updating: " + self.description)
measures = botengine.get_measures_block()
if measures is not None:
for measure in measures:
if measure['deviceId'] == self.device_id:
param_name = measure['name']
if param_name == 'rssi':
if measure['updated']:
# Update the RSSI
rssi = int(measure['value'])
self.update_rssi(botengine, rssi)
self.last_updated_params.append('rssi')
else:
# RSSI didn't change
self.rssi_status_quo(botengine)
elif param_name == 'batteryLevel' and measure['updated']:
# Update the battery_level
self.battery_level = int(measure['value'])
self.last_updated_params.append('batteryLevel')
elif param_name not in self.measurements or measure['updated']:
if 'value' not in measure:
#botengine.get_logger().error("device.py: Measurement has no value: " + str(measure) + ";\n Measures block was: " + str(botengine.get_measures_block()))
continue
value = utilities.normalize_measurement(measure['value'])
# If there's an index number, we just augment the parameter name with the index number to make it a unique parameter name. param_name.index
if 'index' in measure:
if measure['index'] is not None:
if measure['index'].lower() != "none":
param_name = "{}.{}".format(param_name, measure['index'])
self.add_measurement(botengine, param_name, value, measure['time'])
self.last_updated_params.append(param_name)
# List of devices (this one and its proxy) that were updated, to later synchronize with the location outside of this object
updated_devices = []
updated_metadata = []
# Update all device intelligence modules
if len(self.last_updated_params) > 0:
updated_devices.append(self)
else:
# Metadata was updated
updated_metadata.append(self)
# Make sure our proxy (gateway) gets pinged - it implicitly updated here and needs to trigger microservices
if self.proxy_id is not None:
if self.proxy_id in self.location_object.devices:
d, m = self.location_object.devices[self.proxy_id].update(botengine)
updated_devices += d
updated_metadata += m
return (updated_devices, updated_metadata)
def file_uploaded(self, botengine, device_object, file_id, filesize_bytes, content_type, file_extension):
"""
A device file has been uploaded
:param botengine: BotEngine environment
:param device_object: Device object that uploaded the file
:param file_id: File ID to reference this file at the server
:param filesize_bytes: The file size in bytes
:param content_type: The content type, for example 'video/mp4'
:param file_extension: The file extension, for example 'mp4'
"""
for intelligence_id in self.intelligence_modules:
self.intelligence_modules[intelligence_id].file_uploaded(botengine, device_object, file_id, filesize_bytes, content_type, file_extension)
def add_measurement(self, botengine, name, value, timestamp):
"""
Update the device's status
:param botengine:
:param name:
:param value:
:param timestamp:
:return:
"""
#botengine.get_logger().info("{}: '{}': {}={}".format(self.device_id, self.description, name, value))
self.measurement_odometer += 1
if name not in self.measurements:
# Create the measurement
self.measurements[name] = []
self.measurements[name].insert(0, (value, timestamp))
# Auto garbage-collect
if self.enforce_cache_size and len(self.measurements[name]) > 1:
if self.measurements[name][-1][1] <= botengine.get_timestamp() - TOTAL_DURATION_TO_CACHE_MEASUREMENTS_MS:
del(self.measurements[name][-1])
def garbage_collect(self, botengine):
"""
Clean up the garbage
        :param botengine: BotEngine environment
"""
current_timestamp = botengine.get_timestamp()
for name in self.measurements:
i = 0
if len(self.measurements[name]) > 1:
for (value, timestamp) in self.measurements[name]:
i += 1
if timestamp <= current_timestamp - TOTAL_DURATION_TO_CACHE_MEASUREMENTS_MS:
del self.measurements[name][i:]
break
def communicated(self, timestamp):
"""
Call this function when the device communicates at all.
This lets us evaluate how often the device communicates, how many times per day, communications during test mode, etc.
"""
if self.last_communications_timestamp is not None:
#self.log("\t=> Last communication was " + str((timestamp - self.last_communications_timestamp) / 1000) + " seconds ago")
pass
self.last_communications_timestamp = timestamp
self.total_communications_odometer += 1
self.communications_odometer += 1
def reset_odometers(self):
"""
Reset all our odometers except the total_communications_odometer
For example, if we're entering TEST mode and want to keep track of communications
"""
self.communications_odometer = 0
self.measurement_odometer = 0
#===========================================================================
# Device health
#===========================================================================
def update_rssi(self, botengine, rssi):
"""
Update our RSSI readings
        :param rssi: Latest RSSI reading
"""
self._rssi_elements.append(int(rssi))
if len(self._rssi_elements) > MAXIMUM_AVERAGING_ELEMENTS:
del self._rssi_elements[0]
rssi_average = int(sum(self._rssi_elements) / len(self._rssi_elements))
if rssi_average < self.LOW_RSSI_THRESHOLD:
# Should be tagged
if self.LOW_SIGNAL_STRENGTH_TAG not in self.tags:
# Wasn't tagged before, tag it.
botengine.tag_device(self.LOW_SIGNAL_STRENGTH_TAG, self.device_id)
self.tags.append(self.LOW_SIGNAL_STRENGTH_TAG)
else:
# Shouldn't be tagged
if self.LOW_SIGNAL_STRENGTH_TAG in self.tags:
# Was tagged, delete it.
botengine.delete_device_tag(self.LOW_SIGNAL_STRENGTH_TAG, self.device_id)
self.tags.remove(self.LOW_SIGNAL_STRENGTH_TAG)
def rssi_status_quo(self, botengine):
"""
RSSI reading didn't change from last time, duplicate the last reading
:param botengine:
:return:
"""
if len(self._rssi_elements) > 0:
self.update_rssi(botengine, self._rssi_elements[-1])
def raw_command(self, name, value):
"""
Send a command for the given local measurement name
"""
pass
def is_command(self, measurement_name):
"""
:param measurement_name: Name of a local measurement name
:return: True if the given parameter name is a command
"""
return False
def get_proxy_object(self, botengine=None):
"""
:return: Gateway / Proxy object this device connects through. None if it doesn't exist
"""
if self.proxy_id is not None:
if self.proxy_id in self.location_object.devices:
return self.location_object.devices[self.proxy_id]
return None
#===========================================================================
# Coordinates
#===========================================================================
def update_coordinates(self, botengine, latitude, longitude):
"""
Update the latitude and longitude
:param botengine: BotEngine environment
:param latitude: Latitude
:param longitude: Longitude
"""
if float(latitude) == self.latitude and float(longitude) == self.longitude:
return
self.latitude = float(latitude)
self.longitude = float(longitude)
# Notify my microservices
for intelligence_id in self.intelligence_modules:
self.intelligence_modules[intelligence_id].coordinates_updated(botengine, latitude, longitude)
# Notify all children microservices
for device_id in self.location_object.devices:
if self.location_object.devices[device_id].proxy_id == self.device_id:
for intelligence_id in self.location_object.devices[device_id].intelligence_modules:
self.location_object.devices[device_id].intelligence_modules[intelligence_id].coordinates_updated(botengine, latitude, longitude)
#===========================================================================
# Daylight
#===========================================================================
def is_daylight(self, botengine):
"""
REQUIRES THE 'daylight' MICROSERVICE.
This is a convenience method that requires you to attach the 'daylight' microservice.
It will then seek out this microservice for your device and ask it whether it's daylight outside or not.
:param botengine:
:return: True/False if daylight information can be accessed from the 'daylight' microservice; None if there is no information.
"""
proxy_object = self.get_proxy_object(botengine)
if proxy_object is not None:
for intelligence_id in proxy_object.intelligence_modules:
try:
daylight = proxy_object.intelligence_modules[intelligence_id].is_daylight(botengine)
if daylight:
botengine.get_logger().info("It is day time")
else:
botengine.get_logger().info("It is night time")
return daylight
except:
pass
return None
#===========================================================================
# Spaces
#===========================================================================
def is_in_space(self, botengine, space_description_or_type):
"""
Determine if this device is associated with the given space description.
The description must be a word inside our SPACE_TYPE dictionary.
:param botengine: BotEngine environment
:param space_description_or_type: Space type number or description from our SPACE_TYPE dictionary
:return: True if the device is in the given space
"""
space_type = None
        if str(space_description_or_type).lower() in SPACE_TYPE:
            space_type = SPACE_TYPE[str(space_description_or_type).lower()]
else:
try:
space_type = int(space_description_or_type)
except:
botengine.get_logger().error("device.is_in_space(): Couldn't identify what space type you're talking about - {}".format(space_description_or_type))
return False
for space in self.spaces:
if space['spaceType'] == space_type:
return True
return False
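    # Sketch (space assignments are hypothetical): a device whose self.spaces
    # contains {"name": "Kitchen", "spaceId": 152, "spaceType": 1} returns True
    # for is_in_space(botengine, "kitchen"), since "kitchen" maps to space
    # type 1 in SPACE_TYPE.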
def is_in_spaces(self, botengine, space_descriptions_or_types_list):
"""
Determine if this device is associated with any of the given spaces in the list.
If the list contains descriptive strings, the strings must be words inside of our SPACE_TYPE dictionary.
:param botengine: BotEngine environment
:param space_descriptions_or_types_list: List of space type numbers, or list of strings from our SPACE_TYPE dictionary
:return: True if the device is in any of the given spaces
"""
space_types = []
for s in space_descriptions_or_types_list:
            if str(s).lower() in SPACE_TYPE:
                space_types.append(SPACE_TYPE[str(s).lower()])
else:
try:
space_type = int(s)
space_types.append(space_type)
except:
botengine.get_logger().error("device.is_in_spaces(): Couldn't identify what space type you're talking about - {}".format(s))
continue
comparison_types = []
for space in self.spaces:
comparison_types.append(space['spaceType'])
for t in space_types:
if t in comparison_types:
return True
return False
#===========================================================================
# Data request
#===========================================================================
def request_data(self, botengine, oldest_timestamp_ms=None, newest_timestamp_ms=None, param_name_list=None, reference=None, index=None, ordered=1):
"""
Selecting a large amount of data from the database can take a significant amount of time and impact server
performance. To avoid this long waiting period while executing bots, a bot can submit a request for all the
data it wants from this location asynchronously. The server gathers all the data on its own time, and then
triggers the bot with trigger 2048. Your bot must include trigger 2048 to receive the trigger.
Selected data becomes available as a file in CSV format, compressed by LZ4, and stored for one day.
The bot receives direct access to this file.
You can call this multiple times to extract data out of multiple devices. The request will be queued up and
the complete set of requests will be flushed at the end of this bot execution.
:param botengine:
:param oldest_timestamp_ms:
:param newest_timestamp_ms:
:param param_name_list:
:param reference:
:param index:
:param ordered:
:return:
"""
botengine.request_data(self.device_id,
oldest_timestamp_ms=oldest_timestamp_ms,
newest_timestamp_ms=newest_timestamp_ms,
param_name_list=param_name_list,
reference=reference,
index=index,
ordered=ordered)
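    # Minimal usage sketch (the parameter name and the reference are hypothetical);
    # the request is queued, flushed at the end of this bot execution, and the
    # LZ4-compressed CSV arrives later through data request trigger 2048:
    #   device_object.request_data(botengine,
    #                              oldest_timestamp_ms=botengine.get_timestamp() - utilities.ONE_HOUR_MS * 24,
    #                              param_name_list=['temperature'],
    #                              reference='my_data_request')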
#===========================================================================
# CSV methods for machine learning algorithm integrations
#===========================================================================
def get_csv(self, botengine, oldest_timestamp_ms=None, newest_timestamp_ms=None, params=[]):
"""
Get a .csv string of all the data
This is useful when you're using .csv data from a user's account outside of the bot microservices environment to construct machine learning algorithms,
and then want to drag-and-drop those same algorithms into a bot environment and watch it run the same way without having to transform data.
Mimics the type of .csv output you'd obtain with the following CLI commands:
botengine --download_device <device_id>
botengine --download_type <device_type>
:param botengine: BotEngine environment
:param oldest_timestamp_ms: oldest timestamp in milliseconds
:param newest_timestamp_ms: newest timestamp in milliseconds
:param params: List of parameters
:return: .csv string, largely matching the .csv data you would receive from the "botengine --download_device [device_id]" command line interface. Or None if this device doesn't have data.
"""
if len(self.measurements) == 0:
            botengine.get_logger().info("{}: get_csv() - This device has no measurements".format(self.description))
return None
if params:
titles = sorted(params)
else:
titles = sorted(self.measurements.keys())
last_measurements = {}
for title in titles:
try:
last_measurements[title] = self.measurements[title][0][0]
except:
pass
# Check to see that all the parameters we're requesting have valid measurements in this device object
# Remember that an index number will modify the name of the parameter to make it unique, and we need to match against the unique name of each parameter
if not set(params).issubset(last_measurements.keys()):
            botengine.get_logger().info("{}: get_csv() - Not all of the requested parameters exist for this device".format(self.description))
return None
output = "device_type,device_id,description,timestamp_ms,timestamp_iso,"
for t in titles:
output = "{}{},".format(output, t)
output += "\n"
try:
measurements = botengine.get_measurements(self.device_id, oldest_timestamp_ms=oldest_timestamp_ms, newest_timestamp_ms=newest_timestamp_ms, param_name=params)
except:
# This can happen because this bot may not have read permissions for this device.
botengine.get_logger().warning("Cannot synchronize measurements for device: " + str(self.description))
return None
processed_readings = {}
if 'measures' in measurements:
for measure in measurements['measures']:
if 'value' not in measure:
continue
value = utilities.normalize_measurement(measure['value'])
param_name = measure['name']
time = int(measure['time'])
# If there's an index number, we just augment the parameter name with the index number to make it a unique parameter name. param_name.index
if 'index' in measure:
if measure['index'] is not None:
if measure['index'].lower() != "none":
param_name = "{}.{}".format(param_name, measure['index'])
processed_readings[time] = (param_name, value)
measurements = None
import gc
gc.collect()
botengine.get_logger().info("{}: get_csv() - Processing {} measurements ...".format(self.description, str(len(processed_readings))))
for timestamp_ms in sorted(processed_readings.keys()):
dt = self.location_object.get_local_datetime_from_timestamp(botengine, timestamp_ms)
output += "{},{},{},{},{},".format(self.device_type, self.device_id.replace(",","_"), self.description.replace(",","_"), timestamp_ms, utilities.iso_format(dt))
for t in titles:
if t == processed_readings[timestamp_ms][0]:
output += "{},".format(processed_readings[timestamp_ms][1])
else:
output += "{},".format(last_measurements[t])
output += "\n"
return output
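    # Usage sketch (assumes this bot has read access to the device's history;
    # the parameter name is hypothetical):
    #   csv_string = device_object.get_csv(botengine, params=['temperature'])
    #   if csv_string is not None:
    #       # First row is the header:
    #       # device_type,device_id,description,timestamp_ms,timestamp_iso,<params...>
    #       botengine.get_logger().info(csv_string.split("\n")[0])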
#===============================================================================
# These functions are outside the Device class above.
#===============================================================================
def send_command_reliably(botengine, device_id, param_name, param_value):
"""
Send a command reliably
:param botengine: BotEngine
:param device_id: Device ID to send the command to
:param param_name: Parameter name
:param param_value: Parameter value
"""
botengine.get_logger().info("{}: Send command reliably".format(device_id))
queue = botengine.load_variable(RELIABILITY_VARIABLE_NAME)
if queue is None:
queue = {}
if device_id not in queue:
queue[device_id] = {}
botengine.send_commands(device_id, [botengine.form_command(param_name, param_value)])
botengine.cancel_timers(device_id)
botengine.start_timer(TIME_BETWEEN_ATTEMPTS_SEC, _attempt_reliable_delivery, None, "reliability")
# queue[device_id] = {'param_name': ('param_value', attempts, timestamp)}
if param_name in queue[device_id]:
if queue[device_id][param_name][0] == param_value:
# No need to update the timestamp
return
queue[device_id][param_name] = (param_value, 0, botengine.get_timestamp())
botengine.save_variable(RELIABILITY_VARIABLE_NAME, queue)
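# Sketch of the reliability queue saved above (device ID, parameter name and
# value are hypothetical):
#   {
#       "device_123": {
#           "outletStatus": ("1", 0, 1560000000000)   # (value, attempts, send timestamp)
#       }
#   }
# Each timer tick re-sends any command that has not yet been verified against
# new measurements, until MAX_ATTEMPTS is reached.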
def cancel_reliable_command(botengine, device_id, param_name):
"""
Stop trying to send a command reliably
:param botengine:
:param device_id: Device ID
:param param_name: Parameter name to cancel.
:return:
"""
queue = botengine.load_variable(RELIABILITY_VARIABLE_NAME)
if queue is None:
return
if device_id in queue:
if param_name in queue[device_id]:
del(queue[device_id][param_name])
if len(queue[device_id]) == 0:
del(queue[device_id])
botengine.save_variable(RELIABILITY_VARIABLE_NAME, queue)
def queued_commands_for_device(botengine, device_id):
"""
Get the queued commands for the current device in a dictionary of the form: { 'paramName': ('value', attempts, send_timestamp) , ... }
Basically if this response isn't empty, then there are commands in the queue that haven't been verified yet.
:return: Dictionary of commands in the queue, or a blank dictionary {} if there are no commands or the device isn't found
"""
queue = botengine.load_variable(RELIABILITY_VARIABLE_NAME)
if queue is not None:
if device_id in queue:
return queue[device_id]
return {}
def _attempt_reliable_delivery(botengine, args):
"""
Attempt reliable delivery of everything in our queue
This is executed by a timer.
"""
botengine.get_logger().info(">reliability")
queue = botengine.load_variable(RELIABILITY_VARIABLE_NAME)
if queue is None:
return
logger = botengine.get_logger()
logger.debug("RELIABILITY: Queue looks like " + str(queue))
import copy
for device_id in copy.copy(queue):
# Prune out all our successfully delivered commands, and commands that have timed out
params_to_remove = []
for param_name in queue[device_id]:
(param_value, attempts, timestamp) = queue[device_id][param_name]
if attempts < MAX_ATTEMPTS:
# Check to see if the last attempt went through
measures = None
try:
measures = botengine.get_measurements(device_id, param_name=param_name, oldest_timestamp_ms=timestamp)
except:
# No longer have access to the device
params_to_remove.append(param_name)
logger.debug("RELIABILITY: measurements since " + str(timestamp) + ": " + str(measures))
if measures is not None:
if 'measures' in measures:
for m in measures['measures']:
if m['name'] == param_name and m['value'] == param_value:
# Command had been delivered reliably
logger.debug("RELIABILITY: COMMAND HAS BEEN DELIVERED RELIABLY")
params_to_remove.append(param_name)
break
else:
# TODO log this error somewhere
logger.debug("RELIABILITY: MAXIMUM ATTEMPTS REACHED FOR DEVICE " + str(device_id) + "; PARAM_NAME=" + str(param_name) + "; PARAM_VALUE=" + str(param_value))
params_to_remove.append(param_name)
for param in params_to_remove:
if param in queue[device_id]:
del(queue[device_id][param])
if len(queue[device_id]) > 0:
botengine.cancel_timers("reliability")
botengine.start_timer(TIME_BETWEEN_ATTEMPTS_SEC, _attempt_reliable_delivery, None, "reliability")
for param_name in queue[device_id]:
# Increment our attempts
(param_value, attempts, timestamp) = queue[device_id][param_name]
attempts += 1
queue[device_id][param_name] = (param_value, attempts, timestamp)
logger.debug("RELIABILITY: Re-sending command to " + device_id + ": " + str(param_name) + " = " + str(param_value))
botengine.send_command(device_id, param_name, param_value)
else:
del(queue[device_id])
logger.debug("RELIABILITY: Cleaned queue looks like " + str(queue))
botengine.save_variable(RELIABILITY_VARIABLE_NAME, queue)
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Fetches, embeds, and displays lyrics.
"""
from __future__ import absolute_import, division, print_function
import difflib
import itertools
import json
import struct
import re
import requests
import unicodedata
import warnings
import six
from six.moves import urllib
try:
from bs4 import SoupStrainer, BeautifulSoup
HAS_BEAUTIFUL_SOUP = True
except ImportError:
HAS_BEAUTIFUL_SOUP = False
try:
import langdetect
HAS_LANGDETECT = True
except ImportError:
HAS_LANGDETECT = False
try:
# PY3: HTMLParseError was removed in 3.5 as strict mode
# was deprecated in 3.3.
# https://docs.python.org/3.3/library/html.parser.html
from six.moves.html_parser import HTMLParseError
except ImportError:
class HTMLParseError(Exception):
pass
from beets import plugins
from beets import ui
import beets
DIV_RE = re.compile(r'<(/?)div>?', re.I)
COMMENT_RE = re.compile(r'<!--.*-->', re.S)
TAG_RE = re.compile(r'<[^>]*>')
BREAK_RE = re.compile(r'\n?\s*<br([\s|/][^>]*)*>\s*\n?', re.I)
URL_CHARACTERS = {
u'\u2018': u"'",
u'\u2019': u"'",
u'\u201c': u'"',
u'\u201d': u'"',
u'\u2010': u'-',
u'\u2011': u'-',
u'\u2012': u'-',
u'\u2013': u'-',
u'\u2014': u'-',
u'\u2015': u'-',
u'\u2016': u'-',
u'\u2026': u'...',
}
USER_AGENT = 'beets/{}'.format(beets.__version__)
# Utilities.
def unichar(i):
try:
return six.unichr(i)
except ValueError:
return struct.pack('i', i).decode('utf-32')
def unescape(text):
"""Resolve &#xxx; HTML entities (and some others)."""
if isinstance(text, bytes):
text = text.decode('utf-8', 'ignore')
out = text.replace(u' ', u' ')
def replchar(m):
num = m.group(1)
return unichar(int(num))
out = re.sub(u"&#(\d+);", replchar, out)
return out
def extract_text_between(html, start_marker, end_marker):
try:
_, html = html.split(start_marker, 1)
html, _ = html.split(end_marker, 1)
except ValueError:
return u''
return html
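# Illustration (markers are hypothetical):
#   extract_text_between(u'<b>hello</b>', u'<b>', u'</b>')    -> u'hello'
#   extract_text_between(u'no markers here', u'<b>', u'</b>') -> u''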
def extract_text_in(html, starttag):
"""Extract the text from a <DIV> tag in the HTML starting with
``starttag``. Returns None if parsing fails.
"""
# Strip off the leading text before opening tag.
try:
_, html = html.split(starttag, 1)
except ValueError:
return
# Walk through balanced DIV tags.
level = 0
parts = []
pos = 0
for match in DIV_RE.finditer(html):
if match.group(1): # Closing tag.
level -= 1
if level == 0:
pos = match.end()
else: # Opening tag.
if level == 0:
parts.append(html[pos:match.start()])
level += 1
if level == -1:
parts.append(html[pos:match.start()])
break
else:
print(u'no closing tag found!')
return
return u''.join(parts)
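# Illustration of the balanced-DIV walk above (markup is hypothetical):
#   extract_text_in(u"junk<div class='x'>hello</div>tail", u"<div class='x'>")
#   -> u'hello'
# If no closing tag balances the starting <div>, the function returns None.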
def search_pairs(item):
"""Yield a pairs of artists and titles to search for.
The first item in the pair is the name of the artist, the second
item is a list of song names.
    In addition to the artist and title obtained from the `item`, the
    method tries to strip extra information like parenthesized suffixes
and featured artists from the strings and add them as candidates.
The method also tries to split multiple titles separated with `/`.
"""
def generate_alternatives(string, patterns):
"""Generate string alternatives by extracting first matching group for
each given pattern.
"""
alternatives = [string]
for pattern in patterns:
match = re.search(pattern, string, re.IGNORECASE)
if match:
alternatives.append(match.group(1))
return alternatives
title, artist = item.title, item.artist
patterns = [
# Remove any featuring artists from the artists name
r"(.*?) {0}".format(plugins.feat_tokens())]
artists = generate_alternatives(artist, patterns)
patterns = [
# Remove a parenthesized suffix from a title string. Common
# examples include (live), (remix), and (acoustic).
r"(.+?)\s+[(].*[)]$",
# Remove any featuring artists from the title
r"(.*?) {0}".format(plugins.feat_tokens(for_artist=False)),
# Remove part of title after colon ':' for songs with subtitles
r"(.+?)\s*:.*"]
titles = generate_alternatives(title, patterns)
# Check for a dual song (e.g. Pink Floyd - Speak to Me / Breathe)
    # and search for each of them.
multi_titles = []
for title in titles:
multi_titles.append([title])
if '/' in title:
multi_titles.append([x.strip() for x in title.split('/')])
return itertools.product(artists, multi_titles)
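# Sketch of what search_pairs() can yield (the item fields are hypothetical):
# for artist u"Alice feat. Bob" and title u"Speak to Me / Breathe (live)" the
# generated candidates include pairs such as
#   (u"Alice feat. Bob", [u"Speak to Me / Breathe (live)"])
#   (u"Alice", [u"Speak to Me", u"Breathe"])
# i.e. the cross product of artist alternatives and title groupings.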
class Backend(object):
def __init__(self, config, log):
self._log = log
@staticmethod
def _encode(s):
"""Encode the string for inclusion in a URL"""
if isinstance(s, six.text_type):
for char, repl in URL_CHARACTERS.items():
s = s.replace(char, repl)
s = s.encode('utf-8', 'ignore')
return urllib.parse.quote(s)
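    # Illustration: the curly quote and dash from URL_CHARACTERS are normalized
    # before percent-encoding, so _encode(u"Don\u2019t Stop \u2013 Live") would
    # yield 'Don%27t%20Stop%20-%20Live' (hypothetical input).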
def build_url(self, artist, title):
return self.URL_PATTERN % (self._encode(artist.title()),
self._encode(title.title()))
def fetch_url(self, url):
"""Retrieve the content at a given URL, or return None if the source
is unreachable.
"""
try:
# Disable the InsecureRequestWarning that comes from using
# `verify=false`.
# https://github.com/kennethreitz/requests/issues/2214
# We're not overly worried about the NSA MITMing our lyrics scraper
with warnings.catch_warnings():
warnings.simplefilter('ignore')
r = requests.get(url, verify=False, headers={
'User-Agent': USER_AGENT,
})
except requests.RequestException as exc:
self._log.debug(u'lyrics request failed: {0}', exc)
return
if r.status_code == requests.codes.ok:
return r.text
else:
self._log.debug(u'failed to fetch: {0} ({1})', url, r.status_code)
def fetch(self, artist, title):
raise NotImplementedError()
class SymbolsReplaced(Backend):
REPLACEMENTS = {
r'\s+': '_',
'<': 'Less_Than',
'>': 'Greater_Than',
'#': 'Number_',
r'[\[\{]': '(',
r'[\]\}]': ')',
}
@classmethod
def _encode(cls, s):
for old, new in cls.REPLACEMENTS.items():
s = re.sub(old, new, s)
return super(SymbolsReplaced, cls)._encode(s)
class MusiXmatch(SymbolsReplaced):
REPLACEMENTS = dict(SymbolsReplaced.REPLACEMENTS, **{
r'\s+': '-'
})
URL_PATTERN = 'https://www.musixmatch.com/lyrics/%s/%s'
def fetch(self, artist, title):
url = self.build_url(artist, title)
html = self.fetch_url(url)
if not html:
return
html_part = html.split('<p class="mxm-lyrics__content')[-1]
lyrics = extract_text_between(html_part, '>', '</p>')
return lyrics.strip(',"').replace('\\n', '\n')
class Genius(Backend):
"""Fetch lyrics from Genius via genius-api."""
def __init__(self, config, log):
super(Genius, self).__init__(config, log)
self.api_key = config['genius_api_key'].as_str()
self.headers = {
'Authorization': "Bearer %s" % self.api_key,
'User-Agent': USER_AGENT,
}
def search_genius(self, artist, title):
query = u"%s %s" % (artist, title)
url = u'https://api.genius.com/search?q=%s' \
% (urllib.parse.quote(query.encode('utf-8')))
self._log.debug(u'genius: requesting search {}', url)
try:
req = requests.get(
url,
headers=self.headers,
allow_redirects=True
)
req.raise_for_status()
except requests.RequestException as exc:
self._log.debug(u'genius: request error: {}', exc)
return None
try:
return req.json()
except ValueError:
self._log.debug(u'genius: invalid response: {}', req.text)
return None
def get_lyrics(self, link):
url = u'http://genius-api.com/api/lyricsInfo'
self._log.debug(u'genius: requesting lyrics for link {}', link)
try:
req = requests.post(
url,
data={'link': link},
headers=self.headers,
allow_redirects=True
)
req.raise_for_status()
except requests.RequestException as exc:
self._log.debug(u'genius: request error: {}', exc)
return None
try:
return req.json()
except ValueError:
self._log.debug(u'genius: invalid response: {}', req.text)
return None
def build_lyric_string(self, lyrics):
if 'lyrics' not in lyrics:
return
sections = lyrics['lyrics']['sections']
lyrics_list = []
for section in sections:
lyrics_list.append(section['name'])
lyrics_list.append('\n')
for verse in section['verses']:
if 'content' in verse:
lyrics_list.append(verse['content'])
return ''.join(lyrics_list)
def fetch(self, artist, title):
search_data = self.search_genius(artist, title)
if not search_data:
return
        if search_data['meta']['status'] != 200:
            return
        records = search_data['response']['hits']
if not records:
return
record_url = records[0]['result']['url']
lyric_data = self.get_lyrics(record_url)
if not lyric_data:
return
lyrics = self.build_lyric_string(lyric_data)
return lyrics
class LyricsWiki(SymbolsReplaced):
"""Fetch lyrics from LyricsWiki."""
URL_PATTERN = 'http://lyrics.wikia.com/%s:%s'
def fetch(self, artist, title):
url = self.build_url(artist, title)
html = self.fetch_url(url)
if not html:
return
# Get the HTML fragment inside the appropriate HTML element and then
# extract the text from it.
html_frag = extract_text_in(html, u"<div class='lyricbox'>")
if html_frag:
lyrics = _scrape_strip_cruft(html_frag, True)
if lyrics and 'Unfortunately, we are not licensed' not in lyrics:
return lyrics
def remove_credits(text):
"""Remove first/last line of text if it contains the word 'lyrics'
eg 'Lyrics by songsdatabase.com'
"""
textlines = text.split('\n')
credits = None
for i in (0, -1):
if textlines and 'lyrics' in textlines[i].lower():
credits = textlines.pop(i)
if credits:
text = '\n'.join(textlines)
return text
def _scrape_strip_cruft(html, plain_text_out=False):
"""Clean up HTML
"""
html = unescape(html)
html = html.replace('\r', '\n') # Normalize EOL.
html = re.sub(r' +', ' ', html) # Whitespaces collapse.
html = BREAK_RE.sub('\n', html) # <br> eats up surrounding '\n'.
    html = re.sub(r'(?s)<(script).*?</\1>', '', html)  # Strip script tags.
if plain_text_out: # Strip remaining HTML tags
html = COMMENT_RE.sub('', html)
html = TAG_RE.sub('', html)
html = '\n'.join([x.strip() for x in html.strip().split('\n')])
html = re.sub(r'\n{3,}', r'\n\n', html)
return html
def _scrape_merge_paragraphs(html):
html = re.sub(r'</p>\s*<p(\s*[^>]*)>', '\n', html)
return re.sub(r'<div .*>\s*</div>', '\n', html)
def scrape_lyrics_from_html(html):
"""Scrape lyrics from a URL. If no lyrics can be found, return None
instead.
"""
if not HAS_BEAUTIFUL_SOUP:
return None
if not html:
return None
def is_text_notcode(text):
length = len(text)
return (length > 20 and
text.count(' ') > length / 25 and
(text.find('{') == -1 or text.find(';') == -1))
html = _scrape_strip_cruft(html)
html = _scrape_merge_paragraphs(html)
# extract all long text blocks that are not code
try:
soup = BeautifulSoup(html, "html.parser",
parse_only=SoupStrainer(text=is_text_notcode))
except HTMLParseError:
return None
# Get the longest text element (if any).
strings = sorted(soup.stripped_strings, key=len, reverse=True)
if strings:
return strings[0]
else:
return None
class Google(Backend):
"""Fetch lyrics from Google search results."""
def __init__(self, config, log):
super(Google, self).__init__(config, log)
self.api_key = config['google_API_key'].as_str()
self.engine_id = config['google_engine_ID'].as_str()
def is_lyrics(self, text, artist=None):
"""Determine whether the text seems to be valid lyrics.
"""
if not text:
return False
bad_triggers_occ = []
nb_lines = text.count('\n')
if nb_lines <= 1:
self._log.debug(u"Ignoring too short lyrics '{0}'", text)
return False
elif nb_lines < 5:
bad_triggers_occ.append('too_short')
else:
# Lyrics look legit, remove credits to avoid being penalized
# further down
text = remove_credits(text)
bad_triggers = ['lyrics', 'copyright', 'property', 'links']
if artist:
bad_triggers_occ += [artist]
for item in bad_triggers:
bad_triggers_occ += [item] * len(re.findall(r'\W%s\W' % item,
text, re.I))
if bad_triggers_occ:
self._log.debug(u'Bad triggers detected: {0}', bad_triggers_occ)
return len(bad_triggers_occ) < 2
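    # Worked example of the heuristic above (values are illustrative): a
    # three-line text containing the word "copyright" once collects two
    # triggers -- 'too_short' plus 'copyright' -- and is rejected, whereas a
    # longer text with a single stray trigger still passes, because
    # is_lyrics() only requires fewer than two trigger occurrences.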
def slugify(self, text):
"""Normalize a string and remove non-alphanumeric characters.
"""
text = re.sub(r"[-'_\s]", '_', text)
text = re.sub(r"_+", '_', text).strip('_')
pat = "([^,\(]*)\((.*?)\)" # Remove content within parentheses
text = re.sub(pat, '\g<1>', text).strip()
try:
text = unicodedata.normalize('NFKD', text).encode('ascii',
'ignore')
            text = six.text_type(re.sub(r'[-\s]+', ' ', text.decode('utf-8')))
except UnicodeDecodeError:
self._log.exception(u"Failing to normalize '{0}'", text)
return text
BY_TRANS = ['by', 'par', 'de', 'von']
LYRICS_TRANS = ['lyrics', 'paroles', 'letras', 'liedtexte']
def is_page_candidate(self, url_link, url_title, title, artist):
"""Return True if the URL title makes it a good candidate to be a
page that contains lyrics of title by artist.
"""
title = self.slugify(title.lower())
artist = self.slugify(artist.lower())
sitename = re.search(u"//([^/]+)/.*",
self.slugify(url_link.lower())).group(1)
url_title = self.slugify(url_title.lower())
# Check if URL title contains song title (exact match)
if url_title.find(title) != -1:
return True
# or try extracting song title from URL title and check if
# they are close enough
tokens = [by + '_' + artist for by in self.BY_TRANS] + \
[artist, sitename, sitename.replace('www.', '')] + \
self.LYRICS_TRANS
tokens = [re.escape(t) for t in tokens]
song_title = re.sub(u'(%s)' % u'|'.join(tokens), u'', url_title)
song_title = song_title.strip('_|')
typo_ratio = .9
ratio = difflib.SequenceMatcher(None, song_title, title).ratio()
return ratio >= typo_ratio
def fetch(self, artist, title):
query = u"%s %s" % (artist, title)
url = u'https://www.googleapis.com/customsearch/v1?key=%s&cx=%s&q=%s' \
% (self.api_key, self.engine_id,
urllib.parse.quote(query.encode('utf-8')))
data = self.fetch_url(url)
if not data:
self._log.debug(u'google backend returned no data')
return None
try:
data = json.loads(data)
        except ValueError as exc:
            self._log.debug(u'google backend returned malformed JSON: {}', exc)
            return None
if 'error' in data:
reason = data['error']['errors'][0]['reason']
self._log.debug(u'google backend error: {0}', reason)
return None
if 'items' in data.keys():
for item in data['items']:
url_link = item['link']
url_title = item.get('title', u'')
if not self.is_page_candidate(url_link, url_title,
title, artist):
continue
html = self.fetch_url(url_link)
lyrics = scrape_lyrics_from_html(html)
if not lyrics:
continue
if self.is_lyrics(lyrics, artist):
self._log.debug(u'got lyrics from {0}',
item['displayLink'])
return lyrics
class LyricsPlugin(plugins.BeetsPlugin):
SOURCES = ['google', 'lyricwiki', 'musixmatch']
SOURCE_BACKENDS = {
'google': Google,
'lyricwiki': LyricsWiki,
'musixmatch': MusiXmatch,
'genius': Genius,
}
def __init__(self):
super(LyricsPlugin, self).__init__()
self.import_stages = [self.imported]
self.config.add({
'auto': True,
'bing_client_secret': None,
'bing_lang_from': [],
'bing_lang_to': None,
'google_API_key': None,
'google_engine_ID': u'009217259823014548361:lndtuqkycfu',
'genius_api_key':
"Ryq93pUGm8bM6eUWwD_M3NOFFDAtp2yEE7W"
"76V-uFL5jks5dNvcGCdarqFjDhP9c",
'fallback': None,
'force': False,
'sources': self.SOURCES,
})
self.config['bing_client_secret'].redact = True
self.config['google_API_key'].redact = True
self.config['google_engine_ID'].redact = True
self.config['genius_api_key'].redact = True
available_sources = list(self.SOURCES)
sources = plugins.sanitize_choices(
self.config['sources'].as_str_seq(), available_sources)
if 'google' in sources:
if not self.config['google_API_key'].get():
# We log a *debug* message here because the default
# configuration includes `google`. This way, the source
# is silent by default but can be enabled just by
# setting an API key.
self._log.debug(u'Disabling google source: '
u'no API key configured.')
sources.remove('google')
elif not HAS_BEAUTIFUL_SOUP:
self._log.warning(u'To use the google lyrics source, you must '
u'install the beautifulsoup4 module. See '
u'the documentation for further details.')
sources.remove('google')
self.config['bing_lang_from'] = [
x.lower() for x in self.config['bing_lang_from'].as_str_seq()]
self.bing_auth_token = None
if not HAS_LANGDETECT and self.config['bing_client_secret'].get():
self._log.warning(u'To use bing translations, you need to '
u'install the langdetect module. See the '
u'documentation for further details.')
self.backends = [self.SOURCE_BACKENDS[source](self.config, self._log)
for source in sources]
def get_bing_access_token(self):
params = {
'client_id': 'beets',
'client_secret': self.config['bing_client_secret'],
'scope': "https://api.microsofttranslator.com",
'grant_type': 'client_credentials',
}
oauth_url = 'https://datamarket.accesscontrol.windows.net/v2/OAuth2-13'
oauth_token = json.loads(requests.post(
oauth_url,
data=urllib.parse.urlencode(params)).content)
if 'access_token' in oauth_token:
return "Bearer " + oauth_token['access_token']
else:
self._log.warning(u'Could not get Bing Translate API access token.'
u' Check your "bing_client_secret" password')
def commands(self):
cmd = ui.Subcommand('lyrics', help='fetch song lyrics')
cmd.parser.add_option(
u'-p', u'--print', dest='printlyr',
action='store_true', default=False,
help=u'print lyrics to console',
)
cmd.parser.add_option(
u'-f', u'--force', dest='force_refetch',
action='store_true', default=False,
help=u'always re-download lyrics',
)
def func(lib, opts, args):
# The "write to files" option corresponds to the
# import_write config value.
write = ui.should_write()
for item in lib.items(ui.decargs(args)):
self.fetch_item_lyrics(
lib, item, write,
opts.force_refetch or self.config['force'],
)
if opts.printlyr and item.lyrics:
ui.print_(item.lyrics)
cmd.func = func
return [cmd]
def imported(self, session, task):
"""Import hook for fetching lyrics automatically.
"""
if self.config['auto']:
for item in task.imported_items():
self.fetch_item_lyrics(session.lib, item,
False, self.config['force'])
def fetch_item_lyrics(self, lib, item, write, force):
"""Fetch and store lyrics for a single item. If ``write``, then the
lyrics will also be written to the file itself.
"""
# Skip if the item already has lyrics.
if not force and item.lyrics:
self._log.info(u'lyrics already present: {0}', item)
return
lyrics = None
for artist, titles in search_pairs(item):
lyrics = [self.get_lyrics(artist, title) for title in titles]
if any(lyrics):
break
lyrics = u"\n\n---\n\n".join([l for l in lyrics if l])
if lyrics:
self._log.info(u'fetched lyrics: {0}', item)
if HAS_LANGDETECT and self.config['bing_client_secret'].get():
lang_from = langdetect.detect(lyrics)
if self.config['bing_lang_to'].get() != lang_from and (
not self.config['bing_lang_from'] or (
lang_from in self.config[
'bing_lang_from'].as_str_seq())):
lyrics = self.append_translation(
lyrics, self.config['bing_lang_to'])
else:
self._log.info(u'lyrics not found: {0}', item)
fallback = self.config['fallback'].get()
if fallback:
lyrics = fallback
else:
return
item.lyrics = lyrics
if write:
item.try_write()
item.store()
def get_lyrics(self, artist, title):
"""Fetch lyrics, trying each source in turn. Return a string or
None if no lyrics were found.
"""
for backend in self.backends:
lyrics = backend.fetch(artist, title)
if lyrics:
self._log.debug(u'got lyrics from backend: {0}',
backend.__class__.__name__)
return _scrape_strip_cruft(lyrics, True)
def append_translation(self, text, to_lang):
import xml.etree.ElementTree as ET
if not self.bing_auth_token:
self.bing_auth_token = self.get_bing_access_token()
if self.bing_auth_token:
# Extract unique lines to limit API request size per song
text_lines = set(text.split('\n'))
url = ('https://api.microsofttranslator.com/v2/Http.svc/'
'Translate?text=%s&to=%s' % ('|'.join(text_lines), to_lang))
            r = requests.get(url,
                             headers={"Authorization": self.bing_auth_token})
if r.status_code != 200:
self._log.debug('translation API error {}: {}', r.status_code,
r.text)
if 'token has expired' in r.text:
self.bing_auth_token = None
return self.append_translation(text, to_lang)
return text
lines_translated = ET.fromstring(r.text.encode('utf-8')).text
# Use a translation mapping dict to build resulting lyrics
translations = dict(zip(text_lines, lines_translated.split('|')))
result = ''
for line in text.split('\n'):
result += '%s / %s\n' % (line, translations[line])
return result
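# Minimal configuration sketch for this plugin (YAML keys mirror the
# defaults registered in LyricsPlugin.__init__; the values here are only an
# example):
#
#     lyrics:
#         auto: yes
#         sources: [lyricwiki, musixmatch]
#         force: no
#         fallback: ''
#
# Adding 'google' to `sources` additionally requires google_API_key and
# google_engine_ID, otherwise the source is disabled as handled above.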
# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import xml.sax
import base64
import time
import urllib
import boto.utils
from boto.connection import AWSAuthConnection
from boto import handler
from boto.s3.bucket import Bucket
from boto.s3.key import Key
from boto.resultset import ResultSet
from boto.exception import BotoClientError
def check_lowercase_bucketname(n):
"""
Bucket names must not contain uppercase characters. We check for
this by appending a lowercase character and testing with islower().
Note this also covers cases like numeric bucket names with dashes.
>>> check_lowercase_bucketname("Aaaa")
Traceback (most recent call last):
...
BotoClientError: S3Error: Bucket names cannot contain upper-case
characters when using either the sub-domain or virtual hosting calling
format.
>>> check_lowercase_bucketname("1234-5678-9123")
True
>>> check_lowercase_bucketname("abcdefg1234")
True
"""
if not (n + 'a').islower():
raise BotoClientError("Bucket names cannot contain upper-case " \
"characters when using either the sub-domain or virtual " \
"hosting calling format.")
return True
def assert_case_insensitive(f):
def wrapper(*args, **kwargs):
if len(args) == 3 and check_lowercase_bucketname(args[2]):
pass
return f(*args, **kwargs)
return wrapper
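# Note on the decorator above: it assumes the wrapped method is called as
# f(self, server, bucket), so args[2] is the bucket name being validated.
# It is applied below only to the calling formats that embed the bucket
# name in the host name, where upper-case characters cannot be used.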
class _CallingFormat(object):
def get_bucket_server(self, server, bucket):
return ''
def build_url_base(self, connection, protocol, server, bucket, key=''):
url_base = '%s://' % protocol
url_base += self.build_host(server, bucket)
url_base += connection.get_path(self.build_path_base(bucket, key))
return url_base
def build_host(self, server, bucket):
if bucket == '':
return server
else:
return self.get_bucket_server(server, bucket)
def build_auth_path(self, bucket, key=''):
key = boto.utils.get_utf8_value(key)
path = ''
if bucket != '':
path = '/' + bucket
return path + '/%s' % urllib.quote(key)
def build_path_base(self, bucket, key=''):
key = boto.utils.get_utf8_value(key)
return '/%s' % urllib.quote(key)
class SubdomainCallingFormat(_CallingFormat):
@assert_case_insensitive
def get_bucket_server(self, server, bucket):
return '%s.%s' % (bucket, server)
class VHostCallingFormat(_CallingFormat):
@assert_case_insensitive
def get_bucket_server(self, server, bucket):
return bucket
class OrdinaryCallingFormat(_CallingFormat):
def get_bucket_server(self, server, bucket):
return server
def build_path_base(self, bucket, key=''):
key = boto.utils.get_utf8_value(key)
path_base = '/'
if bucket:
path_base += "%s/" % bucket
return path_base + urllib.quote(key)
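# Illustrative comparison of the calling formats defined above, for a bucket
# named 'mybucket' and key 'path/to/obj' against the default S3 endpoint:
#
#     SubdomainCallingFormat().build_host('s3.amazonaws.com', 'mybucket')
#     # -> 'mybucket.s3.amazonaws.com'
#     OrdinaryCallingFormat().build_host('s3.amazonaws.com', 'mybucket')
#     # -> 's3.amazonaws.com'
#     OrdinaryCallingFormat().build_path_base('mybucket', 'path/to/obj')
#     # -> '/mybucket/path/to/obj'
#
# Keys are passed through urllib.quote(), so keys with reserved characters
# will differ slightly from the literal path shown here.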
class ProtocolIndependentOrdinaryCallingFormat(OrdinaryCallingFormat):
def build_url_base(self, connection, protocol, server, bucket, key=''):
url_base = '//'
url_base += self.build_host(server, bucket)
url_base += connection.get_path(self.build_path_base(bucket, key))
return url_base
class Location:
DEFAULT = '' # US Classic Region
EU = 'EU'
USWest = 'us-west-1'
USWest2 = 'us-west-2'
SAEast = 'sa-east-1'
APNortheast = 'ap-northeast-1'
APSoutheast = 'ap-southeast-1'
class S3Connection(AWSAuthConnection):
DefaultHost = 's3.amazonaws.com'
QueryString = 'Signature=%s&Expires=%d&AWSAccessKeyId=%s'
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None,
host=DefaultHost, debug=0, https_connection_factory=None,
calling_format=SubdomainCallingFormat(), path='/',
provider='aws', bucket_class=Bucket, security_token=None,
suppress_consec_slashes=True, anon=False):
self.calling_format = calling_format
self.bucket_class = bucket_class
self.anon = anon
AWSAuthConnection.__init__(self, host,
aws_access_key_id, aws_secret_access_key,
is_secure, port, proxy, proxy_port, proxy_user, proxy_pass,
debug=debug, https_connection_factory=https_connection_factory,
path=path, provider=provider, security_token=security_token,
suppress_consec_slashes=suppress_consec_slashes)
def _required_auth_capability(self):
if self.anon:
return ['anon']
else:
return ['s3']
def __iter__(self):
for bucket in self.get_all_buckets():
yield bucket
def __contains__(self, bucket_name):
        return self.lookup(bucket_name) is not None
def set_bucket_class(self, bucket_class):
"""
        Set the Bucket class associated with this connection. By default,
        this is boto.s3.bucket.Bucket, but you can subclass it and use this
        method to associate your subclass with the connection.
:type bucket_class: class
:param bucket_class: A subclass of Bucket that can be more specific
"""
self.bucket_class = bucket_class
def build_post_policy(self, expiration_time, conditions):
"""
Taken from the AWS book Python examples and modified for use with boto
"""
assert type(expiration_time) == time.struct_time, \
'Policy document must include a valid expiration Time object'
# Convert conditions object mappings to condition statements
return '{"expiration": "%s",\n"conditions": [%s]}' % \
(time.strftime(boto.utils.ISO8601, expiration_time), ",".join(conditions))
def build_post_form_args(self, bucket_name, key, expires_in = 6000,
acl = None, success_action_redirect = None,
max_content_length = None,
http_method = "http", fields=None,
conditions=None):
"""
        Taken from the AWS book Python examples and modified for use with
        boto. This only returns the arguments required for the POST form,
        not the actual form itself. It also does not return the file input
        field, which needs to be added separately.
:type bucket_name: string
:param bucket_name: Bucket to submit to
:type key: string
:param key: Key name, optionally add ${filename} to the end to
attach the submitted filename
:type expires_in: integer
:param expires_in: Time (in seconds) before this expires, defaults
to 6000
:type acl: :class:`boto.s3.acl.ACL`
:param acl: ACL rule to use, if any
:type success_action_redirect: string
:param success_action_redirect: URL to redirect to on success
:type max_content_length: integer
:param max_content_length: Maximum size for this file
:type http_method: string
:param http_method: HTTP Method to use, "http" or "https"
:rtype: dict
:return: A dictionary containing field names/values as well as
a url to POST to
.. code-block:: python
{
"action": action_url_to_post_to,
"fields": [
{
"name": field_name,
"value": field_value
},
{
"name": field_name2,
"value": field_value2
}
]
}
"""
        if fields is None:
            fields = []
        if conditions is None:
            conditions = []
expiration = time.gmtime(int(time.time() + expires_in))
# Generate policy document
conditions.append('{"bucket": "%s"}' % bucket_name)
if key.endswith("${filename}"):
conditions.append('["starts-with", "$key", "%s"]' % key[:-len("${filename}")])
else:
conditions.append('{"key": "%s"}' % key)
if acl:
conditions.append('{"acl": "%s"}' % acl)
fields.append({ "name": "acl", "value": acl})
if success_action_redirect:
conditions.append('{"success_action_redirect": "%s"}' % success_action_redirect)
fields.append({ "name": "success_action_redirect", "value": success_action_redirect})
if max_content_length:
conditions.append('["content-length-range", 0, %i]' % max_content_length)
fields.append({"name":'content-length-range', "value": "0,%i" % max_content_length})
policy = self.build_post_policy(expiration, conditions)
# Add the base64-encoded policy document as the 'policy' field
policy_b64 = base64.b64encode(policy)
fields.append({"name": "policy", "value": policy_b64})
# Add the AWS access key as the 'AWSAccessKeyId' field
fields.append({"name": "AWSAccessKeyId",
"value": self.aws_access_key_id})
        # Add the signature of the encoded policy document as the 'signature' field
signature = self._auth_handler.sign_string(policy_b64)
fields.append({"name": "signature", "value": signature})
fields.append({"name": "key", "value": key})
        # Build the form's action URL with the requested protocol (the
        # http_method argument, "http" or "https").
url = '%s://%s/' % (http_method,
self.calling_format.build_host(self.server_name(),
bucket_name))
return {"action": url, "fields": fields}
def generate_url(self, expires_in, method, bucket='', key='', headers=None,
query_auth=True, force_http=False, response_headers=None,
expires_in_absolute=False):
if not headers:
headers = {}
if expires_in_absolute:
expires = int(expires_in)
else:
expires = int(time.time() + expires_in)
auth_path = self.calling_format.build_auth_path(bucket, key)
auth_path = self.get_path(auth_path)
# Arguments to override response headers become part of the canonical
# string to be signed.
if response_headers:
response_hdrs = ["%s=%s" % (k, v) for k, v in
response_headers.items()]
delimiter = '?' if '?' not in auth_path else '&'
auth_path = "%s%s" % (auth_path, delimiter)
auth_path += '&'.join(response_hdrs)
else:
response_headers = {}
c_string = boto.utils.canonical_string(method, auth_path, headers,
expires, self.provider)
b64_hmac = self._auth_handler.sign_string(c_string)
encoded_canonical = urllib.quote_plus(b64_hmac)
self.calling_format.build_path_base(bucket, key)
if query_auth:
query_part = '?' + self.QueryString % (encoded_canonical, expires,
self.aws_access_key_id)
# The response headers must also be GET parameters in the URL.
headers.update(response_headers)
hdrs = ['%s=%s'%(n, urllib.quote(v)) for n, v in headers.items()]
q_str = '&'.join(hdrs)
if q_str:
query_part += '&' + q_str
else:
query_part = ''
if force_http:
protocol = 'http'
port = 80
else:
protocol = self.protocol
port = self.port
return self.calling_format.build_url_base(self, protocol,
self.server_name(port),
bucket, key) + query_part
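    # Usage sketch for generate_url() (names are placeholders): a pre-signed
    # GET URL valid for one hour might be produced with
    #
    #     url = conn.generate_url(3600, 'GET', bucket='my-bucket',
    #                             key='reports/2013.csv')
    #     # -> https://my-bucket.s3.amazonaws.com/reports/2013.csv?
    #     #    Signature=...&Expires=...&AWSAccessKeyId=...
    #
    # Pass force_http=True to force a plain http:// URL regardless of the
    # connection's is_secure setting.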
def get_all_buckets(self, headers=None):
response = self.make_request('GET', headers=headers)
body = response.read()
if response.status > 300:
raise self.provider.storage_response_error(
response.status, response.reason, body)
rs = ResultSet([('Bucket', self.bucket_class)])
h = handler.XmlHandler(rs, self)
xml.sax.parseString(body, h)
return rs
def get_canonical_user_id(self, headers=None):
"""
Convenience method that returns the "CanonicalUserID" of the
        user whose credentials are associated with the connection.
The only way to get this value is to do a GET request on the
service which returns all buckets associated with the account.
As part of that response, the canonical userid is returned.
This method simply does all of that and then returns just the
user id.
:rtype: string
:return: A string containing the canonical user id.
"""
rs = self.get_all_buckets(headers=headers)
return rs.ID
def get_bucket(self, bucket_name, validate=True, headers=None):
bucket = self.bucket_class(self, bucket_name)
if validate:
bucket.get_all_keys(headers, maxkeys=0)
return bucket
def lookup(self, bucket_name, validate=True, headers=None):
try:
bucket = self.get_bucket(bucket_name, validate, headers=headers)
        except self.provider.storage_response_error:
bucket = None
return bucket
def create_bucket(self, bucket_name, headers=None,
location=Location.DEFAULT, policy=None):
"""
        Creates a new bucket in the given location. By default the bucket is
        created in the US Classic region; pass Location.EU (or another
        Location constant) to create the bucket elsewhere.
:type bucket_name: string
:param bucket_name: The name of the new bucket
:type headers: dict
:param headers: Additional headers to pass along with the request to AWS.
:type location: :class:`boto.s3.connection.Location`
:param location: The location of the new bucket
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
"""
check_lowercase_bucketname(bucket_name)
if policy:
if headers:
headers[self.provider.acl_header] = policy
else:
headers = {self.provider.acl_header : policy}
if location == Location.DEFAULT:
data = ''
else:
data = '<CreateBucketConstraint><LocationConstraint>' + \
location + '</LocationConstraint></CreateBucketConstraint>'
response = self.make_request('PUT', bucket_name, headers=headers,
data=data)
body = response.read()
if response.status == 409:
raise self.provider.storage_create_error(
response.status, response.reason, body)
if response.status == 200:
return self.bucket_class(self, bucket_name)
else:
raise self.provider.storage_response_error(
response.status, response.reason, body)
def delete_bucket(self, bucket, headers=None):
response = self.make_request('DELETE', bucket, headers=headers)
body = response.read()
if response.status != 204:
raise self.provider.storage_response_error(
response.status, response.reason, body)
def make_request(self, method, bucket='', key='', headers=None, data='',
query_args=None, sender=None, override_num_retries=None):
if isinstance(bucket, self.bucket_class):
bucket = bucket.name
if isinstance(key, Key):
key = key.name
path = self.calling_format.build_path_base(bucket, key)
boto.log.debug('path=%s' % path)
auth_path = self.calling_format.build_auth_path(bucket, key)
boto.log.debug('auth_path=%s' % auth_path)
host = self.calling_format.build_host(self.server_name(), bucket)
if query_args:
path += '?' + query_args
boto.log.debug('path=%s' % path)
auth_path += '?' + query_args
boto.log.debug('auth_path=%s' % auth_path)
return AWSAuthConnection.make_request(self, method, path, headers,
data, host, auth_path, sender,
override_num_retries=override_num_retries)
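# End-to-end usage sketch for the connection class above (credentials and
# bucket names are placeholders):
#
#     conn = S3Connection('<aws_access_key_id>', '<aws_secret_access_key>')
#     bucket = conn.create_bucket('my-lowercase-bucket')
#     for b in conn:                      # __iter__ lists all buckets
#         print b.name
#     if 'my-lowercase-bucket' in conn:   # __contains__ uses lookup()
#         conn.delete_bucket('my-lowercase-bucket')
#
# create_bucket() enforces check_lowercase_bucketname() and raises the
# provider's storage_create_error when the bucket already exists (HTTP 409).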